]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.7-201110172337.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.7-201110172337.patch
CommitLineData
0e5338d9
PK
1diff -urNp linux-3.0.7/arch/alpha/include/asm/elf.h linux-3.0.7/arch/alpha/include/asm/elf.h
2--- linux-3.0.7/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.7/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.7/arch/alpha/include/asm/pgtable.h linux-3.0.7/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.7/arch/alpha/kernel/module.c linux-3.0.7/arch/alpha/kernel/module.c
40--- linux-3.0.7/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.7/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.7/arch/alpha/kernel/osf_sys.c linux-3.0.7/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.7/arch/alpha/mm/fault.c linux-3.0.7/arch/alpha/mm/fault.c
86--- linux-3.0.7/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.7/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.7/arch/arm/include/asm/elf.h linux-3.0.7/arch/arm/include/asm/elf.h
245--- linux-3.0.7/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.7/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.7/arch/arm/include/asm/kmap_types.h linux-3.0.7/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.7/arch/arm/include/asm/uaccess.h linux-3.0.7/arch/arm/include/asm/uaccess.h
286--- linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.7/arch/arm/kernel/armksyms.c linux-3.0.7/arch/arm/kernel/armksyms.c
344--- linux-3.0.7/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.7/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.7/arch/arm/kernel/process.c linux-3.0.7/arch/arm/kernel/process.c
358--- linux-3.0.7/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.7/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.7/arch/arm/kernel/traps.c linux-3.0.7/arch/arm/kernel/traps.c
382--- linux-3.0.7/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.7/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.7/arch/arm/lib/copy_from_user.S linux-3.0.7/arch/arm/lib/copy_from_user.S
404--- linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.7/arch/arm/lib/copy_to_user.S linux-3.0.7/arch/arm/lib/copy_to_user.S
430--- linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.7/arch/arm/lib/uaccess.S linux-3.0.7/arch/arm/lib/uaccess.S
456--- linux-3.0.7/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.7/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.7/arch/arm/mm/fault.c linux-3.0.7/arch/arm/mm/fault.c
536--- linux-3.0.7/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.7/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.7/arch/arm/mm/mmap.c linux-3.0.7/arch/arm/mm/mmap.c
587--- linux-3.0.7/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.7/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.7/arch/avr32/include/asm/elf.h linux-3.0.7/arch/avr32/include/asm/elf.h
639--- linux-3.0.7/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.7/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.7/arch/avr32/include/asm/kmap_types.h linux-3.0.7/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.7/arch/avr32/mm/fault.c linux-3.0.7/arch/avr32/mm/fault.c
671--- linux-3.0.7/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.7/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.7/arch/frv/include/asm/kmap_types.h linux-3.0.7/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.7/arch/frv/mm/elf-fdpic.c linux-3.0.7/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.7/arch/ia64/include/asm/elf.h linux-3.0.7/arch/ia64/include/asm/elf.h
757--- linux-3.0.7/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.7/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.7/arch/ia64/include/asm/pgtable.h linux-3.0.7/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.7/arch/ia64/include/asm/spinlock.h linux-3.0.7/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.7/arch/ia64/include/asm/uaccess.h linux-3.0.7/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.7/arch/ia64/kernel/module.c linux-3.0.7/arch/ia64/kernel/module.c
837--- linux-3.0.7/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.7/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.7/arch/ia64/kernel/sys_ia64.c linux-3.0.7/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.7/arch/ia64/mm/fault.c linux-3.0.7/arch/ia64/mm/fault.c
975--- linux-3.0.7/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.7/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.7/arch/ia64/mm/hugetlbpage.c linux-3.0.7/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.7/arch/ia64/mm/init.c linux-3.0.7/arch/ia64/mm/init.c
1039--- linux-3.0.7/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.7/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.7/arch/m32r/lib/usercopy.c linux-3.0.7/arch/m32r/lib/usercopy.c
1062--- linux-3.0.7/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.7/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.7/arch/mips/include/asm/elf.h linux-3.0.7/arch/mips/include/asm/elf.h
1085--- linux-3.0.7/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.7/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.7/arch/mips/include/asm/page.h linux-3.0.7/arch/mips/include/asm/page.h
1109--- linux-3.0.7/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.7/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.7/arch/mips/include/asm/system.h linux-3.0.7/arch/mips/include/asm/system.h
1121--- linux-3.0.7/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.7/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.7/arch/mips/kernel/process.c linux-3.0.7/arch/mips/kernel/process.c
1166--- linux-3.0.7/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.7/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.7/arch/mips/mm/fault.c linux-3.0.7/arch/mips/mm/fault.c
1185--- linux-3.0.7/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.7/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.7/arch/mips/mm/mmap.c linux-3.0.7/arch/mips/mm/mmap.c
1212--- linux-3.0.7/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.7/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.7/arch/parisc/include/asm/elf.h linux-3.0.7/arch/parisc/include/asm/elf.h
1276--- linux-3.0.7/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.7/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.7/arch/parisc/include/asm/pgtable.h linux-3.0.7/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.7/arch/parisc/kernel/module.c linux-3.0.7/arch/parisc/kernel/module.c
1314--- linux-3.0.7/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.7/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.7/arch/parisc/kernel/sys_parisc.c linux-3.0.7/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.7/arch/parisc/kernel/traps.c linux-3.0.7/arch/parisc/kernel/traps.c
1447--- linux-3.0.7/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.7/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.7/arch/parisc/mm/fault.c linux-3.0.7/arch/parisc/mm/fault.c
1461--- linux-3.0.7/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.7/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.7/arch/powerpc/include/asm/elf.h linux-3.0.7/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.7/arch/powerpc/include/asm/kmap_types.h linux-3.0.7/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.7/arch/powerpc/include/asm/mman.h linux-3.0.7/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.7/arch/powerpc/include/asm/page_64.h linux-3.0.7/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.7/arch/powerpc/include/asm/page.h linux-3.0.7/arch/powerpc/include/asm/page.h
1715--- linux-3.0.7/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.7/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.7/arch/powerpc/include/asm/pgtable.h linux-3.0.7/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.7/arch/powerpc/include/asm/reg.h linux-3.0.7/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.7/arch/powerpc/include/asm/system.h linux-3.0.7/arch/powerpc/include/asm/system.h
1773--- linux-3.0.7/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.7/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.7/arch/powerpc/include/asm/uaccess.h linux-3.0.7/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.7/arch/powerpc/kernel/module_32.c linux-3.0.7/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.7/arch/powerpc/kernel/module.c linux-3.0.7/arch/powerpc/kernel/module.c
2033--- linux-3.0.7/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.7/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.7/arch/powerpc/kernel/process.c linux-3.0.7/arch/powerpc/kernel/process.c
2075--- linux-3.0.7/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.7/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_32.c linux-3.0.7/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_64.c linux-3.0.7/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.7/arch/powerpc/kernel/traps.c linux-3.0.7/arch/powerpc/kernel/traps.c
2194--- linux-3.0.7/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.7/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.7/arch/powerpc/kernel/vdso.c linux-3.0.7/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.7/arch/powerpc/lib/usercopy_64.c linux-3.0.7/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.7/arch/powerpc/mm/fault.c linux-3.0.7/arch/powerpc/mm/fault.c
2278--- linux-3.0.7/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.7/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.7/arch/powerpc/mm/mmap_64.c linux-3.0.7/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.7/arch/powerpc/mm/slice.c linux-3.0.7/arch/powerpc/mm/slice.c
2411--- linux-3.0.7/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.7/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.7/arch/s390/include/asm/elf.h linux-3.0.7/arch/s390/include/asm/elf.h
2480--- linux-3.0.7/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.7/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.7/arch/s390/include/asm/system.h linux-3.0.7/arch/s390/include/asm/system.h
2508--- linux-3.0.7/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.7/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.7/arch/s390/include/asm/uaccess.h linux-3.0.7/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.7/arch/s390/kernel/module.c linux-3.0.7/arch/s390/kernel/module.c
2555--- linux-3.0.7/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.7/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.7/arch/s390/kernel/process.c linux-3.0.7/arch/s390/kernel/process.c
2629--- linux-3.0.7/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.7/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.7/arch/s390/kernel/setup.c linux-3.0.7/arch/s390/kernel/setup.c
2672--- linux-3.0.7/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.7/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.7/arch/s390/mm/mmap.c linux-3.0.7/arch/s390/mm/mmap.c
2684--- linux-3.0.7/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.7/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.7/arch/score/include/asm/system.h linux-3.0.7/arch/score/include/asm/system.h
2733--- linux-3.0.7/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.7/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.7/arch/score/kernel/process.c linux-3.0.7/arch/score/kernel/process.c
2745--- linux-3.0.7/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.7/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.7/arch/sh/mm/mmap.c linux-3.0.7/arch/sh/mm/mmap.c
2757--- linux-3.0.7/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.7/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.7/arch/sparc/include/asm/atomic_64.h linux-3.0.7/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.7/arch/sparc/include/asm/cache.h linux-3.0.7/arch/sparc/include/asm/cache.h
3029--- linux-3.0.7/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.7/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_32.h linux-3.0.7/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_64.h linux-3.0.7/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtable_32.h linux-3.0.7/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.7/arch/sparc/include/asm/spinlock_64.h linux-3.0.7/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:54:53.000000000 -0400
3127+++ linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:55:27.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_32.h linux-3.0.7/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_64.h linux-3.0.7/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_32.h linux-3.0.7/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_64.h linux-3.0.7/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess.h linux-3.0.7/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.7/arch/sparc/kernel/Makefile linux-3.0.7/arch/sparc/kernel/Makefile
3366--- linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:54:53.000000000 -0400
3367+++ linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:55:27.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.7/arch/sparc/kernel/process_32.c linux-3.0.7/arch/sparc/kernel/process_32.c
3378--- linux-3.0.7/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.7/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.7/arch/sparc/kernel/process_64.c linux-3.0.7/arch/sparc/kernel/process_64.c
3416--- linux-3.0.7/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.7/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.7/arch/sparc/kernel/traps_32.c linux-3.0.7/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.7/arch/sparc/kernel/traps_64.c linux-3.0.7/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.7/arch/sparc/kernel/unaligned_64.c linux-3.0.7/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.7/arch/sparc/lib/atomic_64.S linux-3.0.7/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.7/arch/sparc/lib/ksyms.c linux-3.0.7/arch/sparc/lib/ksyms.c
4046--- linux-3.0.7/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.7/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.7/arch/sparc/lib/Makefile linux-3.0.7/arch/sparc/lib/Makefile
4068--- linux-3.0.7/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.7/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.7/arch/sparc/Makefile linux-3.0.7/arch/sparc/Makefile
4080--- linux-3.0.7/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.7/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.7/arch/sparc/mm/fault_32.c linux-3.0.7/arch/sparc/mm/fault_32.c
4092--- linux-3.0.7/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.7/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.7/arch/sparc/mm/fault_64.c linux-3.0.7/arch/sparc/mm/fault_64.c
4399--- linux-3.0.7/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.7/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.7/arch/sparc/mm/hugetlbpage.c linux-3.0.7/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.7/arch/sparc/mm/init_32.c linux-3.0.7/arch/sparc/mm/init_32.c
4971--- linux-3.0.7/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.7/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.7/arch/sparc/mm/Makefile linux-3.0.7/arch/sparc/mm/Makefile
5008--- linux-3.0.7/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.7/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.7/arch/sparc/mm/srmmu.c linux-3.0.7/arch/sparc/mm/srmmu.c
5020--- linux-3.0.7/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.7/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.7/arch/um/include/asm/kmap_types.h linux-3.0.7/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.7/arch/um/include/asm/page.h linux-3.0.7/arch/um/include/asm/page.h
5048--- linux-3.0.7/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.7/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.7/arch/um/kernel/process.c linux-3.0.7/arch/um/kernel/process.c
5061--- linux-3.0.7/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.7/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.7/arch/um/sys-i386/syscalls.c linux-3.0.7/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.7/arch/x86/boot/bitops.h linux-3.0.7/arch/x86/boot/bitops.h
5112--- linux-3.0.7/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.7/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.7/arch/x86/boot/boot.h linux-3.0.7/arch/x86/boot/boot.h
5133--- linux-3.0.7/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.7/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_32.S linux-3.0.7/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_64.S linux-3.0.7/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.7/arch/x86/boot/compressed/Makefile linux-3.0.7/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.7/arch/x86/boot/compressed/misc.c linux-3.0.7/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.7/arch/x86/boot/compressed/relocs.c linux-3.0.7/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.7/arch/x86/boot/cpucheck.c linux-3.0.7/arch/x86/boot/cpucheck.c
5435--- linux-3.0.7/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.7/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.7/arch/x86/boot/header.S linux-3.0.7/arch/x86/boot/header.S
5533--- linux-3.0.7/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.7/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.7/arch/x86/boot/Makefile linux-3.0.7/arch/x86/boot/Makefile
5545--- linux-3.0.7/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.7/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.7/arch/x86/boot/memory.c linux-3.0.7/arch/x86/boot/memory.c
5558--- linux-3.0.7/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.7/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.7/arch/x86/boot/video.c linux-3.0.7/arch/x86/boot/video.c
5570--- linux-3.0.7/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.7/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.7/arch/x86/boot/video-vesa.c linux-3.0.7/arch/x86/boot/video-vesa.c
5582--- linux-3.0.7/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.7/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S
5593--- linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5595@@ -8,6 +8,8 @@
5596 * including this sentence is retained in full.
5597 */
5598
5599+#include <asm/alternative-asm.h>
5600+
5601 .extern crypto_ft_tab
5602 .extern crypto_it_tab
5603 .extern crypto_fl_tab
5604@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5605 je B192; \
5606 leaq 32(r9),r9;
5607
5608+#define ret pax_force_retaddr; ret
5609+
5610 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5611 movq r1,r2; \
5612 movq r3,r4; \
5613diff -urNp linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S
5614--- linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5615+++ linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5616@@ -1,3 +1,5 @@
5617+#include <asm/alternative-asm.h>
5618+
5619 # enter ECRYPT_encrypt_bytes
5620 .text
5621 .p2align 5
5622@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5623 add %r11,%rsp
5624 mov %rdi,%rax
5625 mov %rsi,%rdx
5626+ pax_force_retaddr
5627 ret
5628 # bytesatleast65:
5629 ._bytesatleast65:
5630@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5631 add %r11,%rsp
5632 mov %rdi,%rax
5633 mov %rsi,%rdx
5634+ pax_force_retaddr
5635 ret
5636 # enter ECRYPT_ivsetup
5637 .text
5638@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5639 add %r11,%rsp
5640 mov %rdi,%rax
5641 mov %rsi,%rdx
5642+ pax_force_retaddr
5643 ret
5644diff -urNp linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S
5645--- linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5646+++ linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5647@@ -21,6 +21,7 @@
5648 .text
5649
5650 #include <asm/asm-offsets.h>
5651+#include <asm/alternative-asm.h>
5652
5653 #define a_offset 0
5654 #define b_offset 4
5655@@ -269,6 +270,7 @@ twofish_enc_blk:
5656
5657 popq R1
5658 movq $1,%rax
5659+ pax_force_retaddr
5660 ret
5661
5662 twofish_dec_blk:
5663@@ -321,4 +323,5 @@ twofish_dec_blk:
5664
5665 popq R1
5666 movq $1,%rax
5667+ pax_force_retaddr
5668 ret
5669diff -urNp linux-3.0.7/arch/x86/ia32/ia32_aout.c linux-3.0.7/arch/x86/ia32/ia32_aout.c
5670--- linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5671+++ linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5672@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5673 unsigned long dump_start, dump_size;
5674 struct user32 dump;
5675
5676+ memset(&dump, 0, sizeof(dump));
5677+
5678 fs = get_fs();
5679 set_fs(KERNEL_DS);
5680 has_dumped = 1;
5681diff -urNp linux-3.0.7/arch/x86/ia32/ia32entry.S linux-3.0.7/arch/x86/ia32/ia32entry.S
5682--- linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5683+++ linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-10-11 10:44:33.000000000 -0400
5684@@ -13,7 +13,9 @@
5685 #include <asm/thread_info.h>
5686 #include <asm/segment.h>
5687 #include <asm/irqflags.h>
5688+#include <asm/pgtable.h>
5689 #include <linux/linkage.h>
5690+#include <asm/alternative-asm.h>
5691
5692 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5693 #include <linux/elf-em.h>
5694@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5695 ENDPROC(native_irq_enable_sysexit)
5696 #endif
5697
5698+ .macro pax_enter_kernel_user
5699+#ifdef CONFIG_PAX_MEMORY_UDEREF
5700+ call pax_enter_kernel_user
5701+#endif
5702+ .endm
5703+
5704+ .macro pax_exit_kernel_user
5705+#ifdef CONFIG_PAX_MEMORY_UDEREF
5706+ call pax_exit_kernel_user
5707+#endif
5708+#ifdef CONFIG_PAX_RANDKSTACK
5709+ pushq %rax
5710+ call pax_randomize_kstack
5711+ popq %rax
5712+#endif
5713+ .endm
5714+
5715+ .macro pax_erase_kstack
5716+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5717+ call pax_erase_kstack
5718+#endif
5719+ .endm
5720+
5721 /*
5722 * 32bit SYSENTER instruction entry.
5723 *
5724@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5725 CFI_REGISTER rsp,rbp
5726 SWAPGS_UNSAFE_STACK
5727 movq PER_CPU_VAR(kernel_stack), %rsp
5728- addq $(KERNEL_STACK_OFFSET),%rsp
5729+ pax_enter_kernel_user
5730 /*
5731 * No need to follow this irqs on/off section: the syscall
5732 * disabled irqs, here we enable it straight after entry:
5733@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REL_OFFSET rsp,0
5735 pushfq_cfi
5736 /*CFI_REL_OFFSET rflags,0*/
5737- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5738+ GET_THREAD_INFO(%r10)
5739+ movl TI_sysenter_return(%r10), %r10d
5740 CFI_REGISTER rip,r10
5741 pushq_cfi $__USER32_CS
5742 /*CFI_REL_OFFSET cs,0*/
5743@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5744 SAVE_ARGS 0,0,1
5745 /* no need to do an access_ok check here because rbp has been
5746 32bit zero extended */
5747+
5748+#ifdef CONFIG_PAX_MEMORY_UDEREF
5749+ mov $PAX_USER_SHADOW_BASE,%r10
5750+ add %r10,%rbp
5751+#endif
5752+
5753 1: movl (%rbp),%ebp
5754 .section __ex_table,"a"
5755 .quad 1b,ia32_badarg
5756@@ -168,6 +200,8 @@ sysenter_dispatch:
5757 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5758 jnz sysexit_audit
5759 sysexit_from_sys_call:
5760+ pax_exit_kernel_user
5761+ pax_erase_kstack
5762 andl $~TS_COMPAT,TI_status(%r10)
5763 /* clear IF, that popfq doesn't enable interrupts early */
5764 andl $~0x200,EFLAGS-R11(%rsp)
5765@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5766 movl %eax,%esi /* 2nd arg: syscall number */
5767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5768 call audit_syscall_entry
5769+
5770+ pax_erase_kstack
5771+
5772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5773 cmpq $(IA32_NR_syscalls-1),%rax
5774 ja ia32_badsys
5775@@ -246,6 +283,9 @@ sysenter_tracesys:
5776 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5777 movq %rsp,%rdi /* &pt_regs -> arg1 */
5778 call syscall_trace_enter
5779+
5780+ pax_erase_kstack
5781+
5782 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5783 RESTORE_REST
5784 cmpq $(IA32_NR_syscalls-1),%rax
5785@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5786 ENTRY(ia32_cstar_target)
5787 CFI_STARTPROC32 simple
5788 CFI_SIGNAL_FRAME
5789- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5790+ CFI_DEF_CFA rsp,0
5791 CFI_REGISTER rip,rcx
5792 /*CFI_REGISTER rflags,r11*/
5793 SWAPGS_UNSAFE_STACK
5794 movl %esp,%r8d
5795 CFI_REGISTER rsp,r8
5796 movq PER_CPU_VAR(kernel_stack),%rsp
5797+
5798+#ifdef CONFIG_PAX_MEMORY_UDEREF
5799+ pax_enter_kernel_user
5800+#endif
5801+
5802 /*
5803 * No need to follow this irqs on/off section: the syscall
5804 * disabled irqs and here we enable it straight after entry:
5805 */
5806 ENABLE_INTERRUPTS(CLBR_NONE)
5807- SAVE_ARGS 8,1,1
5808+ SAVE_ARGS 8*6,1,1
5809 movl %eax,%eax /* zero extension */
5810 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5811 movq %rcx,RIP-ARGOFFSET(%rsp)
5812@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5813 /* no need to do an access_ok check here because r8 has been
5814 32bit zero extended */
5815 /* hardware stack frame is complete now */
5816+
5817+#ifdef CONFIG_PAX_MEMORY_UDEREF
5818+ mov $PAX_USER_SHADOW_BASE,%r10
5819+ add %r10,%r8
5820+#endif
5821+
5822 1: movl (%r8),%r9d
5823 .section __ex_table,"a"
5824 .quad 1b,ia32_badarg
5825@@ -327,6 +378,8 @@ cstar_dispatch:
5826 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5827 jnz sysretl_audit
5828 sysretl_from_sys_call:
5829+ pax_exit_kernel_user
5830+ pax_erase_kstack
5831 andl $~TS_COMPAT,TI_status(%r10)
5832 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5833 movl RIP-ARGOFFSET(%rsp),%ecx
5834@@ -364,6 +417,9 @@ cstar_tracesys:
5835 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5836 movq %rsp,%rdi /* &pt_regs -> arg1 */
5837 call syscall_trace_enter
5838+
5839+ pax_erase_kstack
5840+
5841 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5842 RESTORE_REST
5843 xchgl %ebp,%r9d
5844@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5845 CFI_REL_OFFSET rip,RIP-RIP
5846 PARAVIRT_ADJUST_EXCEPTION_FRAME
5847 SWAPGS
5848+ pax_enter_kernel_user
5849 /*
5850 * No need to follow this irqs on/off section: the syscall
5851 * disabled irqs and here we enable it straight after entry:
5852@@ -441,6 +498,9 @@ ia32_tracesys:
5853 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5854 movq %rsp,%rdi /* &pt_regs -> arg1 */
5855 call syscall_trace_enter
5856+
5857+ pax_erase_kstack
5858+
5859 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5860 RESTORE_REST
5861 cmpq $(IA32_NR_syscalls-1),%rax
5862@@ -455,6 +515,7 @@ ia32_badsys:
5863
5864 quiet_ni_syscall:
5865 movq $-ENOSYS,%rax
5866+ pax_force_retaddr
5867 ret
5868 CFI_ENDPROC
5869
5870diff -urNp linux-3.0.7/arch/x86/ia32/ia32_signal.c linux-3.0.7/arch/x86/ia32/ia32_signal.c
5871--- linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5872+++ linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5873@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5874 }
5875 seg = get_fs();
5876 set_fs(KERNEL_DS);
5877- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5878+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5879 set_fs(seg);
5880 if (ret >= 0 && uoss_ptr) {
5881 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5882@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5883 */
5884 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5885 size_t frame_size,
5886- void **fpstate)
5887+ void __user **fpstate)
5888 {
5889 unsigned long sp;
5890
5891@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5892
5893 if (used_math()) {
5894 sp = sp - sig_xstate_ia32_size;
5895- *fpstate = (struct _fpstate_ia32 *) sp;
5896+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5897 if (save_i387_xstate_ia32(*fpstate) < 0)
5898 return (void __user *) -1L;
5899 }
5900@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5901 sp -= frame_size;
5902 /* Align the stack pointer according to the i386 ABI,
5903 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5904- sp = ((sp + 4) & -16ul) - 4;
5905+ sp = ((sp - 12) & -16ul) - 4;
5906 return (void __user *) sp;
5907 }
5908
5909@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5910 * These are actually not used anymore, but left because some
5911 * gdb versions depend on them as a marker.
5912 */
5913- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5914+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5915 } put_user_catch(err);
5916
5917 if (err)
5918@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5919 0xb8,
5920 __NR_ia32_rt_sigreturn,
5921 0x80cd,
5922- 0,
5923+ 0
5924 };
5925
5926 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5927@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5928
5929 if (ka->sa.sa_flags & SA_RESTORER)
5930 restorer = ka->sa.sa_restorer;
5931+ else if (current->mm->context.vdso)
5932+ /* Return stub is in 32bit vsyscall page */
5933+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5934 else
5935- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5936- rt_sigreturn);
5937+ restorer = &frame->retcode;
5938 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5939
5940 /*
5941 * Not actually used anymore, but left because some gdb
5942 * versions need it.
5943 */
5944- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5945+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5946 } put_user_catch(err);
5947
5948 if (err)
5949diff -urNp linux-3.0.7/arch/x86/ia32/sys_ia32.c linux-3.0.7/arch/x86/ia32/sys_ia32.c
5950--- linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5951+++ linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5952@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5953 */
5954 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5955 {
5956- typeof(ubuf->st_uid) uid = 0;
5957- typeof(ubuf->st_gid) gid = 0;
5958+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5959+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5960 SET_UID(uid, stat->uid);
5961 SET_GID(gid, stat->gid);
5962 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5963@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5964 }
5965 set_fs(KERNEL_DS);
5966 ret = sys_rt_sigprocmask(how,
5967- set ? (sigset_t __user *)&s : NULL,
5968- oset ? (sigset_t __user *)&s : NULL,
5969+ set ? (sigset_t __force_user *)&s : NULL,
5970+ oset ? (sigset_t __force_user *)&s : NULL,
5971 sigsetsize);
5972 set_fs(old_fs);
5973 if (ret)
5974@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5975 return alarm_setitimer(seconds);
5976 }
5977
5978-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5979+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5980 int options)
5981 {
5982 return compat_sys_wait4(pid, stat_addr, options, NULL);
5983@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5984 mm_segment_t old_fs = get_fs();
5985
5986 set_fs(KERNEL_DS);
5987- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5988+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5989 set_fs(old_fs);
5990 if (put_compat_timespec(&t, interval))
5991 return -EFAULT;
5992@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
5997+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
5998 set_fs(old_fs);
5999 if (!ret) {
6000 switch (_NSIG_WORDS) {
6001@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6002 if (copy_siginfo_from_user32(&info, uinfo))
6003 return -EFAULT;
6004 set_fs(KERNEL_DS);
6005- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6006+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6007 set_fs(old_fs);
6008 return ret;
6009 }
6010@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6011 return -EFAULT;
6012
6013 set_fs(KERNEL_DS);
6014- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6015+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6016 count);
6017 set_fs(old_fs);
6018
6019diff -urNp linux-3.0.7/arch/x86/include/asm/alternative-asm.h linux-3.0.7/arch/x86/include/asm/alternative-asm.h
6020--- linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6021+++ linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6022@@ -15,6 +15,20 @@
6023 .endm
6024 #endif
6025
6026+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6027+ .macro pax_force_retaddr rip=0
6028+ btsq $63,\rip(%rsp)
6029+ .endm
6030+ .macro pax_force_fptr ptr
6031+ btsq $63,\ptr
6032+ .endm
6033+#else
6034+ .macro pax_force_retaddr rip=0
6035+ .endm
6036+ .macro pax_force_fptr ptr
6037+ .endm
6038+#endif
6039+
6040 .macro altinstruction_entry orig alt feature orig_len alt_len
6041 .align 8
6042 .quad \orig
6043diff -urNp linux-3.0.7/arch/x86/include/asm/alternative.h linux-3.0.7/arch/x86/include/asm/alternative.h
6044--- linux-3.0.7/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6045+++ linux-3.0.7/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6046@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6047 ".section .discard,\"aw\",@progbits\n" \
6048 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6049 ".previous\n" \
6050- ".section .altinstr_replacement, \"ax\"\n" \
6051+ ".section .altinstr_replacement, \"a\"\n" \
6052 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6053 ".previous"
6054
6055diff -urNp linux-3.0.7/arch/x86/include/asm/apic.h linux-3.0.7/arch/x86/include/asm/apic.h
6056--- linux-3.0.7/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6057+++ linux-3.0.7/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6058@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6059
6060 #ifdef CONFIG_X86_LOCAL_APIC
6061
6062-extern unsigned int apic_verbosity;
6063+extern int apic_verbosity;
6064 extern int local_apic_timer_c2_ok;
6065
6066 extern int disable_apic;
6067diff -urNp linux-3.0.7/arch/x86/include/asm/apm.h linux-3.0.7/arch/x86/include/asm/apm.h
6068--- linux-3.0.7/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6069+++ linux-3.0.7/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6070@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6071 __asm__ __volatile__(APM_DO_ZERO_SEGS
6072 "pushl %%edi\n\t"
6073 "pushl %%ebp\n\t"
6074- "lcall *%%cs:apm_bios_entry\n\t"
6075+ "lcall *%%ss:apm_bios_entry\n\t"
6076 "setc %%al\n\t"
6077 "popl %%ebp\n\t"
6078 "popl %%edi\n\t"
6079@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083- "lcall *%%cs:apm_bios_entry\n\t"
6084+ "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%bl\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_32.h linux-3.0.7/arch/x86/include/asm/atomic64_32.h
6089--- linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6090+++ linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6091@@ -12,6 +12,14 @@ typedef struct {
6092 u64 __aligned(8) counter;
6093 } atomic64_t;
6094
6095+#ifdef CONFIG_PAX_REFCOUNT
6096+typedef struct {
6097+ u64 __aligned(8) counter;
6098+} atomic64_unchecked_t;
6099+#else
6100+typedef atomic64_t atomic64_unchecked_t;
6101+#endif
6102+
6103 #define ATOMIC64_INIT(val) { (val) }
6104
6105 #ifdef CONFIG_X86_CMPXCHG64
6106@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6107 }
6108
6109 /**
6110+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6111+ * @p: pointer to type atomic64_unchecked_t
6112+ * @o: expected value
6113+ * @n: new value
6114+ *
6115+ * Atomically sets @v to @n if it was equal to @o and returns
6116+ * the old value.
6117+ */
6118+
6119+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6120+{
6121+ return cmpxchg64(&v->counter, o, n);
6122+}
6123+
6124+/**
6125 * atomic64_xchg - xchg atomic64 variable
6126 * @v: pointer to type atomic64_t
6127 * @n: value to assign
6128@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6129 }
6130
6131 /**
6132+ * atomic64_set_unchecked - set atomic64 variable
6133+ * @v: pointer to type atomic64_unchecked_t
6134+ * @n: value to assign
6135+ *
6136+ * Atomically sets the value of @v to @n.
6137+ */
6138+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6139+{
6140+ unsigned high = (unsigned)(i >> 32);
6141+ unsigned low = (unsigned)i;
6142+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6143+ : "+b" (low), "+c" (high)
6144+ : "S" (v)
6145+ : "eax", "edx", "memory"
6146+ );
6147+}
6148+
6149+/**
6150 * atomic64_read - read atomic64 variable
6151 * @v: pointer to type atomic64_t
6152 *
6153@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6154 }
6155
6156 /**
6157+ * atomic64_read_unchecked - read atomic64 variable
6158+ * @v: pointer to type atomic64_unchecked_t
6159+ *
6160+ * Atomically reads the value of @v and returns it.
6161+ */
6162+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6163+{
6164+ long long r;
6165+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6166+ : "=A" (r), "+c" (v)
6167+ : : "memory"
6168+ );
6169+ return r;
6170+ }
6171+
6172+/**
6173 * atomic64_add_return - add and return
6174 * @i: integer value to add
6175 * @v: pointer to type atomic64_t
6176@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6177 return i;
6178 }
6179
6180+/**
6181+ * atomic64_add_return_unchecked - add and return
6182+ * @i: integer value to add
6183+ * @v: pointer to type atomic64_unchecked_t
6184+ *
6185+ * Atomically adds @i to @v and returns @i + *@v
6186+ */
6187+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6188+{
6189+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6190+ : "+A" (i), "+c" (v)
6191+ : : "memory"
6192+ );
6193+ return i;
6194+}
6195+
6196 /*
6197 * Other variants with different arithmetic operators:
6198 */
6199@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6200 return a;
6201 }
6202
6203+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6204+{
6205+ long long a;
6206+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6207+ : "=A" (a)
6208+ : "S" (v)
6209+ : "memory", "ecx"
6210+ );
6211+ return a;
6212+}
6213+
6214 static inline long long atomic64_dec_return(atomic64_t *v)
6215 {
6216 long long a;
6217@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6218 }
6219
6220 /**
6221+ * atomic64_add_unchecked - add integer to atomic64 variable
6222+ * @i: integer value to add
6223+ * @v: pointer to type atomic64_unchecked_t
6224+ *
6225+ * Atomically adds @i to @v.
6226+ */
6227+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6228+{
6229+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6230+ : "+A" (i), "+c" (v)
6231+ : : "memory"
6232+ );
6233+ return i;
6234+}
6235+
6236+/**
6237 * atomic64_sub - subtract the atomic64 variable
6238 * @i: integer value to subtract
6239 * @v: pointer to type atomic64_t
6240diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_64.h linux-3.0.7/arch/x86/include/asm/atomic64_64.h
6241--- linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6242+++ linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6243@@ -18,7 +18,19 @@
6244 */
6245 static inline long atomic64_read(const atomic64_t *v)
6246 {
6247- return (*(volatile long *)&(v)->counter);
6248+ return (*(volatile const long *)&(v)->counter);
6249+}
6250+
6251+/**
6252+ * atomic64_read_unchecked - read atomic64 variable
6253+ * @v: pointer of type atomic64_unchecked_t
6254+ *
6255+ * Atomically reads the value of @v.
6256+ * Doesn't imply a read memory barrier.
6257+ */
6258+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6259+{
6260+ return (*(volatile const long *)&(v)->counter);
6261 }
6262
6263 /**
6264@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6265 }
6266
6267 /**
6268+ * atomic64_set_unchecked - set atomic64 variable
6269+ * @v: pointer to type atomic64_unchecked_t
6270+ * @i: required value
6271+ *
6272+ * Atomically sets the value of @v to @i.
6273+ */
6274+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6275+{
6276+ v->counter = i;
6277+}
6278+
6279+/**
6280 * atomic64_add - add integer to atomic64 variable
6281 * @i: integer value to add
6282 * @v: pointer to type atomic64_t
6283@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6284 */
6285 static inline void atomic64_add(long i, atomic64_t *v)
6286 {
6287+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6288+
6289+#ifdef CONFIG_PAX_REFCOUNT
6290+ "jno 0f\n"
6291+ LOCK_PREFIX "subq %1,%0\n"
6292+ "int $4\n0:\n"
6293+ _ASM_EXTABLE(0b, 0b)
6294+#endif
6295+
6296+ : "=m" (v->counter)
6297+ : "er" (i), "m" (v->counter));
6298+}
6299+
6300+/**
6301+ * atomic64_add_unchecked - add integer to atomic64 variable
6302+ * @i: integer value to add
6303+ * @v: pointer to type atomic64_unchecked_t
6304+ *
6305+ * Atomically adds @i to @v.
6306+ */
6307+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6308+{
6309 asm volatile(LOCK_PREFIX "addq %1,%0"
6310 : "=m" (v->counter)
6311 : "er" (i), "m" (v->counter));
6312@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6313 */
6314 static inline void atomic64_sub(long i, atomic64_t *v)
6315 {
6316- asm volatile(LOCK_PREFIX "subq %1,%0"
6317+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6318+
6319+#ifdef CONFIG_PAX_REFCOUNT
6320+ "jno 0f\n"
6321+ LOCK_PREFIX "addq %1,%0\n"
6322+ "int $4\n0:\n"
6323+ _ASM_EXTABLE(0b, 0b)
6324+#endif
6325+
6326+ : "=m" (v->counter)
6327+ : "er" (i), "m" (v->counter));
6328+}
6329+
6330+/**
6331+ * atomic64_sub_unchecked - subtract the atomic64 variable
6332+ * @i: integer value to subtract
6333+ * @v: pointer to type atomic64_unchecked_t
6334+ *
6335+ * Atomically subtracts @i from @v.
6336+ */
6337+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6338+{
6339+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6340 : "=m" (v->counter)
6341 : "er" (i), "m" (v->counter));
6342 }
6343@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6344 {
6345 unsigned char c;
6346
6347- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6348+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6349+
6350+#ifdef CONFIG_PAX_REFCOUNT
6351+ "jno 0f\n"
6352+ LOCK_PREFIX "addq %2,%0\n"
6353+ "int $4\n0:\n"
6354+ _ASM_EXTABLE(0b, 0b)
6355+#endif
6356+
6357+ "sete %1\n"
6358 : "=m" (v->counter), "=qm" (c)
6359 : "er" (i), "m" (v->counter) : "memory");
6360 return c;
6361@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6362 */
6363 static inline void atomic64_inc(atomic64_t *v)
6364 {
6365+ asm volatile(LOCK_PREFIX "incq %0\n"
6366+
6367+#ifdef CONFIG_PAX_REFCOUNT
6368+ "jno 0f\n"
6369+ LOCK_PREFIX "decq %0\n"
6370+ "int $4\n0:\n"
6371+ _ASM_EXTABLE(0b, 0b)
6372+#endif
6373+
6374+ : "=m" (v->counter)
6375+ : "m" (v->counter));
6376+}
6377+
6378+/**
6379+ * atomic64_inc_unchecked - increment atomic64 variable
6380+ * @v: pointer to type atomic64_unchecked_t
6381+ *
6382+ * Atomically increments @v by 1.
6383+ */
6384+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6385+{
6386 asm volatile(LOCK_PREFIX "incq %0"
6387 : "=m" (v->counter)
6388 : "m" (v->counter));
6389@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6390 */
6391 static inline void atomic64_dec(atomic64_t *v)
6392 {
6393- asm volatile(LOCK_PREFIX "decq %0"
6394+ asm volatile(LOCK_PREFIX "decq %0\n"
6395+
6396+#ifdef CONFIG_PAX_REFCOUNT
6397+ "jno 0f\n"
6398+ LOCK_PREFIX "incq %0\n"
6399+ "int $4\n0:\n"
6400+ _ASM_EXTABLE(0b, 0b)
6401+#endif
6402+
6403+ : "=m" (v->counter)
6404+ : "m" (v->counter));
6405+}
6406+
6407+/**
6408+ * atomic64_dec_unchecked - decrement atomic64 variable
6409+ * @v: pointer to type atomic64_t
6410+ *
6411+ * Atomically decrements @v by 1.
6412+ */
6413+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6414+{
6415+ asm volatile(LOCK_PREFIX "decq %0\n"
6416 : "=m" (v->counter)
6417 : "m" (v->counter));
6418 }
6419@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6420 {
6421 unsigned char c;
6422
6423- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6424+ asm volatile(LOCK_PREFIX "decq %0\n"
6425+
6426+#ifdef CONFIG_PAX_REFCOUNT
6427+ "jno 0f\n"
6428+ LOCK_PREFIX "incq %0\n"
6429+ "int $4\n0:\n"
6430+ _ASM_EXTABLE(0b, 0b)
6431+#endif
6432+
6433+ "sete %1\n"
6434 : "=m" (v->counter), "=qm" (c)
6435 : "m" (v->counter) : "memory");
6436 return c != 0;
6437@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6438 {
6439 unsigned char c;
6440
6441- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6442+ asm volatile(LOCK_PREFIX "incq %0\n"
6443+
6444+#ifdef CONFIG_PAX_REFCOUNT
6445+ "jno 0f\n"
6446+ LOCK_PREFIX "decq %0\n"
6447+ "int $4\n0:\n"
6448+ _ASM_EXTABLE(0b, 0b)
6449+#endif
6450+
6451+ "sete %1\n"
6452 : "=m" (v->counter), "=qm" (c)
6453 : "m" (v->counter) : "memory");
6454 return c != 0;
6455@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6456 {
6457 unsigned char c;
6458
6459- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6460+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6461+
6462+#ifdef CONFIG_PAX_REFCOUNT
6463+ "jno 0f\n"
6464+ LOCK_PREFIX "subq %2,%0\n"
6465+ "int $4\n0:\n"
6466+ _ASM_EXTABLE(0b, 0b)
6467+#endif
6468+
6469+ "sets %1\n"
6470 : "=m" (v->counter), "=qm" (c)
6471 : "er" (i), "m" (v->counter) : "memory");
6472 return c;
6473@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6474 static inline long atomic64_add_return(long i, atomic64_t *v)
6475 {
6476 long __i = i;
6477- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6478+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6479+
6480+#ifdef CONFIG_PAX_REFCOUNT
6481+ "jno 0f\n"
6482+ "movq %0, %1\n"
6483+ "int $4\n0:\n"
6484+ _ASM_EXTABLE(0b, 0b)
6485+#endif
6486+
6487+ : "+r" (i), "+m" (v->counter)
6488+ : : "memory");
6489+ return i + __i;
6490+}
6491+
6492+/**
6493+ * atomic64_add_return_unchecked - add and return
6494+ * @i: integer value to add
6495+ * @v: pointer to type atomic64_unchecked_t
6496+ *
6497+ * Atomically adds @i to @v and returns @i + @v
6498+ */
6499+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6500+{
6501+ long __i = i;
6502+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6503 : "+r" (i), "+m" (v->counter)
6504 : : "memory");
6505 return i + __i;
6506@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6507 }
6508
6509 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6510+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6511+{
6512+ return atomic64_add_return_unchecked(1, v);
6513+}
6514 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6515
6516 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6517@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6518 return cmpxchg(&v->counter, old, new);
6519 }
6520
6521+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6522+{
6523+ return cmpxchg(&v->counter, old, new);
6524+}
6525+
6526 static inline long atomic64_xchg(atomic64_t *v, long new)
6527 {
6528 return xchg(&v->counter, new);
6529@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6530 */
6531 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6532 {
6533- long c, old;
6534+ long c, old, new;
6535 c = atomic64_read(v);
6536 for (;;) {
6537- if (unlikely(c == (u)))
6538+ if (unlikely(c == u))
6539 break;
6540- old = atomic64_cmpxchg((v), c, c + (a));
6541+
6542+ asm volatile("add %2,%0\n"
6543+
6544+#ifdef CONFIG_PAX_REFCOUNT
6545+ "jno 0f\n"
6546+ "sub %2,%0\n"
6547+ "int $4\n0:\n"
6548+ _ASM_EXTABLE(0b, 0b)
6549+#endif
6550+
6551+ : "=r" (new)
6552+ : "0" (c), "ir" (a));
6553+
6554+ old = atomic64_cmpxchg(v, c, new);
6555 if (likely(old == c))
6556 break;
6557 c = old;
6558 }
6559- return c != (u);
6560+ return c != u;
6561 }
6562
6563 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6564diff -urNp linux-3.0.7/arch/x86/include/asm/atomic.h linux-3.0.7/arch/x86/include/asm/atomic.h
6565--- linux-3.0.7/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6566+++ linux-3.0.7/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6567@@ -22,7 +22,18 @@
6568 */
6569 static inline int atomic_read(const atomic_t *v)
6570 {
6571- return (*(volatile int *)&(v)->counter);
6572+ return (*(volatile const int *)&(v)->counter);
6573+}
6574+
6575+/**
6576+ * atomic_read_unchecked - read atomic variable
6577+ * @v: pointer of type atomic_unchecked_t
6578+ *
6579+ * Atomically reads the value of @v.
6580+ */
6581+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6582+{
6583+ return (*(volatile const int *)&(v)->counter);
6584 }
6585
6586 /**
6587@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6588 }
6589
6590 /**
6591+ * atomic_set_unchecked - set atomic variable
6592+ * @v: pointer of type atomic_unchecked_t
6593+ * @i: required value
6594+ *
6595+ * Atomically sets the value of @v to @i.
6596+ */
6597+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6598+{
6599+ v->counter = i;
6600+}
6601+
6602+/**
6603 * atomic_add - add integer to atomic variable
6604 * @i: integer value to add
6605 * @v: pointer of type atomic_t
6606@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6607 */
6608 static inline void atomic_add(int i, atomic_t *v)
6609 {
6610- asm volatile(LOCK_PREFIX "addl %1,%0"
6611+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6612+
6613+#ifdef CONFIG_PAX_REFCOUNT
6614+ "jno 0f\n"
6615+ LOCK_PREFIX "subl %1,%0\n"
6616+ "int $4\n0:\n"
6617+ _ASM_EXTABLE(0b, 0b)
6618+#endif
6619+
6620+ : "+m" (v->counter)
6621+ : "ir" (i));
6622+}
6623+
6624+/**
6625+ * atomic_add_unchecked - add integer to atomic variable
6626+ * @i: integer value to add
6627+ * @v: pointer of type atomic_unchecked_t
6628+ *
6629+ * Atomically adds @i to @v.
6630+ */
6631+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6632+{
6633+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6634 : "+m" (v->counter)
6635 : "ir" (i));
6636 }
6637@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6638 */
6639 static inline void atomic_sub(int i, atomic_t *v)
6640 {
6641- asm volatile(LOCK_PREFIX "subl %1,%0"
6642+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6643+
6644+#ifdef CONFIG_PAX_REFCOUNT
6645+ "jno 0f\n"
6646+ LOCK_PREFIX "addl %1,%0\n"
6647+ "int $4\n0:\n"
6648+ _ASM_EXTABLE(0b, 0b)
6649+#endif
6650+
6651+ : "+m" (v->counter)
6652+ : "ir" (i));
6653+}
6654+
6655+/**
6656+ * atomic_sub_unchecked - subtract integer from atomic variable
6657+ * @i: integer value to subtract
6658+ * @v: pointer of type atomic_unchecked_t
6659+ *
6660+ * Atomically subtracts @i from @v.
6661+ */
6662+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6663+{
6664+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6665 : "+m" (v->counter)
6666 : "ir" (i));
6667 }
6668@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6669 {
6670 unsigned char c;
6671
6672- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6673+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6674+
6675+#ifdef CONFIG_PAX_REFCOUNT
6676+ "jno 0f\n"
6677+ LOCK_PREFIX "addl %2,%0\n"
6678+ "int $4\n0:\n"
6679+ _ASM_EXTABLE(0b, 0b)
6680+#endif
6681+
6682+ "sete %1\n"
6683 : "+m" (v->counter), "=qm" (c)
6684 : "ir" (i) : "memory");
6685 return c;
6686@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6687 */
6688 static inline void atomic_inc(atomic_t *v)
6689 {
6690- asm volatile(LOCK_PREFIX "incl %0"
6691+ asm volatile(LOCK_PREFIX "incl %0\n"
6692+
6693+#ifdef CONFIG_PAX_REFCOUNT
6694+ "jno 0f\n"
6695+ LOCK_PREFIX "decl %0\n"
6696+ "int $4\n0:\n"
6697+ _ASM_EXTABLE(0b, 0b)
6698+#endif
6699+
6700+ : "+m" (v->counter));
6701+}
6702+
6703+/**
6704+ * atomic_inc_unchecked - increment atomic variable
6705+ * @v: pointer of type atomic_unchecked_t
6706+ *
6707+ * Atomically increments @v by 1.
6708+ */
6709+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6710+{
6711+ asm volatile(LOCK_PREFIX "incl %0\n"
6712 : "+m" (v->counter));
6713 }
6714
6715@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6716 */
6717 static inline void atomic_dec(atomic_t *v)
6718 {
6719- asm volatile(LOCK_PREFIX "decl %0"
6720+ asm volatile(LOCK_PREFIX "decl %0\n"
6721+
6722+#ifdef CONFIG_PAX_REFCOUNT
6723+ "jno 0f\n"
6724+ LOCK_PREFIX "incl %0\n"
6725+ "int $4\n0:\n"
6726+ _ASM_EXTABLE(0b, 0b)
6727+#endif
6728+
6729+ : "+m" (v->counter));
6730+}
6731+
6732+/**
6733+ * atomic_dec_unchecked - decrement atomic variable
6734+ * @v: pointer of type atomic_unchecked_t
6735+ *
6736+ * Atomically decrements @v by 1.
6737+ */
6738+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6739+{
6740+ asm volatile(LOCK_PREFIX "decl %0\n"
6741 : "+m" (v->counter));
6742 }
6743
6744@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6745 {
6746 unsigned char c;
6747
6748- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6749+ asm volatile(LOCK_PREFIX "decl %0\n"
6750+
6751+#ifdef CONFIG_PAX_REFCOUNT
6752+ "jno 0f\n"
6753+ LOCK_PREFIX "incl %0\n"
6754+ "int $4\n0:\n"
6755+ _ASM_EXTABLE(0b, 0b)
6756+#endif
6757+
6758+ "sete %1\n"
6759 : "+m" (v->counter), "=qm" (c)
6760 : : "memory");
6761 return c != 0;
6762@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6763 {
6764 unsigned char c;
6765
6766- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6767+ asm volatile(LOCK_PREFIX "incl %0\n"
6768+
6769+#ifdef CONFIG_PAX_REFCOUNT
6770+ "jno 0f\n"
6771+ LOCK_PREFIX "decl %0\n"
6772+ "int $4\n0:\n"
6773+ _ASM_EXTABLE(0b, 0b)
6774+#endif
6775+
6776+ "sete %1\n"
6777+ : "+m" (v->counter), "=qm" (c)
6778+ : : "memory");
6779+ return c != 0;
6780+}
6781+
6782+/**
6783+ * atomic_inc_and_test_unchecked - increment and test
6784+ * @v: pointer of type atomic_unchecked_t
6785+ *
6786+ * Atomically increments @v by 1
6787+ * and returns true if the result is zero, or false for all
6788+ * other cases.
6789+ */
6790+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6791+{
6792+ unsigned char c;
6793+
6794+ asm volatile(LOCK_PREFIX "incl %0\n"
6795+ "sete %1\n"
6796 : "+m" (v->counter), "=qm" (c)
6797 : : "memory");
6798 return c != 0;
6799@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6800 {
6801 unsigned char c;
6802
6803- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6804+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6805+
6806+#ifdef CONFIG_PAX_REFCOUNT
6807+ "jno 0f\n"
6808+ LOCK_PREFIX "subl %2,%0\n"
6809+ "int $4\n0:\n"
6810+ _ASM_EXTABLE(0b, 0b)
6811+#endif
6812+
6813+ "sets %1\n"
6814 : "+m" (v->counter), "=qm" (c)
6815 : "ir" (i) : "memory");
6816 return c;
6817@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6818 #endif
6819 /* Modern 486+ processor */
6820 __i = i;
6821+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6822+
6823+#ifdef CONFIG_PAX_REFCOUNT
6824+ "jno 0f\n"
6825+ "movl %0, %1\n"
6826+ "int $4\n0:\n"
6827+ _ASM_EXTABLE(0b, 0b)
6828+#endif
6829+
6830+ : "+r" (i), "+m" (v->counter)
6831+ : : "memory");
6832+ return i + __i;
6833+
6834+#ifdef CONFIG_M386
6835+no_xadd: /* Legacy 386 processor */
6836+ local_irq_save(flags);
6837+ __i = atomic_read(v);
6838+ atomic_set(v, i + __i);
6839+ local_irq_restore(flags);
6840+ return i + __i;
6841+#endif
6842+}
6843+
6844+/**
6845+ * atomic_add_return_unchecked - add integer and return
6846+ * @v: pointer of type atomic_unchecked_t
6847+ * @i: integer value to add
6848+ *
6849+ * Atomically adds @i to @v and returns @i + @v
6850+ */
6851+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6852+{
6853+ int __i;
6854+#ifdef CONFIG_M386
6855+ unsigned long flags;
6856+ if (unlikely(boot_cpu_data.x86 <= 3))
6857+ goto no_xadd;
6858+#endif
6859+ /* Modern 486+ processor */
6860+ __i = i;
6861 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6862 : "+r" (i), "+m" (v->counter)
6863 : : "memory");
6864@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6865 }
6866
6867 #define atomic_inc_return(v) (atomic_add_return(1, v))
6868+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6869+{
6870+ return atomic_add_return_unchecked(1, v);
6871+}
6872 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6873
6874 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6875@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6876 return cmpxchg(&v->counter, old, new);
6877 }
6878
6879+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6880+{
6881+ return cmpxchg(&v->counter, old, new);
6882+}
6883+
6884 static inline int atomic_xchg(atomic_t *v, int new)
6885 {
6886 return xchg(&v->counter, new);
6887 }
6888
6889+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6890+{
6891+ return xchg(&v->counter, new);
6892+}
6893+
6894 /**
6895 * atomic_add_unless - add unless the number is already a given value
6896 * @v: pointer of type atomic_t
6897@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6898 */
6899 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6900 {
6901- int c, old;
6902+ int c, old, new;
6903 c = atomic_read(v);
6904 for (;;) {
6905- if (unlikely(c == (u)))
6906+ if (unlikely(c == u))
6907 break;
6908- old = atomic_cmpxchg((v), c, c + (a));
6909+
6910+ asm volatile("addl %2,%0\n"
6911+
6912+#ifdef CONFIG_PAX_REFCOUNT
6913+ "jno 0f\n"
6914+ "subl %2,%0\n"
6915+ "int $4\n0:\n"
6916+ _ASM_EXTABLE(0b, 0b)
6917+#endif
6918+
6919+ : "=r" (new)
6920+ : "0" (c), "ir" (a));
6921+
6922+ old = atomic_cmpxchg(v, c, new);
6923 if (likely(old == c))
6924 break;
6925 c = old;
6926 }
6927- return c != (u);
6928+ return c != u;
6929 }
6930
6931 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6932
6933+/**
6934+ * atomic_inc_not_zero_hint - increment if not null
6935+ * @v: pointer of type atomic_t
6936+ * @hint: probable value of the atomic before the increment
6937+ *
6938+ * This version of atomic_inc_not_zero() gives a hint of probable
6939+ * value of the atomic. This helps processor to not read the memory
6940+ * before doing the atomic read/modify/write cycle, lowering
6941+ * number of bus transactions on some arches.
6942+ *
6943+ * Returns: 0 if increment was not done, 1 otherwise.
6944+ */
6945+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6946+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6947+{
6948+ int val, c = hint, new;
6949+
6950+ /* sanity test, should be removed by compiler if hint is a constant */
6951+ if (!hint)
6952+ return atomic_inc_not_zero(v);
6953+
6954+ do {
6955+ asm volatile("incl %0\n"
6956+
6957+#ifdef CONFIG_PAX_REFCOUNT
6958+ "jno 0f\n"
6959+ "decl %0\n"
6960+ "int $4\n0:\n"
6961+ _ASM_EXTABLE(0b, 0b)
6962+#endif
6963+
6964+ : "=r" (new)
6965+ : "0" (c));
6966+
6967+ val = atomic_cmpxchg(v, c, new);
6968+ if (val == c)
6969+ return 1;
6970+ c = val;
6971+ } while (c);
6972+
6973+ return 0;
6974+}
6975+
6976 /*
6977 * atomic_dec_if_positive - decrement by 1 if old value positive
6978 * @v: pointer of type atomic_t
6979diff -urNp linux-3.0.7/arch/x86/include/asm/bitops.h linux-3.0.7/arch/x86/include/asm/bitops.h
6980--- linux-3.0.7/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6981+++ linux-3.0.7/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6982@@ -38,7 +38,7 @@
6983 * a mask operation on a byte.
6984 */
6985 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6986-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6987+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6988 #define CONST_MASK(nr) (1 << ((nr) & 7))
6989
6990 /**
6991diff -urNp linux-3.0.7/arch/x86/include/asm/boot.h linux-3.0.7/arch/x86/include/asm/boot.h
6992--- linux-3.0.7/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6993+++ linux-3.0.7/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6994@@ -11,10 +11,15 @@
6995 #include <asm/pgtable_types.h>
6996
6997 /* Physical address where kernel should be loaded. */
6998-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6999+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7000 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7001 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7002
7003+#ifndef __ASSEMBLY__
7004+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7005+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7006+#endif
7007+
7008 /* Minimum kernel alignment, as a power of two */
7009 #ifdef CONFIG_X86_64
7010 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7011diff -urNp linux-3.0.7/arch/x86/include/asm/cacheflush.h linux-3.0.7/arch/x86/include/asm/cacheflush.h
7012--- linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7013+++ linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7014@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7015 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7016
7017 if (pg_flags == _PGMT_DEFAULT)
7018- return -1;
7019+ return ~0UL;
7020 else if (pg_flags == _PGMT_WC)
7021 return _PAGE_CACHE_WC;
7022 else if (pg_flags == _PGMT_UC_MINUS)
7023diff -urNp linux-3.0.7/arch/x86/include/asm/cache.h linux-3.0.7/arch/x86/include/asm/cache.h
7024--- linux-3.0.7/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7025+++ linux-3.0.7/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7026@@ -5,12 +5,13 @@
7027
7028 /* L1 cache line size */
7029 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7030-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7031+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7032
7033 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7034+#define __read_only __attribute__((__section__(".data..read_only")))
7035
7036 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7037-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7038+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7039
7040 #ifdef CONFIG_X86_VSMP
7041 #ifdef CONFIG_SMP
7042diff -urNp linux-3.0.7/arch/x86/include/asm/checksum_32.h linux-3.0.7/arch/x86/include/asm/checksum_32.h
7043--- linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7044+++ linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7045@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7046 int len, __wsum sum,
7047 int *src_err_ptr, int *dst_err_ptr);
7048
7049+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7050+ int len, __wsum sum,
7051+ int *src_err_ptr, int *dst_err_ptr);
7052+
7053+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7054+ int len, __wsum sum,
7055+ int *src_err_ptr, int *dst_err_ptr);
7056+
7057 /*
7058 * Note: when you get a NULL pointer exception here this means someone
7059 * passed in an incorrect kernel address to one of these functions.
7060@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7061 int *err_ptr)
7062 {
7063 might_sleep();
7064- return csum_partial_copy_generic((__force void *)src, dst,
7065+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7066 len, sum, err_ptr, NULL);
7067 }
7068
7069@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7070 {
7071 might_sleep();
7072 if (access_ok(VERIFY_WRITE, dst, len))
7073- return csum_partial_copy_generic(src, (__force void *)dst,
7074+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7075 len, sum, NULL, err_ptr);
7076
7077 if (len)
7078diff -urNp linux-3.0.7/arch/x86/include/asm/cpufeature.h linux-3.0.7/arch/x86/include/asm/cpufeature.h
7079--- linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7080+++ linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7081@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7082 ".section .discard,\"aw\",@progbits\n"
7083 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7084 ".previous\n"
7085- ".section .altinstr_replacement,\"ax\"\n"
7086+ ".section .altinstr_replacement,\"a\"\n"
7087 "3: movb $1,%0\n"
7088 "4:\n"
7089 ".previous\n"
7090diff -urNp linux-3.0.7/arch/x86/include/asm/desc_defs.h linux-3.0.7/arch/x86/include/asm/desc_defs.h
7091--- linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7092+++ linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7093@@ -31,6 +31,12 @@ struct desc_struct {
7094 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7095 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7096 };
7097+ struct {
7098+ u16 offset_low;
7099+ u16 seg;
7100+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7101+ unsigned offset_high: 16;
7102+ } gate;
7103 };
7104 } __attribute__((packed));
7105
7106diff -urNp linux-3.0.7/arch/x86/include/asm/desc.h linux-3.0.7/arch/x86/include/asm/desc.h
7107--- linux-3.0.7/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7108+++ linux-3.0.7/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7109@@ -4,6 +4,7 @@
7110 #include <asm/desc_defs.h>
7111 #include <asm/ldt.h>
7112 #include <asm/mmu.h>
7113+#include <asm/pgtable.h>
7114
7115 #include <linux/smp.h>
7116
7117@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7118
7119 desc->type = (info->read_exec_only ^ 1) << 1;
7120 desc->type |= info->contents << 2;
7121+ desc->type |= info->seg_not_present ^ 1;
7122
7123 desc->s = 1;
7124 desc->dpl = 0x3;
7125@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7126 }
7127
7128 extern struct desc_ptr idt_descr;
7129-extern gate_desc idt_table[];
7130-
7131-struct gdt_page {
7132- struct desc_struct gdt[GDT_ENTRIES];
7133-} __attribute__((aligned(PAGE_SIZE)));
7134-
7135-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7136+extern gate_desc idt_table[256];
7137
7138+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7139 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7140 {
7141- return per_cpu(gdt_page, cpu).gdt;
7142+ return cpu_gdt_table[cpu];
7143 }
7144
7145 #ifdef CONFIG_X86_64
7146@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7147 unsigned long base, unsigned dpl, unsigned flags,
7148 unsigned short seg)
7149 {
7150- gate->a = (seg << 16) | (base & 0xffff);
7151- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7152+ gate->gate.offset_low = base;
7153+ gate->gate.seg = seg;
7154+ gate->gate.reserved = 0;
7155+ gate->gate.type = type;
7156+ gate->gate.s = 0;
7157+ gate->gate.dpl = dpl;
7158+ gate->gate.p = 1;
7159+ gate->gate.offset_high = base >> 16;
7160 }
7161
7162 #endif
7163@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7164
7165 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7166 {
7167+ pax_open_kernel();
7168 memcpy(&idt[entry], gate, sizeof(*gate));
7169+ pax_close_kernel();
7170 }
7171
7172 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7173 {
7174+ pax_open_kernel();
7175 memcpy(&ldt[entry], desc, 8);
7176+ pax_close_kernel();
7177 }
7178
7179 static inline void
7180@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7181 default: size = sizeof(*gdt); break;
7182 }
7183
7184+ pax_open_kernel();
7185 memcpy(&gdt[entry], desc, size);
7186+ pax_close_kernel();
7187 }
7188
7189 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7190@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7191
7192 static inline void native_load_tr_desc(void)
7193 {
7194+ pax_open_kernel();
7195 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7196+ pax_close_kernel();
7197 }
7198
7199 static inline void native_load_gdt(const struct desc_ptr *dtr)
7200@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7201 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7202 unsigned int i;
7203
7204+ pax_open_kernel();
7205 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7206 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7207+ pax_close_kernel();
7208 }
7209
7210 #define _LDT_empty(info) \
7211@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7212 desc->limit = (limit >> 16) & 0xf;
7213 }
7214
7215-static inline void _set_gate(int gate, unsigned type, void *addr,
7216+static inline void _set_gate(int gate, unsigned type, const void *addr,
7217 unsigned dpl, unsigned ist, unsigned seg)
7218 {
7219 gate_desc s;
7220@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7221 * Pentium F0 0F bugfix can have resulted in the mapped
7222 * IDT being write-protected.
7223 */
7224-static inline void set_intr_gate(unsigned int n, void *addr)
7225+static inline void set_intr_gate(unsigned int n, const void *addr)
7226 {
7227 BUG_ON((unsigned)n > 0xFF);
7228 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7229@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7230 /*
7231 * This routine sets up an interrupt gate at directory privilege level 3.
7232 */
7233-static inline void set_system_intr_gate(unsigned int n, void *addr)
7234+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7235 {
7236 BUG_ON((unsigned)n > 0xFF);
7237 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7238 }
7239
7240-static inline void set_system_trap_gate(unsigned int n, void *addr)
7241+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7242 {
7243 BUG_ON((unsigned)n > 0xFF);
7244 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7245 }
7246
7247-static inline void set_trap_gate(unsigned int n, void *addr)
7248+static inline void set_trap_gate(unsigned int n, const void *addr)
7249 {
7250 BUG_ON((unsigned)n > 0xFF);
7251 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7252@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7253 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7257+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7258 }
7259
7260-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7261+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7262 {
7263 BUG_ON((unsigned)n > 0xFF);
7264 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7265 }
7266
7267-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7268+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7269 {
7270 BUG_ON((unsigned)n > 0xFF);
7271 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7272 }
7273
7274+#ifdef CONFIG_X86_32
7275+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7276+{
7277+ struct desc_struct d;
7278+
7279+ if (likely(limit))
7280+ limit = (limit - 1UL) >> PAGE_SHIFT;
7281+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7282+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7283+}
7284+#endif
7285+
7286 #endif /* _ASM_X86_DESC_H */
7287diff -urNp linux-3.0.7/arch/x86/include/asm/e820.h linux-3.0.7/arch/x86/include/asm/e820.h
7288--- linux-3.0.7/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7289+++ linux-3.0.7/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7290@@ -69,7 +69,7 @@ struct e820map {
7291 #define ISA_START_ADDRESS 0xa0000
7292 #define ISA_END_ADDRESS 0x100000
7293
7294-#define BIOS_BEGIN 0x000a0000
7295+#define BIOS_BEGIN 0x000c0000
7296 #define BIOS_END 0x00100000
7297
7298 #define BIOS_ROM_BASE 0xffe00000
7299diff -urNp linux-3.0.7/arch/x86/include/asm/elf.h linux-3.0.7/arch/x86/include/asm/elf.h
7300--- linux-3.0.7/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7301+++ linux-3.0.7/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7302@@ -237,7 +237,25 @@ extern int force_personality32;
7303 the loader. We need to make sure that it is out of the way of the program
7304 that it will "exec", and that there is sufficient room for the brk. */
7305
7306+#ifdef CONFIG_PAX_SEGMEXEC
7307+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7308+#else
7309 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7310+#endif
7311+
7312+#ifdef CONFIG_PAX_ASLR
7313+#ifdef CONFIG_X86_32
7314+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7315+
7316+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7317+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7318+#else
7319+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7320+
7321+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7322+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7323+#endif
7324+#endif
7325
7326 /* This yields a mask that user programs can use to figure out what
7327 instruction set this CPU supports. This could be done in user space,
7328@@ -290,9 +308,7 @@ do { \
7329
7330 #define ARCH_DLINFO \
7331 do { \
7332- if (vdso_enabled) \
7333- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7334- (unsigned long)current->mm->context.vdso); \
7335+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7336 } while (0)
7337
7338 #define AT_SYSINFO 32
7339@@ -303,7 +319,7 @@ do { \
7340
7341 #endif /* !CONFIG_X86_32 */
7342
7343-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7344+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7345
7346 #define VDSO_ENTRY \
7347 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7348@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7349 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7350 #define compat_arch_setup_additional_pages syscall32_setup_pages
7351
7352-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7353-#define arch_randomize_brk arch_randomize_brk
7354-
7355 #endif /* _ASM_X86_ELF_H */
7356diff -urNp linux-3.0.7/arch/x86/include/asm/emergency-restart.h linux-3.0.7/arch/x86/include/asm/emergency-restart.h
7357--- linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7358+++ linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7359@@ -15,6 +15,6 @@ enum reboot_type {
7360
7361 extern enum reboot_type reboot_type;
7362
7363-extern void machine_emergency_restart(void);
7364+extern void machine_emergency_restart(void) __noreturn;
7365
7366 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7367diff -urNp linux-3.0.7/arch/x86/include/asm/futex.h linux-3.0.7/arch/x86/include/asm/futex.h
7368--- linux-3.0.7/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7369+++ linux-3.0.7/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7370@@ -12,16 +12,18 @@
7371 #include <asm/system.h>
7372
7373 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7374+ typecheck(u32 __user *, uaddr); \
7375 asm volatile("1:\t" insn "\n" \
7376 "2:\t.section .fixup,\"ax\"\n" \
7377 "3:\tmov\t%3, %1\n" \
7378 "\tjmp\t2b\n" \
7379 "\t.previous\n" \
7380 _ASM_EXTABLE(1b, 3b) \
7381- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7382+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7383 : "i" (-EFAULT), "0" (oparg), "1" (0))
7384
7385 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7386+ typecheck(u32 __user *, uaddr); \
7387 asm volatile("1:\tmovl %2, %0\n" \
7388 "\tmovl\t%0, %3\n" \
7389 "\t" insn "\n" \
7390@@ -34,7 +36,7 @@
7391 _ASM_EXTABLE(1b, 4b) \
7392 _ASM_EXTABLE(2b, 4b) \
7393 : "=&a" (oldval), "=&r" (ret), \
7394- "+m" (*uaddr), "=&r" (tem) \
7395+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7396 : "r" (oparg), "i" (-EFAULT), "1" (0))
7397
7398 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7399@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7400
7401 switch (op) {
7402 case FUTEX_OP_SET:
7403- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7404+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7405 break;
7406 case FUTEX_OP_ADD:
7407- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7408+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7409 uaddr, oparg);
7410 break;
7411 case FUTEX_OP_OR:
7412@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7413 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7414 return -EFAULT;
7415
7416- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7417+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7418 "2:\t.section .fixup, \"ax\"\n"
7419 "3:\tmov %3, %0\n"
7420 "\tjmp 2b\n"
7421 "\t.previous\n"
7422 _ASM_EXTABLE(1b, 3b)
7423- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7424+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7425 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7426 : "memory"
7427 );
7428diff -urNp linux-3.0.7/arch/x86/include/asm/hw_irq.h linux-3.0.7/arch/x86/include/asm/hw_irq.h
7429--- linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7430+++ linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7431@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7432 extern void enable_IO_APIC(void);
7433
7434 /* Statistics */
7435-extern atomic_t irq_err_count;
7436-extern atomic_t irq_mis_count;
7437+extern atomic_unchecked_t irq_err_count;
7438+extern atomic_unchecked_t irq_mis_count;
7439
7440 /* EISA */
7441 extern void eisa_set_level_irq(unsigned int irq);
7442diff -urNp linux-3.0.7/arch/x86/include/asm/i387.h linux-3.0.7/arch/x86/include/asm/i387.h
7443--- linux-3.0.7/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7444+++ linux-3.0.7/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7445@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7446 {
7447 int err;
7448
7449+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7450+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7451+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7452+#endif
7453+
7454 /* See comment in fxsave() below. */
7455 #ifdef CONFIG_AS_FXSAVEQ
7456 asm volatile("1: fxrstorq %[fx]\n\t"
7457@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7458 {
7459 int err;
7460
7461+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7462+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7463+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7464+#endif
7465+
7466 /*
7467 * Clear the bytes not touched by the fxsave and reserved
7468 * for the SW usage.
7469@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7470 #endif /* CONFIG_X86_64 */
7471
7472 /* We need a safe address that is cheap to find and that is already
7473- in L1 during context switch. The best choices are unfortunately
7474- different for UP and SMP */
7475-#ifdef CONFIG_SMP
7476-#define safe_address (__per_cpu_offset[0])
7477-#else
7478-#define safe_address (kstat_cpu(0).cpustat.user)
7479-#endif
7480+ in L1 during context switch. */
7481+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7482
7483 /*
7484 * These must be called with preempt disabled
7485@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7486 struct thread_info *me = current_thread_info();
7487 preempt_disable();
7488 if (me->status & TS_USEDFPU)
7489- __save_init_fpu(me->task);
7490+ __save_init_fpu(current);
7491 else
7492 clts();
7493 }
7494diff -urNp linux-3.0.7/arch/x86/include/asm/io.h linux-3.0.7/arch/x86/include/asm/io.h
7495--- linux-3.0.7/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7496+++ linux-3.0.7/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7497@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7498
7499 #include <linux/vmalloc.h>
7500
7501+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7502+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7503+{
7504+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7505+}
7506+
7507+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7508+{
7509+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510+}
7511+
7512 /*
7513 * Convert a virtual cached pointer to an uncached pointer
7514 */
7515diff -urNp linux-3.0.7/arch/x86/include/asm/irqflags.h linux-3.0.7/arch/x86/include/asm/irqflags.h
7516--- linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7517+++ linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7518@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7519 sti; \
7520 sysexit
7521
7522+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7523+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7524+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7525+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7526+
7527 #else
7528 #define INTERRUPT_RETURN iret
7529 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7530diff -urNp linux-3.0.7/arch/x86/include/asm/kprobes.h linux-3.0.7/arch/x86/include/asm/kprobes.h
7531--- linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7532+++ linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7533@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7534 #define RELATIVEJUMP_SIZE 5
7535 #define RELATIVECALL_OPCODE 0xe8
7536 #define RELATIVE_ADDR_SIZE 4
7537-#define MAX_STACK_SIZE 64
7538-#define MIN_STACK_SIZE(ADDR) \
7539- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7540- THREAD_SIZE - (unsigned long)(ADDR))) \
7541- ? (MAX_STACK_SIZE) \
7542- : (((unsigned long)current_thread_info()) + \
7543- THREAD_SIZE - (unsigned long)(ADDR)))
7544+#define MAX_STACK_SIZE 64UL
7545+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7546
7547 #define flush_insn_slot(p) do { } while (0)
7548
7549diff -urNp linux-3.0.7/arch/x86/include/asm/kvm_host.h linux-3.0.7/arch/x86/include/asm/kvm_host.h
7550--- linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7551+++ linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7552@@ -441,7 +441,7 @@ struct kvm_arch {
7553 unsigned int n_used_mmu_pages;
7554 unsigned int n_requested_mmu_pages;
7555 unsigned int n_max_mmu_pages;
7556- atomic_t invlpg_counter;
7557+ atomic_unchecked_t invlpg_counter;
7558 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7559 /*
7560 * Hash table of struct kvm_mmu_page.
7561@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7562 enum x86_intercept_stage stage);
7563
7564 const struct trace_print_flags *exit_reasons_str;
7565-};
7566+} __do_const;
7567
7568 struct kvm_arch_async_pf {
7569 u32 token;
7570diff -urNp linux-3.0.7/arch/x86/include/asm/local.h linux-3.0.7/arch/x86/include/asm/local.h
7571--- linux-3.0.7/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7572+++ linux-3.0.7/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7573@@ -18,26 +18,58 @@ typedef struct {
7574
7575 static inline void local_inc(local_t *l)
7576 {
7577- asm volatile(_ASM_INC "%0"
7578+ asm volatile(_ASM_INC "%0\n"
7579+
7580+#ifdef CONFIG_PAX_REFCOUNT
7581+ "jno 0f\n"
7582+ _ASM_DEC "%0\n"
7583+ "int $4\n0:\n"
7584+ _ASM_EXTABLE(0b, 0b)
7585+#endif
7586+
7587 : "+m" (l->a.counter));
7588 }
7589
7590 static inline void local_dec(local_t *l)
7591 {
7592- asm volatile(_ASM_DEC "%0"
7593+ asm volatile(_ASM_DEC "%0\n"
7594+
7595+#ifdef CONFIG_PAX_REFCOUNT
7596+ "jno 0f\n"
7597+ _ASM_INC "%0\n"
7598+ "int $4\n0:\n"
7599+ _ASM_EXTABLE(0b, 0b)
7600+#endif
7601+
7602 : "+m" (l->a.counter));
7603 }
7604
7605 static inline void local_add(long i, local_t *l)
7606 {
7607- asm volatile(_ASM_ADD "%1,%0"
7608+ asm volatile(_ASM_ADD "%1,%0\n"
7609+
7610+#ifdef CONFIG_PAX_REFCOUNT
7611+ "jno 0f\n"
7612+ _ASM_SUB "%1,%0\n"
7613+ "int $4\n0:\n"
7614+ _ASM_EXTABLE(0b, 0b)
7615+#endif
7616+
7617 : "+m" (l->a.counter)
7618 : "ir" (i));
7619 }
7620
7621 static inline void local_sub(long i, local_t *l)
7622 {
7623- asm volatile(_ASM_SUB "%1,%0"
7624+ asm volatile(_ASM_SUB "%1,%0\n"
7625+
7626+#ifdef CONFIG_PAX_REFCOUNT
7627+ "jno 0f\n"
7628+ _ASM_ADD "%1,%0\n"
7629+ "int $4\n0:\n"
7630+ _ASM_EXTABLE(0b, 0b)
7631+#endif
7632+
7633 : "+m" (l->a.counter)
7634 : "ir" (i));
7635 }
7636@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7637 {
7638 unsigned char c;
7639
7640- asm volatile(_ASM_SUB "%2,%0; sete %1"
7641+ asm volatile(_ASM_SUB "%2,%0\n"
7642+
7643+#ifdef CONFIG_PAX_REFCOUNT
7644+ "jno 0f\n"
7645+ _ASM_ADD "%2,%0\n"
7646+ "int $4\n0:\n"
7647+ _ASM_EXTABLE(0b, 0b)
7648+#endif
7649+
7650+ "sete %1\n"
7651 : "+m" (l->a.counter), "=qm" (c)
7652 : "ir" (i) : "memory");
7653 return c;
7654@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7655 {
7656 unsigned char c;
7657
7658- asm volatile(_ASM_DEC "%0; sete %1"
7659+ asm volatile(_ASM_DEC "%0\n"
7660+
7661+#ifdef CONFIG_PAX_REFCOUNT
7662+ "jno 0f\n"
7663+ _ASM_INC "%0\n"
7664+ "int $4\n0:\n"
7665+ _ASM_EXTABLE(0b, 0b)
7666+#endif
7667+
7668+ "sete %1\n"
7669 : "+m" (l->a.counter), "=qm" (c)
7670 : : "memory");
7671 return c != 0;
7672@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7673 {
7674 unsigned char c;
7675
7676- asm volatile(_ASM_INC "%0; sete %1"
7677+ asm volatile(_ASM_INC "%0\n"
7678+
7679+#ifdef CONFIG_PAX_REFCOUNT
7680+ "jno 0f\n"
7681+ _ASM_DEC "%0\n"
7682+ "int $4\n0:\n"
7683+ _ASM_EXTABLE(0b, 0b)
7684+#endif
7685+
7686+ "sete %1\n"
7687 : "+m" (l->a.counter), "=qm" (c)
7688 : : "memory");
7689 return c != 0;
7690@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7691 {
7692 unsigned char c;
7693
7694- asm volatile(_ASM_ADD "%2,%0; sets %1"
7695+ asm volatile(_ASM_ADD "%2,%0\n"
7696+
7697+#ifdef CONFIG_PAX_REFCOUNT
7698+ "jno 0f\n"
7699+ _ASM_SUB "%2,%0\n"
7700+ "int $4\n0:\n"
7701+ _ASM_EXTABLE(0b, 0b)
7702+#endif
7703+
7704+ "sets %1\n"
7705 : "+m" (l->a.counter), "=qm" (c)
7706 : "ir" (i) : "memory");
7707 return c;
7708@@ -133,7 +201,15 @@ static inline long local_add_return(long
7709 #endif
7710 /* Modern 486+ processor */
7711 __i = i;
7712- asm volatile(_ASM_XADD "%0, %1;"
7713+ asm volatile(_ASM_XADD "%0, %1\n"
7714+
7715+#ifdef CONFIG_PAX_REFCOUNT
7716+ "jno 0f\n"
7717+ _ASM_MOV "%0,%1\n"
7718+ "int $4\n0:\n"
7719+ _ASM_EXTABLE(0b, 0b)
7720+#endif
7721+
7722 : "+r" (i), "+m" (l->a.counter)
7723 : : "memory");
7724 return i + __i;
7725diff -urNp linux-3.0.7/arch/x86/include/asm/mman.h linux-3.0.7/arch/x86/include/asm/mman.h
7726--- linux-3.0.7/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7727+++ linux-3.0.7/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7728@@ -5,4 +5,14 @@
7729
7730 #include <asm-generic/mman.h>
7731
7732+#ifdef __KERNEL__
7733+#ifndef __ASSEMBLY__
7734+#ifdef CONFIG_X86_32
7735+#define arch_mmap_check i386_mmap_check
7736+int i386_mmap_check(unsigned long addr, unsigned long len,
7737+ unsigned long flags);
7738+#endif
7739+#endif
7740+#endif
7741+
7742 #endif /* _ASM_X86_MMAN_H */
7743diff -urNp linux-3.0.7/arch/x86/include/asm/mmu_context.h linux-3.0.7/arch/x86/include/asm/mmu_context.h
7744--- linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7745+++ linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7746@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7747
7748 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7749 {
7750+
7751+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7752+ unsigned int i;
7753+ pgd_t *pgd;
7754+
7755+ pax_open_kernel();
7756+ pgd = get_cpu_pgd(smp_processor_id());
7757+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7758+ set_pgd_batched(pgd+i, native_make_pgd(0));
7759+ pax_close_kernel();
7760+#endif
7761+
7762 #ifdef CONFIG_SMP
7763 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7764 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7765@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7766 struct task_struct *tsk)
7767 {
7768 unsigned cpu = smp_processor_id();
7769+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7770+ int tlbstate = TLBSTATE_OK;
7771+#endif
7772
7773 if (likely(prev != next)) {
7774 #ifdef CONFIG_SMP
7775+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7776+ tlbstate = percpu_read(cpu_tlbstate.state);
7777+#endif
7778 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7779 percpu_write(cpu_tlbstate.active_mm, next);
7780 #endif
7781 cpumask_set_cpu(cpu, mm_cpumask(next));
7782
7783 /* Re-load page tables */
7784+#ifdef CONFIG_PAX_PER_CPU_PGD
7785+ pax_open_kernel();
7786+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7787+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7788+ pax_close_kernel();
7789+ load_cr3(get_cpu_pgd(cpu));
7790+#else
7791 load_cr3(next->pgd);
7792+#endif
7793
7794 /* stop flush ipis for the previous mm */
7795 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7796@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7797 */
7798 if (unlikely(prev->context.ldt != next->context.ldt))
7799 load_LDT_nolock(&next->context);
7800- }
7801+
7802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7803+ if (!(__supported_pte_mask & _PAGE_NX)) {
7804+ smp_mb__before_clear_bit();
7805+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7806+ smp_mb__after_clear_bit();
7807+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7808+ }
7809+#endif
7810+
7811+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7812+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7813+ prev->context.user_cs_limit != next->context.user_cs_limit))
7814+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7815 #ifdef CONFIG_SMP
7816+ else if (unlikely(tlbstate != TLBSTATE_OK))
7817+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7818+#endif
7819+#endif
7820+
7821+ }
7822 else {
7823+
7824+#ifdef CONFIG_PAX_PER_CPU_PGD
7825+ pax_open_kernel();
7826+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7827+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7828+ pax_close_kernel();
7829+ load_cr3(get_cpu_pgd(cpu));
7830+#endif
7831+
7832+#ifdef CONFIG_SMP
7833 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7834 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7835
7836@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7837 * tlb flush IPI delivery. We must reload CR3
7838 * to make sure to use no freed page tables.
7839 */
7840+
7841+#ifndef CONFIG_PAX_PER_CPU_PGD
7842 load_cr3(next->pgd);
7843+#endif
7844+
7845 load_LDT_nolock(&next->context);
7846+
7847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7848+ if (!(__supported_pte_mask & _PAGE_NX))
7849+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7850+#endif
7851+
7852+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7853+#ifdef CONFIG_PAX_PAGEEXEC
7854+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7855+#endif
7856+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7857+#endif
7858+
7859 }
7860- }
7861 #endif
7862+ }
7863 }
7864
7865 #define activate_mm(prev, next) \
7866diff -urNp linux-3.0.7/arch/x86/include/asm/mmu.h linux-3.0.7/arch/x86/include/asm/mmu.h
7867--- linux-3.0.7/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7868+++ linux-3.0.7/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7869@@ -9,7 +9,7 @@
7870 * we put the segment information here.
7871 */
7872 typedef struct {
7873- void *ldt;
7874+ struct desc_struct *ldt;
7875 int size;
7876
7877 #ifdef CONFIG_X86_64
7878@@ -18,7 +18,19 @@ typedef struct {
7879 #endif
7880
7881 struct mutex lock;
7882- void *vdso;
7883+ unsigned long vdso;
7884+
7885+#ifdef CONFIG_X86_32
7886+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7887+ unsigned long user_cs_base;
7888+ unsigned long user_cs_limit;
7889+
7890+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7891+ cpumask_t cpu_user_cs_mask;
7892+#endif
7893+
7894+#endif
7895+#endif
7896 } mm_context_t;
7897
7898 #ifdef CONFIG_SMP
7899diff -urNp linux-3.0.7/arch/x86/include/asm/module.h linux-3.0.7/arch/x86/include/asm/module.h
7900--- linux-3.0.7/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7901+++ linux-3.0.7/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
7902@@ -5,6 +5,7 @@
7903
7904 #ifdef CONFIG_X86_64
7905 /* X86_64 does not define MODULE_PROC_FAMILY */
7906+#define MODULE_PROC_FAMILY ""
7907 #elif defined CONFIG_M386
7908 #define MODULE_PROC_FAMILY "386 "
7909 #elif defined CONFIG_M486
7910@@ -59,8 +60,18 @@
7911 #error unknown processor family
7912 #endif
7913
7914-#ifdef CONFIG_X86_32
7915-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7916+#ifdef CONFIG_PAX_KERNEXEC
7917+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7918+#else
7919+#define MODULE_PAX_KERNEXEC ""
7920 #endif
7921
7922+#ifdef CONFIG_PAX_MEMORY_UDEREF
7923+#define MODULE_PAX_UDEREF "UDEREF "
7924+#else
7925+#define MODULE_PAX_UDEREF ""
7926+#endif
7927+
7928+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7929+
7930 #endif /* _ASM_X86_MODULE_H */
7931diff -urNp linux-3.0.7/arch/x86/include/asm/page_64_types.h linux-3.0.7/arch/x86/include/asm/page_64_types.h
7932--- linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7933+++ linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7934@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7935
7936 /* duplicated to the one in bootmem.h */
7937 extern unsigned long max_pfn;
7938-extern unsigned long phys_base;
7939+extern const unsigned long phys_base;
7940
7941 extern unsigned long __phys_addr(unsigned long);
7942 #define __phys_reloc_hide(x) (x)
7943diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt.h linux-3.0.7/arch/x86/include/asm/paravirt.h
7944--- linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7945+++ linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7946@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7947 val);
7948 }
7949
7950+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7951+{
7952+ pgdval_t val = native_pgd_val(pgd);
7953+
7954+ if (sizeof(pgdval_t) > sizeof(long))
7955+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7956+ val, (u64)val >> 32);
7957+ else
7958+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7959+ val);
7960+}
7961+
7962 static inline void pgd_clear(pgd_t *pgdp)
7963 {
7964 set_pgd(pgdp, __pgd(0));
7965@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7966 pv_mmu_ops.set_fixmap(idx, phys, flags);
7967 }
7968
7969+#ifdef CONFIG_PAX_KERNEXEC
7970+static inline unsigned long pax_open_kernel(void)
7971+{
7972+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7973+}
7974+
7975+static inline unsigned long pax_close_kernel(void)
7976+{
7977+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7978+}
7979+#else
7980+static inline unsigned long pax_open_kernel(void) { return 0; }
7981+static inline unsigned long pax_close_kernel(void) { return 0; }
7982+#endif
7983+
7984 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7985
7986 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7987@@ -955,7 +982,7 @@ extern void default_banner(void);
7988
7989 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7990 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7991-#define PARA_INDIRECT(addr) *%cs:addr
7992+#define PARA_INDIRECT(addr) *%ss:addr
7993 #endif
7994
7995 #define INTERRUPT_RETURN \
7996@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7997 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7998 CLBR_NONE, \
7999 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8000+
8001+#define GET_CR0_INTO_RDI \
8002+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8003+ mov %rax,%rdi
8004+
8005+#define SET_RDI_INTO_CR0 \
8006+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8007+
8008+#define GET_CR3_INTO_RDI \
8009+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8010+ mov %rax,%rdi
8011+
8012+#define SET_RDI_INTO_CR3 \
8013+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8014+
8015 #endif /* CONFIG_X86_32 */
8016
8017 #endif /* __ASSEMBLY__ */
8018diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt_types.h linux-3.0.7/arch/x86/include/asm/paravirt_types.h
8019--- linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8020+++ linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8021@@ -78,19 +78,19 @@ struct pv_init_ops {
8022 */
8023 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8024 unsigned long addr, unsigned len);
8025-};
8026+} __no_const;
8027
8028
8029 struct pv_lazy_ops {
8030 /* Set deferred update mode, used for batching operations. */
8031 void (*enter)(void);
8032 void (*leave)(void);
8033-};
8034+} __no_const;
8035
8036 struct pv_time_ops {
8037 unsigned long long (*sched_clock)(void);
8038 unsigned long (*get_tsc_khz)(void);
8039-};
8040+} __no_const;
8041
8042 struct pv_cpu_ops {
8043 /* hooks for various privileged instructions */
8044@@ -186,7 +186,7 @@ struct pv_cpu_ops {
8045
8046 void (*start_context_switch)(struct task_struct *prev);
8047 void (*end_context_switch)(struct task_struct *next);
8048-};
8049+} __no_const;
8050
8051 struct pv_irq_ops {
8052 /*
8053@@ -217,7 +217,7 @@ struct pv_apic_ops {
8054 unsigned long start_eip,
8055 unsigned long start_esp);
8056 #endif
8057-};
8058+} __no_const;
8059
8060 struct pv_mmu_ops {
8061 unsigned long (*read_cr2)(void);
8062@@ -306,6 +306,7 @@ struct pv_mmu_ops {
8063 struct paravirt_callee_save make_pud;
8064
8065 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8066+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8067 #endif /* PAGETABLE_LEVELS == 4 */
8068 #endif /* PAGETABLE_LEVELS >= 3 */
8069
8070@@ -317,6 +318,12 @@ struct pv_mmu_ops {
8071 an mfn. We can tell which is which from the index. */
8072 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8073 phys_addr_t phys, pgprot_t flags);
8074+
8075+#ifdef CONFIG_PAX_KERNEXEC
8076+ unsigned long (*pax_open_kernel)(void);
8077+ unsigned long (*pax_close_kernel)(void);
8078+#endif
8079+
8080 };
8081
8082 struct arch_spinlock;
8083@@ -327,7 +334,7 @@ struct pv_lock_ops {
8084 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8085 int (*spin_trylock)(struct arch_spinlock *lock);
8086 void (*spin_unlock)(struct arch_spinlock *lock);
8087-};
8088+} __no_const;
8089
8090 /* This contains all the paravirt structures: we get a convenient
8091 * number for each function using the offset which we use to indicate
8092diff -urNp linux-3.0.7/arch/x86/include/asm/pgalloc.h linux-3.0.7/arch/x86/include/asm/pgalloc.h
8093--- linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8094+++ linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8095@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8096 pmd_t *pmd, pte_t *pte)
8097 {
8098 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8099+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8100+}
8101+
8102+static inline void pmd_populate_user(struct mm_struct *mm,
8103+ pmd_t *pmd, pte_t *pte)
8104+{
8105+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8106 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8107 }
8108
8109diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-2level.h linux-3.0.7/arch/x86/include/asm/pgtable-2level.h
8110--- linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8111+++ linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8112@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8113
8114 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8115 {
8116+ pax_open_kernel();
8117 *pmdp = pmd;
8118+ pax_close_kernel();
8119 }
8120
8121 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8122diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32.h linux-3.0.7/arch/x86/include/asm/pgtable_32.h
8123--- linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8124+++ linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8125@@ -25,9 +25,6 @@
8126 struct mm_struct;
8127 struct vm_area_struct;
8128
8129-extern pgd_t swapper_pg_dir[1024];
8130-extern pgd_t initial_page_table[1024];
8131-
8132 static inline void pgtable_cache_init(void) { }
8133 static inline void check_pgt_cache(void) { }
8134 void paging_init(void);
8135@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8136 # include <asm/pgtable-2level.h>
8137 #endif
8138
8139+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8140+extern pgd_t initial_page_table[PTRS_PER_PGD];
8141+#ifdef CONFIG_X86_PAE
8142+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8143+#endif
8144+
8145 #if defined(CONFIG_HIGHPTE)
8146 #define pte_offset_map(dir, address) \
8147 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8148@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8149 /* Clear a kernel PTE and flush it from the TLB */
8150 #define kpte_clear_flush(ptep, vaddr) \
8151 do { \
8152+ pax_open_kernel(); \
8153 pte_clear(&init_mm, (vaddr), (ptep)); \
8154+ pax_close_kernel(); \
8155 __flush_tlb_one((vaddr)); \
8156 } while (0)
8157
8158@@ -74,6 +79,9 @@ do { \
8159
8160 #endif /* !__ASSEMBLY__ */
8161
8162+#define HAVE_ARCH_UNMAPPED_AREA
8163+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8164+
8165 /*
8166 * kern_addr_valid() is (1) for FLATMEM and (0) for
8167 * SPARSEMEM and DISCONTIGMEM
8168diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h
8169--- linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8170+++ linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8171@@ -8,7 +8,7 @@
8172 */
8173 #ifdef CONFIG_X86_PAE
8174 # include <asm/pgtable-3level_types.h>
8175-# define PMD_SIZE (1UL << PMD_SHIFT)
8176+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8177 # define PMD_MASK (~(PMD_SIZE - 1))
8178 #else
8179 # include <asm/pgtable-2level_types.h>
8180@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8181 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8182 #endif
8183
8184+#ifdef CONFIG_PAX_KERNEXEC
8185+#ifndef __ASSEMBLY__
8186+extern unsigned char MODULES_EXEC_VADDR[];
8187+extern unsigned char MODULES_EXEC_END[];
8188+#endif
8189+#include <asm/boot.h>
8190+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8191+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8192+#else
8193+#define ktla_ktva(addr) (addr)
8194+#define ktva_ktla(addr) (addr)
8195+#endif
8196+
8197 #define MODULES_VADDR VMALLOC_START
8198 #define MODULES_END VMALLOC_END
8199 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8200diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-3level.h linux-3.0.7/arch/x86/include/asm/pgtable-3level.h
8201--- linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8202+++ linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8203@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8204
8205 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8206 {
8207+ pax_open_kernel();
8208 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8209+ pax_close_kernel();
8210 }
8211
8212 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8213 {
8214+ pax_open_kernel();
8215 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8216+ pax_close_kernel();
8217 }
8218
8219 /*
8220diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64.h linux-3.0.7/arch/x86/include/asm/pgtable_64.h
8221--- linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8222+++ linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8223@@ -16,10 +16,13 @@
8224
8225 extern pud_t level3_kernel_pgt[512];
8226 extern pud_t level3_ident_pgt[512];
8227+extern pud_t level3_vmalloc_pgt[512];
8228+extern pud_t level3_vmemmap_pgt[512];
8229+extern pud_t level2_vmemmap_pgt[512];
8230 extern pmd_t level2_kernel_pgt[512];
8231 extern pmd_t level2_fixmap_pgt[512];
8232-extern pmd_t level2_ident_pgt[512];
8233-extern pgd_t init_level4_pgt[];
8234+extern pmd_t level2_ident_pgt[512*2];
8235+extern pgd_t init_level4_pgt[512];
8236
8237 #define swapper_pg_dir init_level4_pgt
8238
8239@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8240
8241 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8242 {
8243+ pax_open_kernel();
8244 *pmdp = pmd;
8245+ pax_close_kernel();
8246 }
8247
8248 static inline void native_pmd_clear(pmd_t *pmd)
8249@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8250
8251 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8252 {
8253+ pax_open_kernel();
8254+ *pgdp = pgd;
8255+ pax_close_kernel();
8256+}
8257+
8258+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8259+{
8260 *pgdp = pgd;
8261 }
8262
8263diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h
8264--- linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8265+++ linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8266@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8267 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8268 #define MODULES_END _AC(0xffffffffff000000, UL)
8269 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8270+#define MODULES_EXEC_VADDR MODULES_VADDR
8271+#define MODULES_EXEC_END MODULES_END
8272+
8273+#define ktla_ktva(addr) (addr)
8274+#define ktva_ktla(addr) (addr)
8275
8276 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8277diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable.h linux-3.0.7/arch/x86/include/asm/pgtable.h
8278--- linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8279+++ linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8280@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8281
8282 #ifndef __PAGETABLE_PUD_FOLDED
8283 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8284+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8285 #define pgd_clear(pgd) native_pgd_clear(pgd)
8286 #endif
8287
8288@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8289
8290 #define arch_end_context_switch(prev) do {} while(0)
8291
8292+#define pax_open_kernel() native_pax_open_kernel()
8293+#define pax_close_kernel() native_pax_close_kernel()
8294 #endif /* CONFIG_PARAVIRT */
8295
8296+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8297+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8298+
8299+#ifdef CONFIG_PAX_KERNEXEC
8300+static inline unsigned long native_pax_open_kernel(void)
8301+{
8302+ unsigned long cr0;
8303+
8304+ preempt_disable();
8305+ barrier();
8306+ cr0 = read_cr0() ^ X86_CR0_WP;
8307+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8308+ write_cr0(cr0);
8309+ return cr0 ^ X86_CR0_WP;
8310+}
8311+
8312+static inline unsigned long native_pax_close_kernel(void)
8313+{
8314+ unsigned long cr0;
8315+
8316+ cr0 = read_cr0() ^ X86_CR0_WP;
8317+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8318+ write_cr0(cr0);
8319+ barrier();
8320+ preempt_enable_no_resched();
8321+ return cr0 ^ X86_CR0_WP;
8322+}
8323+#else
8324+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8325+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8326+#endif
8327+
8328 /*
8329 * The following only work if pte_present() is true.
8330 * Undefined behaviour if not..
8331 */
8332+static inline int pte_user(pte_t pte)
8333+{
8334+ return pte_val(pte) & _PAGE_USER;
8335+}
8336+
8337 static inline int pte_dirty(pte_t pte)
8338 {
8339 return pte_flags(pte) & _PAGE_DIRTY;
8340@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8341 return pte_clear_flags(pte, _PAGE_RW);
8342 }
8343
8344+static inline pte_t pte_mkread(pte_t pte)
8345+{
8346+ return __pte(pte_val(pte) | _PAGE_USER);
8347+}
8348+
8349 static inline pte_t pte_mkexec(pte_t pte)
8350 {
8351- return pte_clear_flags(pte, _PAGE_NX);
8352+#ifdef CONFIG_X86_PAE
8353+ if (__supported_pte_mask & _PAGE_NX)
8354+ return pte_clear_flags(pte, _PAGE_NX);
8355+ else
8356+#endif
8357+ return pte_set_flags(pte, _PAGE_USER);
8358+}
8359+
8360+static inline pte_t pte_exprotect(pte_t pte)
8361+{
8362+#ifdef CONFIG_X86_PAE
8363+ if (__supported_pte_mask & _PAGE_NX)
8364+ return pte_set_flags(pte, _PAGE_NX);
8365+ else
8366+#endif
8367+ return pte_clear_flags(pte, _PAGE_USER);
8368 }
8369
8370 static inline pte_t pte_mkdirty(pte_t pte)
8371@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8372 #endif
8373
8374 #ifndef __ASSEMBLY__
8375+
8376+#ifdef CONFIG_PAX_PER_CPU_PGD
8377+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8378+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8379+{
8380+ return cpu_pgd[cpu];
8381+}
8382+#endif
8383+
8384 #include <linux/mm_types.h>
8385
8386 static inline int pte_none(pte_t pte)
8387@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8388
8389 static inline int pgd_bad(pgd_t pgd)
8390 {
8391- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8392+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8393 }
8394
8395 static inline int pgd_none(pgd_t pgd)
8396@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8397 * pgd_offset() returns a (pgd_t *)
8398 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8399 */
8400-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8401+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8402+
8403+#ifdef CONFIG_PAX_PER_CPU_PGD
8404+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8405+#endif
8406+
8407 /*
8408 * a shortcut which implies the use of the kernel's pgd, instead
8409 * of a process's
8410@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8411 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8412 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8413
8414+#ifdef CONFIG_X86_32
8415+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8416+#else
8417+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8418+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8419+
8420+#ifdef CONFIG_PAX_MEMORY_UDEREF
8421+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8422+#else
8423+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8424+#endif
8425+
8426+#endif
8427+
8428 #ifndef __ASSEMBLY__
8429
8430 extern int direct_gbpages;
8431@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8432 * dst and src can be on the same page, but the range must not overlap,
8433 * and must not cross a page boundary.
8434 */
8435-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8436+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8437 {
8438- memcpy(dst, src, count * sizeof(pgd_t));
8439+ pax_open_kernel();
8440+ while (count--)
8441+ *dst++ = *src++;
8442+ pax_close_kernel();
8443 }
8444
8445+#ifdef CONFIG_PAX_PER_CPU_PGD
8446+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8447+#endif
8448+
8449+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8450+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8451+#else
8452+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8453+#endif
8454
8455 #include <asm-generic/pgtable.h>
8456 #endif /* __ASSEMBLY__ */
8457diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_types.h linux-3.0.7/arch/x86/include/asm/pgtable_types.h
8458--- linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8459+++ linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8460@@ -16,13 +16,12 @@
8461 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8462 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8463 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8464-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8465+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8466 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8467 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8468 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8469-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8470-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8471-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8472+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8473+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8474 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8475
8476 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8477@@ -40,7 +39,6 @@
8478 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8479 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8480 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8481-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8482 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8483 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8484 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8485@@ -57,8 +55,10 @@
8486
8487 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8488 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8489-#else
8490+#elif defined(CONFIG_KMEMCHECK)
8491 #define _PAGE_NX (_AT(pteval_t, 0))
8492+#else
8493+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8494 #endif
8495
8496 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8497@@ -96,6 +96,9 @@
8498 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8499 _PAGE_ACCESSED)
8500
8501+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8502+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8503+
8504 #define __PAGE_KERNEL_EXEC \
8505 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8506 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8507@@ -106,8 +109,8 @@
8508 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8509 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8510 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8511-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8512-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8513+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8514+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8515 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8516 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8517 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8518@@ -166,8 +169,8 @@
8519 * bits are combined, this will alow user to access the high address mapped
8520 * VDSO in the presence of CONFIG_COMPAT_VDSO
8521 */
8522-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8523-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8524+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8525+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8526 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8527 #endif
8528
8529@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8530 {
8531 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8532 }
8533+#endif
8534
8535+#if PAGETABLE_LEVELS == 3
8536+#include <asm-generic/pgtable-nopud.h>
8537+#endif
8538+
8539+#if PAGETABLE_LEVELS == 2
8540+#include <asm-generic/pgtable-nopmd.h>
8541+#endif
8542+
8543+#ifndef __ASSEMBLY__
8544 #if PAGETABLE_LEVELS > 3
8545 typedef struct { pudval_t pud; } pud_t;
8546
8547@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8548 return pud.pud;
8549 }
8550 #else
8551-#include <asm-generic/pgtable-nopud.h>
8552-
8553 static inline pudval_t native_pud_val(pud_t pud)
8554 {
8555 return native_pgd_val(pud.pgd);
8556@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8557 return pmd.pmd;
8558 }
8559 #else
8560-#include <asm-generic/pgtable-nopmd.h>
8561-
8562 static inline pmdval_t native_pmd_val(pmd_t pmd)
8563 {
8564 return native_pgd_val(pmd.pud.pgd);
8565@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8566
8567 extern pteval_t __supported_pte_mask;
8568 extern void set_nx(void);
8569-extern int nx_enabled;
8570
8571 #define pgprot_writecombine pgprot_writecombine
8572 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8573diff -urNp linux-3.0.7/arch/x86/include/asm/processor.h linux-3.0.7/arch/x86/include/asm/processor.h
8574--- linux-3.0.7/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8575+++ linux-3.0.7/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8576@@ -266,7 +266,7 @@ struct tss_struct {
8577
8578 } ____cacheline_aligned;
8579
8580-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8581+extern struct tss_struct init_tss[NR_CPUS];
8582
8583 /*
8584 * Save the original ist values for checking stack pointers during debugging
8585@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8586 */
8587 #define TASK_SIZE PAGE_OFFSET
8588 #define TASK_SIZE_MAX TASK_SIZE
8589+
8590+#ifdef CONFIG_PAX_SEGMEXEC
8591+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8592+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8593+#else
8594 #define STACK_TOP TASK_SIZE
8595-#define STACK_TOP_MAX STACK_TOP
8596+#endif
8597+
8598+#define STACK_TOP_MAX TASK_SIZE
8599
8600 #define INIT_THREAD { \
8601- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8602+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8603 .vm86_info = NULL, \
8604 .sysenter_cs = __KERNEL_CS, \
8605 .io_bitmap_ptr = NULL, \
8606@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8607 */
8608 #define INIT_TSS { \
8609 .x86_tss = { \
8610- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8611+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8612 .ss0 = __KERNEL_DS, \
8613 .ss1 = __KERNEL_CS, \
8614 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8615@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8616 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8617
8618 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8619-#define KSTK_TOP(info) \
8620-({ \
8621- unsigned long *__ptr = (unsigned long *)(info); \
8622- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8623-})
8624+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8625
8626 /*
8627 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8628@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8629 #define task_pt_regs(task) \
8630 ({ \
8631 struct pt_regs *__regs__; \
8632- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8633+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8634 __regs__ - 1; \
8635 })
8636
8637@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8638 /*
8639 * User space process size. 47bits minus one guard page.
8640 */
8641-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8642+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8643
8644 /* This decides where the kernel will search for a free chunk of vm
8645 * space during mmap's.
8646 */
8647 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8648- 0xc0000000 : 0xFFFFe000)
8649+ 0xc0000000 : 0xFFFFf000)
8650
8651 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8652 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8653@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8654 #define STACK_TOP_MAX TASK_SIZE_MAX
8655
8656 #define INIT_THREAD { \
8657- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8658+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8659 }
8660
8661 #define INIT_TSS { \
8662- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8663+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8664 }
8665
8666 /*
8667@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8668 */
8669 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8670
8671+#ifdef CONFIG_PAX_SEGMEXEC
8672+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8673+#endif
8674+
8675 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8676
8677 /* Get/set a process' ability to use the timestamp counter instruction */
8678diff -urNp linux-3.0.7/arch/x86/include/asm/ptrace.h linux-3.0.7/arch/x86/include/asm/ptrace.h
8679--- linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8680+++ linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8681@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8682 }
8683
8684 /*
8685- * user_mode_vm(regs) determines whether a register set came from user mode.
8686+ * user_mode(regs) determines whether a register set came from user mode.
8687 * This is true if V8086 mode was enabled OR if the register set was from
8688 * protected mode with RPL-3 CS value. This tricky test checks that with
8689 * one comparison. Many places in the kernel can bypass this full check
8690- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8691+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8692+ * be used.
8693 */
8694-static inline int user_mode(struct pt_regs *regs)
8695+static inline int user_mode_novm(struct pt_regs *regs)
8696 {
8697 #ifdef CONFIG_X86_32
8698 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8699 #else
8700- return !!(regs->cs & 3);
8701+ return !!(regs->cs & SEGMENT_RPL_MASK);
8702 #endif
8703 }
8704
8705-static inline int user_mode_vm(struct pt_regs *regs)
8706+static inline int user_mode(struct pt_regs *regs)
8707 {
8708 #ifdef CONFIG_X86_32
8709 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8710 USER_RPL;
8711 #else
8712- return user_mode(regs);
8713+ return user_mode_novm(regs);
8714 #endif
8715 }
8716
8717diff -urNp linux-3.0.7/arch/x86/include/asm/reboot.h linux-3.0.7/arch/x86/include/asm/reboot.h
8718--- linux-3.0.7/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8719+++ linux-3.0.7/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8720@@ -6,19 +6,19 @@
8721 struct pt_regs;
8722
8723 struct machine_ops {
8724- void (*restart)(char *cmd);
8725- void (*halt)(void);
8726- void (*power_off)(void);
8727+ void (* __noreturn restart)(char *cmd);
8728+ void (* __noreturn halt)(void);
8729+ void (* __noreturn power_off)(void);
8730 void (*shutdown)(void);
8731 void (*crash_shutdown)(struct pt_regs *);
8732- void (*emergency_restart)(void);
8733-};
8734+ void (* __noreturn emergency_restart)(void);
8735+} __no_const;
8736
8737 extern struct machine_ops machine_ops;
8738
8739 void native_machine_crash_shutdown(struct pt_regs *regs);
8740 void native_machine_shutdown(void);
8741-void machine_real_restart(unsigned int type);
8742+void machine_real_restart(unsigned int type) __noreturn;
8743 /* These must match dispatch_table in reboot_32.S */
8744 #define MRR_BIOS 0
8745 #define MRR_APM 1
8746diff -urNp linux-3.0.7/arch/x86/include/asm/rwsem.h linux-3.0.7/arch/x86/include/asm/rwsem.h
8747--- linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8748+++ linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8749@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8750 {
8751 asm volatile("# beginning down_read\n\t"
8752 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 /* adds 0x00000001 */
8762 " jns 1f\n"
8763 " call call_rwsem_down_read_failed\n"
8764@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8765 "1:\n\t"
8766 " mov %1,%2\n\t"
8767 " add %3,%2\n\t"
8768+
8769+#ifdef CONFIG_PAX_REFCOUNT
8770+ "jno 0f\n"
8771+ "sub %3,%2\n"
8772+ "int $4\n0:\n"
8773+ _ASM_EXTABLE(0b, 0b)
8774+#endif
8775+
8776 " jle 2f\n\t"
8777 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8778 " jnz 1b\n\t"
8779@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8780 long tmp;
8781 asm volatile("# beginning down_write\n\t"
8782 LOCK_PREFIX " xadd %1,(%2)\n\t"
8783+
8784+#ifdef CONFIG_PAX_REFCOUNT
8785+ "jno 0f\n"
8786+ "mov %1,(%2)\n"
8787+ "int $4\n0:\n"
8788+ _ASM_EXTABLE(0b, 0b)
8789+#endif
8790+
8791 /* adds 0xffff0001, returns the old value */
8792 " test %1,%1\n\t"
8793 /* was the count 0 before? */
8794@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8795 long tmp;
8796 asm volatile("# beginning __up_read\n\t"
8797 LOCK_PREFIX " xadd %1,(%2)\n\t"
8798+
8799+#ifdef CONFIG_PAX_REFCOUNT
8800+ "jno 0f\n"
8801+ "mov %1,(%2)\n"
8802+ "int $4\n0:\n"
8803+ _ASM_EXTABLE(0b, 0b)
8804+#endif
8805+
8806 /* subtracts 1, returns the old value */
8807 " jns 1f\n\t"
8808 " call call_rwsem_wake\n" /* expects old value in %edx */
8809@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8810 long tmp;
8811 asm volatile("# beginning __up_write\n\t"
8812 LOCK_PREFIX " xadd %1,(%2)\n\t"
8813+
8814+#ifdef CONFIG_PAX_REFCOUNT
8815+ "jno 0f\n"
8816+ "mov %1,(%2)\n"
8817+ "int $4\n0:\n"
8818+ _ASM_EXTABLE(0b, 0b)
8819+#endif
8820+
8821 /* subtracts 0xffff0001, returns the old value */
8822 " jns 1f\n\t"
8823 " call call_rwsem_wake\n" /* expects old value in %edx */
8824@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8825 {
8826 asm volatile("# beginning __downgrade_write\n\t"
8827 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8828+
8829+#ifdef CONFIG_PAX_REFCOUNT
8830+ "jno 0f\n"
8831+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8832+ "int $4\n0:\n"
8833+ _ASM_EXTABLE(0b, 0b)
8834+#endif
8835+
8836 /*
8837 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8838 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8839@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8840 */
8841 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8842 {
8843- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8844+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8845+
8846+#ifdef CONFIG_PAX_REFCOUNT
8847+ "jno 0f\n"
8848+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8849+ "int $4\n0:\n"
8850+ _ASM_EXTABLE(0b, 0b)
8851+#endif
8852+
8853 : "+m" (sem->count)
8854 : "er" (delta));
8855 }
8856@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8857 {
8858 long tmp = delta;
8859
8860- asm volatile(LOCK_PREFIX "xadd %0,%1"
8861+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8862+
8863+#ifdef CONFIG_PAX_REFCOUNT
8864+ "jno 0f\n"
8865+ "mov %0,%1\n"
8866+ "int $4\n0:\n"
8867+ _ASM_EXTABLE(0b, 0b)
8868+#endif
8869+
8870 : "+r" (tmp), "+m" (sem->count)
8871 : : "memory");
8872
8873diff -urNp linux-3.0.7/arch/x86/include/asm/segment.h linux-3.0.7/arch/x86/include/asm/segment.h
8874--- linux-3.0.7/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8875+++ linux-3.0.7/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8876@@ -64,10 +64,15 @@
8877 * 26 - ESPFIX small SS
8878 * 27 - per-cpu [ offset to per-cpu data area ]
8879 * 28 - stack_canary-20 [ for stack protector ]
8880- * 29 - unused
8881- * 30 - unused
8882+ * 29 - PCI BIOS CS
8883+ * 30 - PCI BIOS DS
8884 * 31 - TSS for double fault handler
8885 */
8886+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8887+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8888+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8889+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8890+
8891 #define GDT_ENTRY_TLS_MIN 6
8892 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8893
8894@@ -79,6 +84,8 @@
8895
8896 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8897
8898+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8899+
8900 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8901
8902 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8903@@ -104,6 +111,12 @@
8904 #define __KERNEL_STACK_CANARY 0
8905 #endif
8906
8907+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8908+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8909+
8910+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8911+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8912+
8913 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8914
8915 /*
8916@@ -141,7 +154,7 @@
8917 */
8918
8919 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8920-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8921+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8922
8923
8924 #else
8925@@ -165,6 +178,8 @@
8926 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8927 #define __USER32_DS __USER_DS
8928
8929+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8930+
8931 #define GDT_ENTRY_TSS 8 /* needs two entries */
8932 #define GDT_ENTRY_LDT 10 /* needs two entries */
8933 #define GDT_ENTRY_TLS_MIN 12
8934@@ -185,6 +200,7 @@
8935 #endif
8936
8937 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8938+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8939 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8940 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8941 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8942diff -urNp linux-3.0.7/arch/x86/include/asm/smp.h linux-3.0.7/arch/x86/include/asm/smp.h
8943--- linux-3.0.7/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8944+++ linux-3.0.7/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8945@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8946 /* cpus sharing the last level cache: */
8947 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8948 DECLARE_PER_CPU(u16, cpu_llc_id);
8949-DECLARE_PER_CPU(int, cpu_number);
8950+DECLARE_PER_CPU(unsigned int, cpu_number);
8951
8952 static inline struct cpumask *cpu_sibling_mask(int cpu)
8953 {
8954@@ -77,7 +77,7 @@ struct smp_ops {
8955
8956 void (*send_call_func_ipi)(const struct cpumask *mask);
8957 void (*send_call_func_single_ipi)(int cpu);
8958-};
8959+} __no_const;
8960
8961 /* Globals due to paravirt */
8962 extern void set_cpu_sibling_map(int cpu);
8963@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8964 extern int safe_smp_processor_id(void);
8965
8966 #elif defined(CONFIG_X86_64_SMP)
8967-#define raw_smp_processor_id() (percpu_read(cpu_number))
8968-
8969-#define stack_smp_processor_id() \
8970-({ \
8971- struct thread_info *ti; \
8972- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8973- ti->cpu; \
8974-})
8975+#define raw_smp_processor_id() (percpu_read(cpu_number))
8976+#define stack_smp_processor_id() raw_smp_processor_id()
8977 #define safe_smp_processor_id() smp_processor_id()
8978
8979 #endif
8980diff -urNp linux-3.0.7/arch/x86/include/asm/spinlock.h linux-3.0.7/arch/x86/include/asm/spinlock.h
8981--- linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8982+++ linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8983@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8984 static inline void arch_read_lock(arch_rwlock_t *rw)
8985 {
8986 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8987+
8988+#ifdef CONFIG_PAX_REFCOUNT
8989+ "jno 0f\n"
8990+ LOCK_PREFIX " addl $1,(%0)\n"
8991+ "int $4\n0:\n"
8992+ _ASM_EXTABLE(0b, 0b)
8993+#endif
8994+
8995 "jns 1f\n"
8996 "call __read_lock_failed\n\t"
8997 "1:\n"
8998@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8999 static inline void arch_write_lock(arch_rwlock_t *rw)
9000 {
9001 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9002+
9003+#ifdef CONFIG_PAX_REFCOUNT
9004+ "jno 0f\n"
9005+ LOCK_PREFIX " addl %1,(%0)\n"
9006+ "int $4\n0:\n"
9007+ _ASM_EXTABLE(0b, 0b)
9008+#endif
9009+
9010 "jz 1f\n"
9011 "call __write_lock_failed\n\t"
9012 "1:\n"
9013@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9014
9015 static inline void arch_read_unlock(arch_rwlock_t *rw)
9016 {
9017- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9018+ asm volatile(LOCK_PREFIX "incl %0\n"
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ "jno 0f\n"
9022+ LOCK_PREFIX "decl %0\n"
9023+ "int $4\n0:\n"
9024+ _ASM_EXTABLE(0b, 0b)
9025+#endif
9026+
9027+ :"+m" (rw->lock) : : "memory");
9028 }
9029
9030 static inline void arch_write_unlock(arch_rwlock_t *rw)
9031 {
9032- asm volatile(LOCK_PREFIX "addl %1, %0"
9033+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9034+
9035+#ifdef CONFIG_PAX_REFCOUNT
9036+ "jno 0f\n"
9037+ LOCK_PREFIX "subl %1, %0\n"
9038+ "int $4\n0:\n"
9039+ _ASM_EXTABLE(0b, 0b)
9040+#endif
9041+
9042 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9043 }
9044
9045diff -urNp linux-3.0.7/arch/x86/include/asm/stackprotector.h linux-3.0.7/arch/x86/include/asm/stackprotector.h
9046--- linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9047+++ linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9048@@ -48,7 +48,7 @@
9049 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9050 */
9051 #define GDT_STACK_CANARY_INIT \
9052- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9053+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9054
9055 /*
9056 * Initialize the stackprotector canary value.
9057@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9058
9059 static inline void load_stack_canary_segment(void)
9060 {
9061-#ifdef CONFIG_X86_32
9062+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9063 asm volatile ("mov %0, %%gs" : : "r" (0));
9064 #endif
9065 }
9066diff -urNp linux-3.0.7/arch/x86/include/asm/stacktrace.h linux-3.0.7/arch/x86/include/asm/stacktrace.h
9067--- linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9068+++ linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9069@@ -11,28 +11,20 @@
9070
9071 extern int kstack_depth_to_print;
9072
9073-struct thread_info;
9074+struct task_struct;
9075 struct stacktrace_ops;
9076
9077-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9078- unsigned long *stack,
9079- unsigned long bp,
9080- const struct stacktrace_ops *ops,
9081- void *data,
9082- unsigned long *end,
9083- int *graph);
9084-
9085-extern unsigned long
9086-print_context_stack(struct thread_info *tinfo,
9087- unsigned long *stack, unsigned long bp,
9088- const struct stacktrace_ops *ops, void *data,
9089- unsigned long *end, int *graph);
9090-
9091-extern unsigned long
9092-print_context_stack_bp(struct thread_info *tinfo,
9093- unsigned long *stack, unsigned long bp,
9094- const struct stacktrace_ops *ops, void *data,
9095- unsigned long *end, int *graph);
9096+typedef unsigned long walk_stack_t(struct task_struct *task,
9097+ void *stack_start,
9098+ unsigned long *stack,
9099+ unsigned long bp,
9100+ const struct stacktrace_ops *ops,
9101+ void *data,
9102+ unsigned long *end,
9103+ int *graph);
9104+
9105+extern walk_stack_t print_context_stack;
9106+extern walk_stack_t print_context_stack_bp;
9107
9108 /* Generic stack tracer with callbacks */
9109
9110@@ -40,7 +32,7 @@ struct stacktrace_ops {
9111 void (*address)(void *data, unsigned long address, int reliable);
9112 /* On negative return stop dumping */
9113 int (*stack)(void *data, char *name);
9114- walk_stack_t walk_stack;
9115+ walk_stack_t *walk_stack;
9116 };
9117
9118 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9119diff -urNp linux-3.0.7/arch/x86/include/asm/sys_ia32.h linux-3.0.7/arch/x86/include/asm/sys_ia32.h
9120--- linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9121+++ linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9122@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9123 compat_sigset_t __user *, unsigned int);
9124 asmlinkage long sys32_alarm(unsigned int);
9125
9126-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9127+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9128 asmlinkage long sys32_sysfs(int, u32, u32);
9129
9130 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9131diff -urNp linux-3.0.7/arch/x86/include/asm/system.h linux-3.0.7/arch/x86/include/asm/system.h
9132--- linux-3.0.7/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9133+++ linux-3.0.7/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9134@@ -129,7 +129,7 @@ do { \
9135 "call __switch_to\n\t" \
9136 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9137 __switch_canary \
9138- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9139+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9140 "movq %%rax,%%rdi\n\t" \
9141 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9142 "jnz ret_from_fork\n\t" \
9143@@ -140,7 +140,7 @@ do { \
9144 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9145 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9146 [_tif_fork] "i" (_TIF_FORK), \
9147- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9148+ [thread_info] "m" (current_tinfo), \
9149 [current_task] "m" (current_task) \
9150 __switch_canary_iparam \
9151 : "memory", "cc" __EXTRA_CLOBBER)
9152@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9153 {
9154 unsigned long __limit;
9155 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9156- return __limit + 1;
9157+ return __limit;
9158 }
9159
9160 static inline void native_clts(void)
9161@@ -397,12 +397,12 @@ void enable_hlt(void);
9162
9163 void cpu_idle_wait(void);
9164
9165-extern unsigned long arch_align_stack(unsigned long sp);
9166+#define arch_align_stack(x) ((x) & ~0xfUL)
9167 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9168
9169 void default_idle(void);
9170
9171-void stop_this_cpu(void *dummy);
9172+void stop_this_cpu(void *dummy) __noreturn;
9173
9174 /*
9175 * Force strict CPU ordering.
9176diff -urNp linux-3.0.7/arch/x86/include/asm/thread_info.h linux-3.0.7/arch/x86/include/asm/thread_info.h
9177--- linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9178+++ linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9179@@ -10,6 +10,7 @@
9180 #include <linux/compiler.h>
9181 #include <asm/page.h>
9182 #include <asm/types.h>
9183+#include <asm/percpu.h>
9184
9185 /*
9186 * low level task data that entry.S needs immediate access to
9187@@ -24,7 +25,6 @@ struct exec_domain;
9188 #include <asm/atomic.h>
9189
9190 struct thread_info {
9191- struct task_struct *task; /* main task structure */
9192 struct exec_domain *exec_domain; /* execution domain */
9193 __u32 flags; /* low level flags */
9194 __u32 status; /* thread synchronous flags */
9195@@ -34,18 +34,12 @@ struct thread_info {
9196 mm_segment_t addr_limit;
9197 struct restart_block restart_block;
9198 void __user *sysenter_return;
9199-#ifdef CONFIG_X86_32
9200- unsigned long previous_esp; /* ESP of the previous stack in
9201- case of nested (IRQ) stacks
9202- */
9203- __u8 supervisor_stack[0];
9204-#endif
9205+ unsigned long lowest_stack;
9206 int uaccess_err;
9207 };
9208
9209-#define INIT_THREAD_INFO(tsk) \
9210+#define INIT_THREAD_INFO \
9211 { \
9212- .task = &tsk, \
9213 .exec_domain = &default_exec_domain, \
9214 .flags = 0, \
9215 .cpu = 0, \
9216@@ -56,7 +50,7 @@ struct thread_info {
9217 }, \
9218 }
9219
9220-#define init_thread_info (init_thread_union.thread_info)
9221+#define init_thread_info (init_thread_union.stack)
9222 #define init_stack (init_thread_union.stack)
9223
9224 #else /* !__ASSEMBLY__ */
9225@@ -170,6 +164,23 @@ struct thread_info {
9226 ret; \
9227 })
9228
9229+#ifdef __ASSEMBLY__
9230+/* how to get the thread information struct from ASM */
9231+#define GET_THREAD_INFO(reg) \
9232+ mov PER_CPU_VAR(current_tinfo), reg
9233+
9234+/* use this one if reg already contains %esp */
9235+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9236+#else
9237+/* how to get the thread information struct from C */
9238+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9239+
9240+static __always_inline struct thread_info *current_thread_info(void)
9241+{
9242+ return percpu_read_stable(current_tinfo);
9243+}
9244+#endif
9245+
9246 #ifdef CONFIG_X86_32
9247
9248 #define STACK_WARN (THREAD_SIZE/8)
9249@@ -180,35 +191,13 @@ struct thread_info {
9250 */
9251 #ifndef __ASSEMBLY__
9252
9253-
9254 /* how to get the current stack pointer from C */
9255 register unsigned long current_stack_pointer asm("esp") __used;
9256
9257-/* how to get the thread information struct from C */
9258-static inline struct thread_info *current_thread_info(void)
9259-{
9260- return (struct thread_info *)
9261- (current_stack_pointer & ~(THREAD_SIZE - 1));
9262-}
9263-
9264-#else /* !__ASSEMBLY__ */
9265-
9266-/* how to get the thread information struct from ASM */
9267-#define GET_THREAD_INFO(reg) \
9268- movl $-THREAD_SIZE, reg; \
9269- andl %esp, reg
9270-
9271-/* use this one if reg already contains %esp */
9272-#define GET_THREAD_INFO_WITH_ESP(reg) \
9273- andl $-THREAD_SIZE, reg
9274-
9275 #endif
9276
9277 #else /* X86_32 */
9278
9279-#include <asm/percpu.h>
9280-#define KERNEL_STACK_OFFSET (5*8)
9281-
9282 /*
9283 * macros/functions for gaining access to the thread information structure
9284 * preempt_count needs to be 1 initially, until the scheduler is functional.
9285@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9286 #ifndef __ASSEMBLY__
9287 DECLARE_PER_CPU(unsigned long, kernel_stack);
9288
9289-static inline struct thread_info *current_thread_info(void)
9290-{
9291- struct thread_info *ti;
9292- ti = (void *)(percpu_read_stable(kernel_stack) +
9293- KERNEL_STACK_OFFSET - THREAD_SIZE);
9294- return ti;
9295-}
9296-
9297-#else /* !__ASSEMBLY__ */
9298-
9299-/* how to get the thread information struct from ASM */
9300-#define GET_THREAD_INFO(reg) \
9301- movq PER_CPU_VAR(kernel_stack),reg ; \
9302- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9303-
9304+/* how to get the current stack pointer from C */
9305+register unsigned long current_stack_pointer asm("rsp") __used;
9306 #endif
9307
9308 #endif /* !X86_32 */
9309@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9310 extern void free_thread_info(struct thread_info *ti);
9311 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9312 #define arch_task_cache_init arch_task_cache_init
9313+
9314+#define __HAVE_THREAD_FUNCTIONS
9315+#define task_thread_info(task) (&(task)->tinfo)
9316+#define task_stack_page(task) ((task)->stack)
9317+#define setup_thread_stack(p, org) do {} while (0)
9318+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9319+
9320+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9321+extern struct task_struct *alloc_task_struct_node(int node);
9322+extern void free_task_struct(struct task_struct *);
9323+
9324 #endif
9325 #endif /* _ASM_X86_THREAD_INFO_H */
9326diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_32.h linux-3.0.7/arch/x86/include/asm/uaccess_32.h
9327--- linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9328+++ linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9329@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9330 static __always_inline unsigned long __must_check
9331 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9332 {
9333+ pax_track_stack();
9334+
9335+ if ((long)n < 0)
9336+ return n;
9337+
9338 if (__builtin_constant_p(n)) {
9339 unsigned long ret;
9340
9341@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9342 return ret;
9343 }
9344 }
9345+ if (!__builtin_constant_p(n))
9346+ check_object_size(from, n, true);
9347 return __copy_to_user_ll(to, from, n);
9348 }
9349
9350@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9351 __copy_to_user(void __user *to, const void *from, unsigned long n)
9352 {
9353 might_fault();
9354+
9355 return __copy_to_user_inatomic(to, from, n);
9356 }
9357
9358 static __always_inline unsigned long
9359 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9360 {
9361+ if ((long)n < 0)
9362+ return n;
9363+
9364 /* Avoid zeroing the tail if the copy fails..
9365 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9366 * but as the zeroing behaviour is only significant when n is not
9367@@ -137,6 +148,12 @@ static __always_inline unsigned long
9368 __copy_from_user(void *to, const void __user *from, unsigned long n)
9369 {
9370 might_fault();
9371+
9372+ pax_track_stack();
9373+
9374+ if ((long)n < 0)
9375+ return n;
9376+
9377 if (__builtin_constant_p(n)) {
9378 unsigned long ret;
9379
9380@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9381 return ret;
9382 }
9383 }
9384+ if (!__builtin_constant_p(n))
9385+ check_object_size(to, n, false);
9386 return __copy_from_user_ll(to, from, n);
9387 }
9388
9389@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9390 const void __user *from, unsigned long n)
9391 {
9392 might_fault();
9393+
9394+ if ((long)n < 0)
9395+ return n;
9396+
9397 if (__builtin_constant_p(n)) {
9398 unsigned long ret;
9399
9400@@ -181,15 +204,19 @@ static __always_inline unsigned long
9401 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9402 unsigned long n)
9403 {
9404- return __copy_from_user_ll_nocache_nozero(to, from, n);
9405-}
9406+ if ((long)n < 0)
9407+ return n;
9408
9409-unsigned long __must_check copy_to_user(void __user *to,
9410- const void *from, unsigned long n);
9411-unsigned long __must_check _copy_from_user(void *to,
9412- const void __user *from,
9413- unsigned long n);
9414+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9415+}
9416
9417+extern void copy_to_user_overflow(void)
9418+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9419+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9420+#else
9421+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9422+#endif
9423+;
9424
9425 extern void copy_from_user_overflow(void)
9426 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9427@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9428 #endif
9429 ;
9430
9431-static inline unsigned long __must_check copy_from_user(void *to,
9432- const void __user *from,
9433- unsigned long n)
9434+/**
9435+ * copy_to_user: - Copy a block of data into user space.
9436+ * @to: Destination address, in user space.
9437+ * @from: Source address, in kernel space.
9438+ * @n: Number of bytes to copy.
9439+ *
9440+ * Context: User context only. This function may sleep.
9441+ *
9442+ * Copy data from kernel space to user space.
9443+ *
9444+ * Returns number of bytes that could not be copied.
9445+ * On success, this will be zero.
9446+ */
9447+static inline unsigned long __must_check
9448+copy_to_user(void __user *to, const void *from, unsigned long n)
9449+{
9450+ int sz = __compiletime_object_size(from);
9451+
9452+ if (unlikely(sz != -1 && sz < n))
9453+ copy_to_user_overflow();
9454+ else if (access_ok(VERIFY_WRITE, to, n))
9455+ n = __copy_to_user(to, from, n);
9456+ return n;
9457+}
9458+
9459+/**
9460+ * copy_from_user: - Copy a block of data from user space.
9461+ * @to: Destination address, in kernel space.
9462+ * @from: Source address, in user space.
9463+ * @n: Number of bytes to copy.
9464+ *
9465+ * Context: User context only. This function may sleep.
9466+ *
9467+ * Copy data from user space to kernel space.
9468+ *
9469+ * Returns number of bytes that could not be copied.
9470+ * On success, this will be zero.
9471+ *
9472+ * If some data could not be copied, this function will pad the copied
9473+ * data to the requested size using zero bytes.
9474+ */
9475+static inline unsigned long __must_check
9476+copy_from_user(void *to, const void __user *from, unsigned long n)
9477 {
9478 int sz = __compiletime_object_size(to);
9479
9480- if (likely(sz == -1 || sz >= n))
9481- n = _copy_from_user(to, from, n);
9482- else
9483+ if (unlikely(sz != -1 && sz < n))
9484 copy_from_user_overflow();
9485-
9486+ else if (access_ok(VERIFY_READ, from, n))
9487+ n = __copy_from_user(to, from, n);
9488+ else if ((long)n > 0) {
9489+ if (!__builtin_constant_p(n))
9490+ check_object_size(to, n, false);
9491+ memset(to, 0, n);
9492+ }
9493 return n;
9494 }
9495
9496diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_64.h linux-3.0.7/arch/x86/include/asm/uaccess_64.h
9497--- linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9498+++ linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9499@@ -10,6 +10,9 @@
9500 #include <asm/alternative.h>
9501 #include <asm/cpufeature.h>
9502 #include <asm/page.h>
9503+#include <asm/pgtable.h>
9504+
9505+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9506
9507 /*
9508 * Copy To/From Userspace
9509@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9510 return ret;
9511 }
9512
9513-__must_check unsigned long
9514-_copy_to_user(void __user *to, const void *from, unsigned len);
9515-__must_check unsigned long
9516-_copy_from_user(void *to, const void __user *from, unsigned len);
9517+static __always_inline __must_check unsigned long
9518+__copy_to_user(void __user *to, const void *from, unsigned len);
9519+static __always_inline __must_check unsigned long
9520+__copy_from_user(void *to, const void __user *from, unsigned len);
9521 __must_check unsigned long
9522 copy_in_user(void __user *to, const void __user *from, unsigned len);
9523
9524 static inline unsigned long __must_check copy_from_user(void *to,
9525 const void __user *from,
9526- unsigned long n)
9527+ unsigned n)
9528 {
9529- int sz = __compiletime_object_size(to);
9530-
9531 might_fault();
9532- if (likely(sz == -1 || sz >= n))
9533- n = _copy_from_user(to, from, n);
9534-#ifdef CONFIG_DEBUG_VM
9535- else
9536- WARN(1, "Buffer overflow detected!\n");
9537-#endif
9538+
9539+ if (access_ok(VERIFY_READ, from, n))
9540+ n = __copy_from_user(to, from, n);
9541+ else if ((int)n > 0) {
9542+ if (!__builtin_constant_p(n))
9543+ check_object_size(to, n, false);
9544+ memset(to, 0, n);
9545+ }
9546 return n;
9547 }
9548
9549@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9550 {
9551 might_fault();
9552
9553- return _copy_to_user(dst, src, size);
9554+ if (access_ok(VERIFY_WRITE, dst, size))
9555+ size = __copy_to_user(dst, src, size);
9556+ return size;
9557 }
9558
9559 static __always_inline __must_check
9560-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9561+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9562 {
9563- int ret = 0;
9564+ int sz = __compiletime_object_size(dst);
9565+ unsigned ret = 0;
9566
9567 might_fault();
9568- if (!__builtin_constant_p(size))
9569- return copy_user_generic(dst, (__force void *)src, size);
9570+
9571+ pax_track_stack();
9572+
9573+ if ((int)size < 0)
9574+ return size;
9575+
9576+#ifdef CONFIG_PAX_MEMORY_UDEREF
9577+ if (!__access_ok(VERIFY_READ, src, size))
9578+ return size;
9579+#endif
9580+
9581+ if (unlikely(sz != -1 && sz < size)) {
9582+#ifdef CONFIG_DEBUG_VM
9583+ WARN(1, "Buffer overflow detected!\n");
9584+#endif
9585+ return size;
9586+ }
9587+
9588+ if (!__builtin_constant_p(size)) {
9589+ check_object_size(dst, size, false);
9590+
9591+#ifdef CONFIG_PAX_MEMORY_UDEREF
9592+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9593+ src += PAX_USER_SHADOW_BASE;
9594+#endif
9595+
9596+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9597+ }
9598 switch (size) {
9599- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9600+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9601 ret, "b", "b", "=q", 1);
9602 return ret;
9603- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9604+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9605 ret, "w", "w", "=r", 2);
9606 return ret;
9607- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9608+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9609 ret, "l", "k", "=r", 4);
9610 return ret;
9611- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9612+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9613 ret, "q", "", "=r", 8);
9614 return ret;
9615 case 10:
9616- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9617+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9618 ret, "q", "", "=r", 10);
9619 if (unlikely(ret))
9620 return ret;
9621 __get_user_asm(*(u16 *)(8 + (char *)dst),
9622- (u16 __user *)(8 + (char __user *)src),
9623+ (const u16 __user *)(8 + (const char __user *)src),
9624 ret, "w", "w", "=r", 2);
9625 return ret;
9626 case 16:
9627- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9628+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9629 ret, "q", "", "=r", 16);
9630 if (unlikely(ret))
9631 return ret;
9632 __get_user_asm(*(u64 *)(8 + (char *)dst),
9633- (u64 __user *)(8 + (char __user *)src),
9634+ (const u64 __user *)(8 + (const char __user *)src),
9635 ret, "q", "", "=r", 8);
9636 return ret;
9637 default:
9638- return copy_user_generic(dst, (__force void *)src, size);
9639+
9640+#ifdef CONFIG_PAX_MEMORY_UDEREF
9641+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9642+ src += PAX_USER_SHADOW_BASE;
9643+#endif
9644+
9645+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9646 }
9647 }
9648
9649 static __always_inline __must_check
9650-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9651+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9652 {
9653- int ret = 0;
9654+ int sz = __compiletime_object_size(src);
9655+ unsigned ret = 0;
9656
9657 might_fault();
9658- if (!__builtin_constant_p(size))
9659- return copy_user_generic((__force void *)dst, src, size);
9660+
9661+ pax_track_stack();
9662+
9663+ if ((int)size < 0)
9664+ return size;
9665+
9666+#ifdef CONFIG_PAX_MEMORY_UDEREF
9667+ if (!__access_ok(VERIFY_WRITE, dst, size))
9668+ return size;
9669+#endif
9670+
9671+ if (unlikely(sz != -1 && sz < size)) {
9672+#ifdef CONFIG_DEBUG_VM
9673+ WARN(1, "Buffer overflow detected!\n");
9674+#endif
9675+ return size;
9676+ }
9677+
9678+ if (!__builtin_constant_p(size)) {
9679+ check_object_size(src, size, true);
9680+
9681+#ifdef CONFIG_PAX_MEMORY_UDEREF
9682+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9683+ dst += PAX_USER_SHADOW_BASE;
9684+#endif
9685+
9686+ return copy_user_generic((__force_kernel void *)dst, src, size);
9687+ }
9688 switch (size) {
9689- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9690+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9691 ret, "b", "b", "iq", 1);
9692 return ret;
9693- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9694+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9695 ret, "w", "w", "ir", 2);
9696 return ret;
9697- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9698+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9699 ret, "l", "k", "ir", 4);
9700 return ret;
9701- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9702+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9703 ret, "q", "", "er", 8);
9704 return ret;
9705 case 10:
9706- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9707+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9708 ret, "q", "", "er", 10);
9709 if (unlikely(ret))
9710 return ret;
9711 asm("":::"memory");
9712- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9713+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9714 ret, "w", "w", "ir", 2);
9715 return ret;
9716 case 16:
9717- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9718+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9719 ret, "q", "", "er", 16);
9720 if (unlikely(ret))
9721 return ret;
9722 asm("":::"memory");
9723- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9724+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9725 ret, "q", "", "er", 8);
9726 return ret;
9727 default:
9728- return copy_user_generic((__force void *)dst, src, size);
9729+
9730+#ifdef CONFIG_PAX_MEMORY_UDEREF
9731+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9732+ dst += PAX_USER_SHADOW_BASE;
9733+#endif
9734+
9735+ return copy_user_generic((__force_kernel void *)dst, src, size);
9736 }
9737 }
9738
9739 static __always_inline __must_check
9740-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9741+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9742 {
9743- int ret = 0;
9744+ unsigned ret = 0;
9745
9746 might_fault();
9747- if (!__builtin_constant_p(size))
9748- return copy_user_generic((__force void *)dst,
9749- (__force void *)src, size);
9750+
9751+ if ((int)size < 0)
9752+ return size;
9753+
9754+#ifdef CONFIG_PAX_MEMORY_UDEREF
9755+ if (!__access_ok(VERIFY_READ, src, size))
9756+ return size;
9757+ if (!__access_ok(VERIFY_WRITE, dst, size))
9758+ return size;
9759+#endif
9760+
9761+ if (!__builtin_constant_p(size)) {
9762+
9763+#ifdef CONFIG_PAX_MEMORY_UDEREF
9764+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9765+ src += PAX_USER_SHADOW_BASE;
9766+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9767+ dst += PAX_USER_SHADOW_BASE;
9768+#endif
9769+
9770+ return copy_user_generic((__force_kernel void *)dst,
9771+ (__force_kernel const void *)src, size);
9772+ }
9773 switch (size) {
9774 case 1: {
9775 u8 tmp;
9776- __get_user_asm(tmp, (u8 __user *)src,
9777+ __get_user_asm(tmp, (const u8 __user *)src,
9778 ret, "b", "b", "=q", 1);
9779 if (likely(!ret))
9780 __put_user_asm(tmp, (u8 __user *)dst,
9781@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9782 }
9783 case 2: {
9784 u16 tmp;
9785- __get_user_asm(tmp, (u16 __user *)src,
9786+ __get_user_asm(tmp, (const u16 __user *)src,
9787 ret, "w", "w", "=r", 2);
9788 if (likely(!ret))
9789 __put_user_asm(tmp, (u16 __user *)dst,
9790@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9791
9792 case 4: {
9793 u32 tmp;
9794- __get_user_asm(tmp, (u32 __user *)src,
9795+ __get_user_asm(tmp, (const u32 __user *)src,
9796 ret, "l", "k", "=r", 4);
9797 if (likely(!ret))
9798 __put_user_asm(tmp, (u32 __user *)dst,
9799@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9800 }
9801 case 8: {
9802 u64 tmp;
9803- __get_user_asm(tmp, (u64 __user *)src,
9804+ __get_user_asm(tmp, (const u64 __user *)src,
9805 ret, "q", "", "=r", 8);
9806 if (likely(!ret))
9807 __put_user_asm(tmp, (u64 __user *)dst,
9808@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9809 return ret;
9810 }
9811 default:
9812- return copy_user_generic((__force void *)dst,
9813- (__force void *)src, size);
9814+
9815+#ifdef CONFIG_PAX_MEMORY_UDEREF
9816+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9817+ src += PAX_USER_SHADOW_BASE;
9818+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9819+ dst += PAX_USER_SHADOW_BASE;
9820+#endif
9821+
9822+ return copy_user_generic((__force_kernel void *)dst,
9823+ (__force_kernel const void *)src, size);
9824 }
9825 }
9826
9827@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9828 static __must_check __always_inline int
9829 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9830 {
9831- return copy_user_generic(dst, (__force const void *)src, size);
9832+ pax_track_stack();
9833+
9834+ if ((int)size < 0)
9835+ return size;
9836+
9837+#ifdef CONFIG_PAX_MEMORY_UDEREF
9838+ if (!__access_ok(VERIFY_READ, src, size))
9839+ return size;
9840+
9841+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9842+ src += PAX_USER_SHADOW_BASE;
9843+#endif
9844+
9845+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9846 }
9847
9848-static __must_check __always_inline int
9849+static __must_check __always_inline unsigned long
9850 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9851 {
9852- return copy_user_generic((__force void *)dst, src, size);
9853+ if ((int)size < 0)
9854+ return size;
9855+
9856+#ifdef CONFIG_PAX_MEMORY_UDEREF
9857+ if (!__access_ok(VERIFY_WRITE, dst, size))
9858+ return size;
9859+
9860+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9861+ dst += PAX_USER_SHADOW_BASE;
9862+#endif
9863+
9864+ return copy_user_generic((__force_kernel void *)dst, src, size);
9865 }
9866
9867-extern long __copy_user_nocache(void *dst, const void __user *src,
9868+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9869 unsigned size, int zerorest);
9870
9871-static inline int
9872-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9873+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9874 {
9875 might_sleep();
9876+
9877+ if ((int)size < 0)
9878+ return size;
9879+
9880+#ifdef CONFIG_PAX_MEMORY_UDEREF
9881+ if (!__access_ok(VERIFY_READ, src, size))
9882+ return size;
9883+#endif
9884+
9885 return __copy_user_nocache(dst, src, size, 1);
9886 }
9887
9888-static inline int
9889-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9890+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9891 unsigned size)
9892 {
9893+ if ((int)size < 0)
9894+ return size;
9895+
9896+#ifdef CONFIG_PAX_MEMORY_UDEREF
9897+ if (!__access_ok(VERIFY_READ, src, size))
9898+ return size;
9899+#endif
9900+
9901 return __copy_user_nocache(dst, src, size, 0);
9902 }
9903
9904-unsigned long
9905-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9906+extern unsigned long
9907+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9908
9909 #endif /* _ASM_X86_UACCESS_64_H */
9910diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess.h linux-3.0.7/arch/x86/include/asm/uaccess.h
9911--- linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9912+++ linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9913@@ -7,12 +7,15 @@
9914 #include <linux/compiler.h>
9915 #include <linux/thread_info.h>
9916 #include <linux/string.h>
9917+#include <linux/sched.h>
9918 #include <asm/asm.h>
9919 #include <asm/page.h>
9920
9921 #define VERIFY_READ 0
9922 #define VERIFY_WRITE 1
9923
9924+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9925+
9926 /*
9927 * The fs value determines whether argument validity checking should be
9928 * performed or not. If get_fs() == USER_DS, checking is performed, with
9929@@ -28,7 +31,12 @@
9930
9931 #define get_ds() (KERNEL_DS)
9932 #define get_fs() (current_thread_info()->addr_limit)
9933+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9934+void __set_fs(mm_segment_t x);
9935+void set_fs(mm_segment_t x);
9936+#else
9937 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9938+#endif
9939
9940 #define segment_eq(a, b) ((a).seg == (b).seg)
9941
9942@@ -76,7 +84,33 @@
9943 * checks that the pointer is in the user space range - after calling
9944 * this function, memory access functions may still return -EFAULT.
9945 */
9946-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9947+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9948+#define access_ok(type, addr, size) \
9949+({ \
9950+ long __size = size; \
9951+ unsigned long __addr = (unsigned long)addr; \
9952+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9953+ unsigned long __end_ao = __addr + __size - 1; \
9954+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9955+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9956+ while(__addr_ao <= __end_ao) { \
9957+ char __c_ao; \
9958+ __addr_ao += PAGE_SIZE; \
9959+ if (__size > PAGE_SIZE) \
9960+ cond_resched(); \
9961+ if (__get_user(__c_ao, (char __user *)__addr)) \
9962+ break; \
9963+ if (type != VERIFY_WRITE) { \
9964+ __addr = __addr_ao; \
9965+ continue; \
9966+ } \
9967+ if (__put_user(__c_ao, (char __user *)__addr)) \
9968+ break; \
9969+ __addr = __addr_ao; \
9970+ } \
9971+ } \
9972+ __ret_ao; \
9973+})
9974
9975 /*
9976 * The exception table consists of pairs of addresses: the first is the
9977@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9978 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9979 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9980
9981-
9982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9983+#define __copyuser_seg "gs;"
9984+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9985+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9986+#else
9987+#define __copyuser_seg
9988+#define __COPYUSER_SET_ES
9989+#define __COPYUSER_RESTORE_ES
9990+#endif
9991
9992 #ifdef CONFIG_X86_32
9993 #define __put_user_asm_u64(x, addr, err, errret) \
9994- asm volatile("1: movl %%eax,0(%2)\n" \
9995- "2: movl %%edx,4(%2)\n" \
9996+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9997+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9998 "3:\n" \
9999 ".section .fixup,\"ax\"\n" \
10000 "4: movl %3,%0\n" \
10001@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10002 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10003
10004 #define __put_user_asm_ex_u64(x, addr) \
10005- asm volatile("1: movl %%eax,0(%1)\n" \
10006- "2: movl %%edx,4(%1)\n" \
10007+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10008+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10009 "3:\n" \
10010 _ASM_EXTABLE(1b, 2b - 1b) \
10011 _ASM_EXTABLE(2b, 3b - 2b) \
10012@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10013 __typeof__(*(ptr)) __pu_val; \
10014 __chk_user_ptr(ptr); \
10015 might_fault(); \
10016- __pu_val = x; \
10017+ __pu_val = (x); \
10018 switch (sizeof(*(ptr))) { \
10019 case 1: \
10020 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10021@@ -373,7 +415,7 @@ do { \
10022 } while (0)
10023
10024 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10025- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10026+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10027 "2:\n" \
10028 ".section .fixup,\"ax\"\n" \
10029 "3: mov %3,%0\n" \
10030@@ -381,7 +423,7 @@ do { \
10031 " jmp 2b\n" \
10032 ".previous\n" \
10033 _ASM_EXTABLE(1b, 3b) \
10034- : "=r" (err), ltype(x) \
10035+ : "=r" (err), ltype (x) \
10036 : "m" (__m(addr)), "i" (errret), "0" (err))
10037
10038 #define __get_user_size_ex(x, ptr, size) \
10039@@ -406,7 +448,7 @@ do { \
10040 } while (0)
10041
10042 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10043- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10044+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10045 "2:\n" \
10046 _ASM_EXTABLE(1b, 2b - 1b) \
10047 : ltype(x) : "m" (__m(addr)))
10048@@ -423,13 +465,24 @@ do { \
10049 int __gu_err; \
10050 unsigned long __gu_val; \
10051 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10052- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10053+ (x) = (__typeof__(*(ptr)))__gu_val; \
10054 __gu_err; \
10055 })
10056
10057 /* FIXME: this hack is definitely wrong -AK */
10058 struct __large_struct { unsigned long buf[100]; };
10059-#define __m(x) (*(struct __large_struct __user *)(x))
10060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10061+#define ____m(x) \
10062+({ \
10063+ unsigned long ____x = (unsigned long)(x); \
10064+ if (____x < PAX_USER_SHADOW_BASE) \
10065+ ____x += PAX_USER_SHADOW_BASE; \
10066+ (void __user *)____x; \
10067+})
10068+#else
10069+#define ____m(x) (x)
10070+#endif
10071+#define __m(x) (*(struct __large_struct __user *)____m(x))
10072
10073 /*
10074 * Tell gcc we read from memory instead of writing: this is because
10075@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10076 * aliasing issues.
10077 */
10078 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10079- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10080+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10081 "2:\n" \
10082 ".section .fixup,\"ax\"\n" \
10083 "3: mov %3,%0\n" \
10084@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10085 ".previous\n" \
10086 _ASM_EXTABLE(1b, 3b) \
10087 : "=r"(err) \
10088- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10089+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10090
10091 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10092- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10093+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10094 "2:\n" \
10095 _ASM_EXTABLE(1b, 2b - 1b) \
10096 : : ltype(x), "m" (__m(addr)))
10097@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10098 * On error, the variable @x is set to zero.
10099 */
10100
10101+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10102+#define __get_user(x, ptr) get_user((x), (ptr))
10103+#else
10104 #define __get_user(x, ptr) \
10105 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10106+#endif
10107
10108 /**
10109 * __put_user: - Write a simple value into user space, with less checking.
10110@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10111 * Returns zero on success, or -EFAULT on error.
10112 */
10113
10114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10115+#define __put_user(x, ptr) put_user((x), (ptr))
10116+#else
10117 #define __put_user(x, ptr) \
10118 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10119+#endif
10120
10121 #define __get_user_unaligned __get_user
10122 #define __put_user_unaligned __put_user
10123@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10124 #define get_user_ex(x, ptr) do { \
10125 unsigned long __gue_val; \
10126 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10127- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10128+ (x) = (__typeof__(*(ptr)))__gue_val; \
10129 } while (0)
10130
10131 #ifdef CONFIG_X86_WP_WORKS_OK
10132diff -urNp linux-3.0.7/arch/x86/include/asm/vdso.h linux-3.0.7/arch/x86/include/asm/vdso.h
10133--- linux-3.0.7/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10134+++ linux-3.0.7/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10135@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10136 #define VDSO32_SYMBOL(base, name) \
10137 ({ \
10138 extern const char VDSO32_##name[]; \
10139- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10140+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10141 })
10142 #endif
10143
10144diff -urNp linux-3.0.7/arch/x86/include/asm/x86_init.h linux-3.0.7/arch/x86/include/asm/x86_init.h
10145--- linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10146+++ linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10147@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10148 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10149 void (*find_smp_config)(void);
10150 void (*get_smp_config)(unsigned int early);
10151-};
10152+} __no_const;
10153
10154 /**
10155 * struct x86_init_resources - platform specific resource related ops
10156@@ -42,7 +42,7 @@ struct x86_init_resources {
10157 void (*probe_roms)(void);
10158 void (*reserve_resources)(void);
10159 char *(*memory_setup)(void);
10160-};
10161+} __no_const;
10162
10163 /**
10164 * struct x86_init_irqs - platform specific interrupt setup
10165@@ -55,7 +55,7 @@ struct x86_init_irqs {
10166 void (*pre_vector_init)(void);
10167 void (*intr_init)(void);
10168 void (*trap_init)(void);
10169-};
10170+} __no_const;
10171
10172 /**
10173 * struct x86_init_oem - oem platform specific customizing functions
10174@@ -65,7 +65,7 @@ struct x86_init_irqs {
10175 struct x86_init_oem {
10176 void (*arch_setup)(void);
10177 void (*banner)(void);
10178-};
10179+} __no_const;
10180
10181 /**
10182 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10183@@ -76,7 +76,7 @@ struct x86_init_oem {
10184 */
10185 struct x86_init_mapping {
10186 void (*pagetable_reserve)(u64 start, u64 end);
10187-};
10188+} __no_const;
10189
10190 /**
10191 * struct x86_init_paging - platform specific paging functions
10192@@ -86,7 +86,7 @@ struct x86_init_mapping {
10193 struct x86_init_paging {
10194 void (*pagetable_setup_start)(pgd_t *base);
10195 void (*pagetable_setup_done)(pgd_t *base);
10196-};
10197+} __no_const;
10198
10199 /**
10200 * struct x86_init_timers - platform specific timer setup
10201@@ -101,7 +101,7 @@ struct x86_init_timers {
10202 void (*tsc_pre_init)(void);
10203 void (*timer_init)(void);
10204 void (*wallclock_init)(void);
10205-};
10206+} __no_const;
10207
10208 /**
10209 * struct x86_init_iommu - platform specific iommu setup
10210@@ -109,7 +109,7 @@ struct x86_init_timers {
10211 */
10212 struct x86_init_iommu {
10213 int (*iommu_init)(void);
10214-};
10215+} __no_const;
10216
10217 /**
10218 * struct x86_init_pci - platform specific pci init functions
10219@@ -123,7 +123,7 @@ struct x86_init_pci {
10220 int (*init)(void);
10221 void (*init_irq)(void);
10222 void (*fixup_irqs)(void);
10223-};
10224+} __no_const;
10225
10226 /**
10227 * struct x86_init_ops - functions for platform specific setup
10228@@ -139,7 +139,7 @@ struct x86_init_ops {
10229 struct x86_init_timers timers;
10230 struct x86_init_iommu iommu;
10231 struct x86_init_pci pci;
10232-};
10233+} __no_const;
10234
10235 /**
10236 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10237@@ -147,7 +147,7 @@ struct x86_init_ops {
10238 */
10239 struct x86_cpuinit_ops {
10240 void (*setup_percpu_clockev)(void);
10241-};
10242+} __no_const;
10243
10244 /**
10245 * struct x86_platform_ops - platform specific runtime functions
10246@@ -166,7 +166,7 @@ struct x86_platform_ops {
10247 bool (*is_untracked_pat_range)(u64 start, u64 end);
10248 void (*nmi_init)(void);
10249 int (*i8042_detect)(void);
10250-};
10251+} __no_const;
10252
10253 struct pci_dev;
10254
10255@@ -174,7 +174,7 @@ struct x86_msi_ops {
10256 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10257 void (*teardown_msi_irq)(unsigned int irq);
10258 void (*teardown_msi_irqs)(struct pci_dev *dev);
10259-};
10260+} __no_const;
10261
10262 extern struct x86_init_ops x86_init;
10263 extern struct x86_cpuinit_ops x86_cpuinit;
10264diff -urNp linux-3.0.7/arch/x86/include/asm/xsave.h linux-3.0.7/arch/x86/include/asm/xsave.h
10265--- linux-3.0.7/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10266+++ linux-3.0.7/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10267@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10268 {
10269 int err;
10270
10271+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10272+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10273+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10274+#endif
10275+
10276 /*
10277 * Clear the xsave header first, so that reserved fields are
10278 * initialized to zero.
10279@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10280 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10281 {
10282 int err;
10283- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10284+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10285 u32 lmask = mask;
10286 u32 hmask = mask >> 32;
10287
10288+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10289+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10290+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10291+#endif
10292+
10293 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10294 "2:\n"
10295 ".section .fixup,\"ax\"\n"
10296diff -urNp linux-3.0.7/arch/x86/Kconfig linux-3.0.7/arch/x86/Kconfig
10297--- linux-3.0.7/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10298+++ linux-3.0.7/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10299@@ -229,7 +229,7 @@ config X86_HT
10300
10301 config X86_32_LAZY_GS
10302 def_bool y
10303- depends on X86_32 && !CC_STACKPROTECTOR
10304+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10305
10306 config ARCH_HWEIGHT_CFLAGS
10307 string
10308@@ -1018,7 +1018,7 @@ choice
10309
10310 config NOHIGHMEM
10311 bool "off"
10312- depends on !X86_NUMAQ
10313+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10314 ---help---
10315 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10316 However, the address space of 32-bit x86 processors is only 4
10317@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10318
10319 config HIGHMEM4G
10320 bool "4GB"
10321- depends on !X86_NUMAQ
10322+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10323 ---help---
10324 Select this if you have a 32-bit processor and between 1 and 4
10325 gigabytes of physical RAM.
10326@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10327 hex
10328 default 0xB0000000 if VMSPLIT_3G_OPT
10329 default 0x80000000 if VMSPLIT_2G
10330- default 0x78000000 if VMSPLIT_2G_OPT
10331+ default 0x70000000 if VMSPLIT_2G_OPT
10332 default 0x40000000 if VMSPLIT_1G
10333 default 0xC0000000
10334 depends on X86_32
10335@@ -1483,6 +1483,7 @@ config SECCOMP
10336
10337 config CC_STACKPROTECTOR
10338 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10339+ depends on X86_64 || !PAX_MEMORY_UDEREF
10340 ---help---
10341 This option turns on the -fstack-protector GCC feature. This
10342 feature puts, at the beginning of functions, a canary value on
10343@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10344 config PHYSICAL_START
10345 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10346 default "0x1000000"
10347+ range 0x400000 0x40000000
10348 ---help---
10349 This gives the physical address where the kernel is loaded.
10350
10351@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10352 config PHYSICAL_ALIGN
10353 hex "Alignment value to which kernel should be aligned" if X86_32
10354 default "0x1000000"
10355+ range 0x400000 0x1000000 if PAX_KERNEXEC
10356 range 0x2000 0x1000000
10357 ---help---
10358 This value puts the alignment restrictions on physical address
10359@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10360 Say N if you want to disable CPU hotplug.
10361
10362 config COMPAT_VDSO
10363- def_bool y
10364+ def_bool n
10365 prompt "Compat VDSO support"
10366 depends on X86_32 || IA32_EMULATION
10367+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10368 ---help---
10369 Map the 32-bit VDSO to the predictable old-style address too.
10370
10371diff -urNp linux-3.0.7/arch/x86/Kconfig.cpu linux-3.0.7/arch/x86/Kconfig.cpu
10372--- linux-3.0.7/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10373+++ linux-3.0.7/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10374@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10375
10376 config X86_F00F_BUG
10377 def_bool y
10378- depends on M586MMX || M586TSC || M586 || M486 || M386
10379+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10380
10381 config X86_INVD_BUG
10382 def_bool y
10383@@ -362,7 +362,7 @@ config X86_POPAD_OK
10384
10385 config X86_ALIGNMENT_16
10386 def_bool y
10387- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10388+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10389
10390 config X86_INTEL_USERCOPY
10391 def_bool y
10392@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10393 # generates cmov.
10394 config X86_CMOV
10395 def_bool y
10396- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10397+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10398
10399 config X86_MINIMUM_CPU_FAMILY
10400 int
10401diff -urNp linux-3.0.7/arch/x86/Kconfig.debug linux-3.0.7/arch/x86/Kconfig.debug
10402--- linux-3.0.7/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10403+++ linux-3.0.7/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10404@@ -81,7 +81,7 @@ config X86_PTDUMP
10405 config DEBUG_RODATA
10406 bool "Write protect kernel read-only data structures"
10407 default y
10408- depends on DEBUG_KERNEL
10409+ depends on DEBUG_KERNEL && BROKEN
10410 ---help---
10411 Mark the kernel read-only data as write-protected in the pagetables,
10412 in order to catch accidental (and incorrect) writes to such const
10413@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10414
10415 config DEBUG_SET_MODULE_RONX
10416 bool "Set loadable kernel module data as NX and text as RO"
10417- depends on MODULES
10418+ depends on MODULES && BROKEN
10419 ---help---
10420 This option helps catch unintended modifications to loadable
10421 kernel module's text and read-only data. It also prevents execution
10422diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile
10423--- linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10424+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10425@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10426 $(call cc-option, -fno-stack-protector) \
10427 $(call cc-option, -mpreferred-stack-boundary=2)
10428 KBUILD_CFLAGS += $(call cc-option, -m32)
10429+ifdef CONSTIFY_PLUGIN
10430+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10431+endif
10432 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10433 GCOV_PROFILE := n
10434
10435diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S
10436--- linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10437+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10438@@ -108,6 +108,9 @@ wakeup_code:
10439 /* Do any other stuff... */
10440
10441 #ifndef CONFIG_64BIT
10442+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10443+ call verify_cpu
10444+
10445 /* This could also be done in C code... */
10446 movl pmode_cr3, %eax
10447 movl %eax, %cr3
10448@@ -131,6 +134,7 @@ wakeup_code:
10449 movl pmode_cr0, %eax
10450 movl %eax, %cr0
10451 jmp pmode_return
10452+# include "../../verify_cpu.S"
10453 #else
10454 pushw $0
10455 pushw trampoline_segment
10456diff -urNp linux-3.0.7/arch/x86/kernel/acpi/sleep.c linux-3.0.7/arch/x86/kernel/acpi/sleep.c
10457--- linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10458+++ linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10459@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10460 header->trampoline_segment = trampoline_address() >> 4;
10461 #ifdef CONFIG_SMP
10462 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10463+
10464+ pax_open_kernel();
10465 early_gdt_descr.address =
10466 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10467+ pax_close_kernel();
10468+
10469 initial_gs = per_cpu_offset(smp_processor_id());
10470 #endif
10471 initial_code = (unsigned long)wakeup_long64;
10472diff -urNp linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S
10473--- linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10474+++ linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10475@@ -30,13 +30,11 @@ wakeup_pmode_return:
10476 # and restore the stack ... but you need gdt for this to work
10477 movl saved_context_esp, %esp
10478
10479- movl %cs:saved_magic, %eax
10480- cmpl $0x12345678, %eax
10481+ cmpl $0x12345678, saved_magic
10482 jne bogus_magic
10483
10484 # jump to place where we left off
10485- movl saved_eip, %eax
10486- jmp *%eax
10487+ jmp *(saved_eip)
10488
10489 bogus_magic:
10490 jmp bogus_magic
10491diff -urNp linux-3.0.7/arch/x86/kernel/alternative.c linux-3.0.7/arch/x86/kernel/alternative.c
10492--- linux-3.0.7/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10493+++ linux-3.0.7/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10494@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10495 if (!*poff || ptr < text || ptr >= text_end)
10496 continue;
10497 /* turn DS segment override prefix into lock prefix */
10498- if (*ptr == 0x3e)
10499+ if (*ktla_ktva(ptr) == 0x3e)
10500 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10501 };
10502 mutex_unlock(&text_mutex);
10503@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10504 if (!*poff || ptr < text || ptr >= text_end)
10505 continue;
10506 /* turn lock prefix into DS segment override prefix */
10507- if (*ptr == 0xf0)
10508+ if (*ktla_ktva(ptr) == 0xf0)
10509 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10510 };
10511 mutex_unlock(&text_mutex);
10512@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10513
10514 BUG_ON(p->len > MAX_PATCH_LEN);
10515 /* prep the buffer with the original instructions */
10516- memcpy(insnbuf, p->instr, p->len);
10517+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10518 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10519 (unsigned long)p->instr, p->len);
10520
10521@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10522 if (smp_alt_once)
10523 free_init_pages("SMP alternatives",
10524 (unsigned long)__smp_locks,
10525- (unsigned long)__smp_locks_end);
10526+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10527
10528 restart_nmi();
10529 }
10530@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10531 * instructions. And on the local CPU you need to be protected again NMI or MCE
10532 * handlers seeing an inconsistent instruction while you patch.
10533 */
10534-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10535+void *__kprobes text_poke_early(void *addr, const void *opcode,
10536 size_t len)
10537 {
10538 unsigned long flags;
10539 local_irq_save(flags);
10540- memcpy(addr, opcode, len);
10541+
10542+ pax_open_kernel();
10543+ memcpy(ktla_ktva(addr), opcode, len);
10544 sync_core();
10545+ pax_close_kernel();
10546+
10547 local_irq_restore(flags);
10548 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10549 that causes hangs on some VIA CPUs. */
10550@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10551 */
10552 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10553 {
10554- unsigned long flags;
10555- char *vaddr;
10556+ unsigned char *vaddr = ktla_ktva(addr);
10557 struct page *pages[2];
10558- int i;
10559+ size_t i;
10560
10561 if (!core_kernel_text((unsigned long)addr)) {
10562- pages[0] = vmalloc_to_page(addr);
10563- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10564+ pages[0] = vmalloc_to_page(vaddr);
10565+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10566 } else {
10567- pages[0] = virt_to_page(addr);
10568+ pages[0] = virt_to_page(vaddr);
10569 WARN_ON(!PageReserved(pages[0]));
10570- pages[1] = virt_to_page(addr + PAGE_SIZE);
10571+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10572 }
10573 BUG_ON(!pages[0]);
10574- local_irq_save(flags);
10575- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10576- if (pages[1])
10577- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10578- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10579- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10580- clear_fixmap(FIX_TEXT_POKE0);
10581- if (pages[1])
10582- clear_fixmap(FIX_TEXT_POKE1);
10583- local_flush_tlb();
10584- sync_core();
10585- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10586- that causes hangs on some VIA CPUs. */
10587+ text_poke_early(addr, opcode, len);
10588 for (i = 0; i < len; i++)
10589- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10590- local_irq_restore(flags);
10591+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10592 return addr;
10593 }
10594
10595diff -urNp linux-3.0.7/arch/x86/kernel/apic/apic.c linux-3.0.7/arch/x86/kernel/apic/apic.c
10596--- linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10597+++ linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10598@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10599 /*
10600 * Debug level, exported for io_apic.c
10601 */
10602-unsigned int apic_verbosity;
10603+int apic_verbosity;
10604
10605 int pic_mode;
10606
10607@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10608 apic_write(APIC_ESR, 0);
10609 v1 = apic_read(APIC_ESR);
10610 ack_APIC_irq();
10611- atomic_inc(&irq_err_count);
10612+ atomic_inc_unchecked(&irq_err_count);
10613
10614 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10615 smp_processor_id(), v0 , v1);
10616@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10617 u16 *bios_cpu_apicid;
10618 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10619
10620+ pax_track_stack();
10621+
10622 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10623 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10624
10625diff -urNp linux-3.0.7/arch/x86/kernel/apic/io_apic.c linux-3.0.7/arch/x86/kernel/apic/io_apic.c
10626--- linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10627+++ linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10628@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10629 }
10630 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10631
10632-void lock_vector_lock(void)
10633+void lock_vector_lock(void) __acquires(vector_lock)
10634 {
10635 /* Used to the online set of cpus does not change
10636 * during assign_irq_vector.
10637@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10638 raw_spin_lock(&vector_lock);
10639 }
10640
10641-void unlock_vector_lock(void)
10642+void unlock_vector_lock(void) __releases(vector_lock)
10643 {
10644 raw_spin_unlock(&vector_lock);
10645 }
10646@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10647 ack_APIC_irq();
10648 }
10649
10650-atomic_t irq_mis_count;
10651+atomic_unchecked_t irq_mis_count;
10652
10653 /*
10654 * IO-APIC versions below 0x20 don't support EOI register.
10655@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10656 * at the cpu.
10657 */
10658 if (!(v & (1 << (i & 0x1f)))) {
10659- atomic_inc(&irq_mis_count);
10660+ atomic_inc_unchecked(&irq_mis_count);
10661
10662 eoi_ioapic_irq(irq, cfg);
10663 }
10664diff -urNp linux-3.0.7/arch/x86/kernel/apm_32.c linux-3.0.7/arch/x86/kernel/apm_32.c
10665--- linux-3.0.7/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10666+++ linux-3.0.7/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10667@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10668 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10669 * even though they are called in protected mode.
10670 */
10671-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10672+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10673 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10674
10675 static const char driver_version[] = "1.16ac"; /* no spaces */
10676@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10677 BUG_ON(cpu != 0);
10678 gdt = get_cpu_gdt_table(cpu);
10679 save_desc_40 = gdt[0x40 / 8];
10680+
10681+ pax_open_kernel();
10682 gdt[0x40 / 8] = bad_bios_desc;
10683+ pax_close_kernel();
10684
10685 apm_irq_save(flags);
10686 APM_DO_SAVE_SEGS;
10687@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10688 &call->esi);
10689 APM_DO_RESTORE_SEGS;
10690 apm_irq_restore(flags);
10691+
10692+ pax_open_kernel();
10693 gdt[0x40 / 8] = save_desc_40;
10694+ pax_close_kernel();
10695+
10696 put_cpu();
10697
10698 return call->eax & 0xff;
10699@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10700 BUG_ON(cpu != 0);
10701 gdt = get_cpu_gdt_table(cpu);
10702 save_desc_40 = gdt[0x40 / 8];
10703+
10704+ pax_open_kernel();
10705 gdt[0x40 / 8] = bad_bios_desc;
10706+ pax_close_kernel();
10707
10708 apm_irq_save(flags);
10709 APM_DO_SAVE_SEGS;
10710@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10711 &call->eax);
10712 APM_DO_RESTORE_SEGS;
10713 apm_irq_restore(flags);
10714+
10715+ pax_open_kernel();
10716 gdt[0x40 / 8] = save_desc_40;
10717+ pax_close_kernel();
10718+
10719 put_cpu();
10720 return error;
10721 }
10722@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10723 * code to that CPU.
10724 */
10725 gdt = get_cpu_gdt_table(0);
10726+
10727+ pax_open_kernel();
10728 set_desc_base(&gdt[APM_CS >> 3],
10729 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10730 set_desc_base(&gdt[APM_CS_16 >> 3],
10731 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10732 set_desc_base(&gdt[APM_DS >> 3],
10733 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10734+ pax_close_kernel();
10735
10736 proc_create("apm", 0, NULL, &apm_file_ops);
10737
10738diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets_64.c linux-3.0.7/arch/x86/kernel/asm-offsets_64.c
10739--- linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10740+++ linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10741@@ -69,6 +69,7 @@ int main(void)
10742 BLANK();
10743 #undef ENTRY
10744
10745+ DEFINE(TSS_size, sizeof(struct tss_struct));
10746 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10747 BLANK();
10748
10749diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets.c linux-3.0.7/arch/x86/kernel/asm-offsets.c
10750--- linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10751+++ linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10752@@ -33,6 +33,8 @@ void common(void) {
10753 OFFSET(TI_status, thread_info, status);
10754 OFFSET(TI_addr_limit, thread_info, addr_limit);
10755 OFFSET(TI_preempt_count, thread_info, preempt_count);
10756+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10757+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10758
10759 BLANK();
10760 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10761@@ -53,8 +55,26 @@ void common(void) {
10762 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10763 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10764 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10765+
10766+#ifdef CONFIG_PAX_KERNEXEC
10767+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10768+#endif
10769+
10770+#ifdef CONFIG_PAX_MEMORY_UDEREF
10771+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10772+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10773+#ifdef CONFIG_X86_64
10774+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10775+#endif
10776 #endif
10777
10778+#endif
10779+
10780+ BLANK();
10781+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10782+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10783+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10784+
10785 #ifdef CONFIG_XEN
10786 BLANK();
10787 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10788diff -urNp linux-3.0.7/arch/x86/kernel/cpu/amd.c linux-3.0.7/arch/x86/kernel/cpu/amd.c
10789--- linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10790+++ linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10791@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10792 unsigned int size)
10793 {
10794 /* AMD errata T13 (order #21922) */
10795- if ((c->x86 == 6)) {
10796+ if (c->x86 == 6) {
10797 /* Duron Rev A0 */
10798 if (c->x86_model == 3 && c->x86_mask == 0)
10799 size = 64;
10800diff -urNp linux-3.0.7/arch/x86/kernel/cpu/common.c linux-3.0.7/arch/x86/kernel/cpu/common.c
10801--- linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10802+++ linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10803@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10804
10805 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10806
10807-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10808-#ifdef CONFIG_X86_64
10809- /*
10810- * We need valid kernel segments for data and code in long mode too
10811- * IRET will check the segment types kkeil 2000/10/28
10812- * Also sysret mandates a special GDT layout
10813- *
10814- * TLS descriptors are currently at a different place compared to i386.
10815- * Hopefully nobody expects them at a fixed place (Wine?)
10816- */
10817- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10818- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10819- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10820- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10821- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10822- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10823-#else
10824- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10825- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10826- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10827- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10828- /*
10829- * Segments used for calling PnP BIOS have byte granularity.
10830- * They code segments and data segments have fixed 64k limits,
10831- * the transfer segment sizes are set at run time.
10832- */
10833- /* 32-bit code */
10834- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10835- /* 16-bit code */
10836- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10837- /* 16-bit data */
10838- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10839- /* 16-bit data */
10840- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10841- /* 16-bit data */
10842- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10843- /*
10844- * The APM segments have byte granularity and their bases
10845- * are set at run time. All have 64k limits.
10846- */
10847- /* 32-bit code */
10848- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10849- /* 16-bit code */
10850- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10851- /* data */
10852- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10853-
10854- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10855- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10856- GDT_STACK_CANARY_INIT
10857-#endif
10858-} };
10859-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10860-
10861 static int __init x86_xsave_setup(char *s)
10862 {
10863 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10864@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10865 {
10866 struct desc_ptr gdt_descr;
10867
10868- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10869+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10870 gdt_descr.size = GDT_SIZE - 1;
10871 load_gdt(&gdt_descr);
10872 /* Reload the per-cpu base */
10873@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10874 /* Filter out anything that depends on CPUID levels we don't have */
10875 filter_cpuid_features(c, true);
10876
10877+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10878+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10879+#endif
10880+
10881 /* If the model name is still unset, do table lookup. */
10882 if (!c->x86_model_id[0]) {
10883 const char *p;
10884@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10885 }
10886 __setup("clearcpuid=", setup_disablecpuid);
10887
10888+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10889+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10890+
10891 #ifdef CONFIG_X86_64
10892 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10893
10894@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10895 EXPORT_PER_CPU_SYMBOL(current_task);
10896
10897 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10898- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10899+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10900 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10901
10902 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10903@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10904 {
10905 memset(regs, 0, sizeof(struct pt_regs));
10906 regs->fs = __KERNEL_PERCPU;
10907- regs->gs = __KERNEL_STACK_CANARY;
10908+ savesegment(gs, regs->gs);
10909
10910 return regs;
10911 }
10912@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10913 int i;
10914
10915 cpu = stack_smp_processor_id();
10916- t = &per_cpu(init_tss, cpu);
10917+ t = init_tss + cpu;
10918 oist = &per_cpu(orig_ist, cpu);
10919
10920 #ifdef CONFIG_NUMA
10921@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10922 switch_to_new_gdt(cpu);
10923 loadsegment(fs, 0);
10924
10925- load_idt((const struct desc_ptr *)&idt_descr);
10926+ load_idt(&idt_descr);
10927
10928 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10929 syscall_init();
10930@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10931 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10932 barrier();
10933
10934- x86_configure_nx();
10935 if (cpu != 0)
10936 enable_x2apic();
10937
10938@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10939 {
10940 int cpu = smp_processor_id();
10941 struct task_struct *curr = current;
10942- struct tss_struct *t = &per_cpu(init_tss, cpu);
10943+ struct tss_struct *t = init_tss + cpu;
10944 struct thread_struct *thread = &curr->thread;
10945
10946 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10947diff -urNp linux-3.0.7/arch/x86/kernel/cpu/intel.c linux-3.0.7/arch/x86/kernel/cpu/intel.c
10948--- linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10949+++ linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10950@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10951 * Update the IDT descriptor and reload the IDT so that
10952 * it uses the read-only mapped virtual address.
10953 */
10954- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10955+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10956 load_idt(&idt_descr);
10957 }
10958 #endif
10959diff -urNp linux-3.0.7/arch/x86/kernel/cpu/Makefile linux-3.0.7/arch/x86/kernel/cpu/Makefile
10960--- linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10961+++ linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10962@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10963 CFLAGS_REMOVE_perf_event.o = -pg
10964 endif
10965
10966-# Make sure load_percpu_segment has no stackprotector
10967-nostackp := $(call cc-option, -fno-stack-protector)
10968-CFLAGS_common.o := $(nostackp)
10969-
10970 obj-y := intel_cacheinfo.o scattered.o topology.o
10971 obj-y += proc.o capflags.o powerflags.o common.o
10972 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10973diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c
10974--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10975+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10976@@ -46,6 +46,7 @@
10977 #include <asm/ipi.h>
10978 #include <asm/mce.h>
10979 #include <asm/msr.h>
10980+#include <asm/local.h>
10981
10982 #include "mce-internal.h"
10983
10984@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10985 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10986 m->cs, m->ip);
10987
10988- if (m->cs == __KERNEL_CS)
10989+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10990 print_symbol("{%s}", m->ip);
10991 pr_cont("\n");
10992 }
10993@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10994
10995 #define PANIC_TIMEOUT 5 /* 5 seconds */
10996
10997-static atomic_t mce_paniced;
10998+static atomic_unchecked_t mce_paniced;
10999
11000 static int fake_panic;
11001-static atomic_t mce_fake_paniced;
11002+static atomic_unchecked_t mce_fake_paniced;
11003
11004 /* Panic in progress. Enable interrupts and wait for final IPI */
11005 static void wait_for_panic(void)
11006@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11007 /*
11008 * Make sure only one CPU runs in machine check panic
11009 */
11010- if (atomic_inc_return(&mce_paniced) > 1)
11011+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11012 wait_for_panic();
11013 barrier();
11014
11015@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11016 console_verbose();
11017 } else {
11018 /* Don't log too much for fake panic */
11019- if (atomic_inc_return(&mce_fake_paniced) > 1)
11020+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11021 return;
11022 }
11023 /* First print corrected ones that are still unlogged */
11024@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11025 * might have been modified by someone else.
11026 */
11027 rmb();
11028- if (atomic_read(&mce_paniced))
11029+ if (atomic_read_unchecked(&mce_paniced))
11030 wait_for_panic();
11031 if (!monarch_timeout)
11032 goto out;
11033@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11034 */
11035
11036 static DEFINE_SPINLOCK(mce_state_lock);
11037-static int open_count; /* #times opened */
11038+static local_t open_count; /* #times opened */
11039 static int open_exclu; /* already open exclusive? */
11040
11041 static int mce_open(struct inode *inode, struct file *file)
11042 {
11043 spin_lock(&mce_state_lock);
11044
11045- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11046+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11047 spin_unlock(&mce_state_lock);
11048
11049 return -EBUSY;
11050@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11051
11052 if (file->f_flags & O_EXCL)
11053 open_exclu = 1;
11054- open_count++;
11055+ local_inc(&open_count);
11056
11057 spin_unlock(&mce_state_lock);
11058
11059@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11060 {
11061 spin_lock(&mce_state_lock);
11062
11063- open_count--;
11064+ local_dec(&open_count);
11065 open_exclu = 0;
11066
11067 spin_unlock(&mce_state_lock);
11068@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11069 static void mce_reset(void)
11070 {
11071 cpu_missing = 0;
11072- atomic_set(&mce_fake_paniced, 0);
11073+ atomic_set_unchecked(&mce_fake_paniced, 0);
11074 atomic_set(&mce_executing, 0);
11075 atomic_set(&mce_callin, 0);
11076 atomic_set(&global_nwo, 0);
11077diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c
11078--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11079+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11080@@ -215,7 +215,9 @@ static int inject_init(void)
11081 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11082 return -ENOMEM;
11083 printk(KERN_INFO "Machine check injector initialized\n");
11084- mce_chrdev_ops.write = mce_write;
11085+ pax_open_kernel();
11086+ *(void **)&mce_chrdev_ops.write = mce_write;
11087+ pax_close_kernel();
11088 register_die_notifier(&mce_raise_nb);
11089 return 0;
11090 }
11091diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c
11092--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11093+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11094@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11095 u64 size_or_mask, size_and_mask;
11096 static bool mtrr_aps_delayed_init;
11097
11098-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11099+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11100
11101 const struct mtrr_ops *mtrr_if;
11102
11103diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h
11104--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11105+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11106@@ -25,7 +25,7 @@ struct mtrr_ops {
11107 int (*validate_add_page)(unsigned long base, unsigned long size,
11108 unsigned int type);
11109 int (*have_wrcomb)(void);
11110-};
11111+} __do_const;
11112
11113 extern int generic_get_free_region(unsigned long base, unsigned long size,
11114 int replace_reg);
11115diff -urNp linux-3.0.7/arch/x86/kernel/cpu/perf_event.c linux-3.0.7/arch/x86/kernel/cpu/perf_event.c
11116--- linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:54:53.000000000 -0400
11117+++ linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:55:27.000000000 -0400
11118@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11119 int i, j, w, wmax, num = 0;
11120 struct hw_perf_event *hwc;
11121
11122+ pax_track_stack();
11123+
11124 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11125
11126 for (i = 0; i < n; i++) {
11127@@ -1875,7 +1877,7 @@ perf_callchain_user(struct perf_callchai
11128 break;
11129
11130 perf_callchain_store(entry, frame.return_address);
11131- fp = frame.next_frame;
11132+ fp = (const void __force_user *)frame.next_frame;
11133 }
11134 }
11135
11136diff -urNp linux-3.0.7/arch/x86/kernel/crash.c linux-3.0.7/arch/x86/kernel/crash.c
11137--- linux-3.0.7/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11138+++ linux-3.0.7/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11139@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11140 regs = args->regs;
11141
11142 #ifdef CONFIG_X86_32
11143- if (!user_mode_vm(regs)) {
11144+ if (!user_mode(regs)) {
11145 crash_fixup_ss_esp(&fixed_regs, regs);
11146 regs = &fixed_regs;
11147 }
11148diff -urNp linux-3.0.7/arch/x86/kernel/doublefault_32.c linux-3.0.7/arch/x86/kernel/doublefault_32.c
11149--- linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11150+++ linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11151@@ -11,7 +11,7 @@
11152
11153 #define DOUBLEFAULT_STACKSIZE (1024)
11154 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11155-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11156+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11157
11158 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11159
11160@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11161 unsigned long gdt, tss;
11162
11163 store_gdt(&gdt_desc);
11164- gdt = gdt_desc.address;
11165+ gdt = (unsigned long)gdt_desc.address;
11166
11167 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11168
11169@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11170 /* 0x2 bit is always set */
11171 .flags = X86_EFLAGS_SF | 0x2,
11172 .sp = STACK_START,
11173- .es = __USER_DS,
11174+ .es = __KERNEL_DS,
11175 .cs = __KERNEL_CS,
11176 .ss = __KERNEL_DS,
11177- .ds = __USER_DS,
11178+ .ds = __KERNEL_DS,
11179 .fs = __KERNEL_PERCPU,
11180
11181 .__cr3 = __pa_nodebug(swapper_pg_dir),
11182diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_32.c linux-3.0.7/arch/x86/kernel/dumpstack_32.c
11183--- linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11184+++ linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11185@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11186 bp = stack_frame(task, regs);
11187
11188 for (;;) {
11189- struct thread_info *context;
11190+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11191
11192- context = (struct thread_info *)
11193- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11194- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11195+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11196
11197- stack = (unsigned long *)context->previous_esp;
11198- if (!stack)
11199+ if (stack_start == task_stack_page(task))
11200 break;
11201+ stack = *(unsigned long **)stack_start;
11202 if (ops->stack(data, "IRQ") < 0)
11203 break;
11204 touch_nmi_watchdog();
11205@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11206 * When in-kernel, we also print out the stack and code at the
11207 * time of the fault..
11208 */
11209- if (!user_mode_vm(regs)) {
11210+ if (!user_mode(regs)) {
11211 unsigned int code_prologue = code_bytes * 43 / 64;
11212 unsigned int code_len = code_bytes;
11213 unsigned char c;
11214 u8 *ip;
11215+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11216
11217 printk(KERN_EMERG "Stack:\n");
11218 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11219
11220 printk(KERN_EMERG "Code: ");
11221
11222- ip = (u8 *)regs->ip - code_prologue;
11223+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11224 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11225 /* try starting at IP */
11226- ip = (u8 *)regs->ip;
11227+ ip = (u8 *)regs->ip + cs_base;
11228 code_len = code_len - code_prologue + 1;
11229 }
11230 for (i = 0; i < code_len; i++, ip++) {
11231@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11232 printk(" Bad EIP value.");
11233 break;
11234 }
11235- if (ip == (u8 *)regs->ip)
11236+ if (ip == (u8 *)regs->ip + cs_base)
11237 printk("<%02x> ", c);
11238 else
11239 printk("%02x ", c);
11240@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11241 {
11242 unsigned short ud2;
11243
11244+ ip = ktla_ktva(ip);
11245 if (ip < PAGE_OFFSET)
11246 return 0;
11247 if (probe_kernel_address((unsigned short *)ip, ud2))
11248diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_64.c linux-3.0.7/arch/x86/kernel/dumpstack_64.c
11249--- linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11250+++ linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11251@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11252 unsigned long *irq_stack_end =
11253 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11254 unsigned used = 0;
11255- struct thread_info *tinfo;
11256 int graph = 0;
11257 unsigned long dummy;
11258+ void *stack_start;
11259
11260 if (!task)
11261 task = current;
11262@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11263 * current stack address. If the stacks consist of nested
11264 * exceptions
11265 */
11266- tinfo = task_thread_info(task);
11267 for (;;) {
11268 char *id;
11269 unsigned long *estack_end;
11270+
11271 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11272 &used, &id);
11273
11274@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11275 if (ops->stack(data, id) < 0)
11276 break;
11277
11278- bp = ops->walk_stack(tinfo, stack, bp, ops,
11279+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11280 data, estack_end, &graph);
11281 ops->stack(data, "<EOE>");
11282 /*
11283@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11284 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11285 if (ops->stack(data, "IRQ") < 0)
11286 break;
11287- bp = ops->walk_stack(tinfo, stack, bp,
11288+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11289 ops, data, irq_stack_end, &graph);
11290 /*
11291 * We link to the next stack (which would be
11292@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11293 /*
11294 * This handles the process stack:
11295 */
11296- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11297+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11298+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11299 put_cpu();
11300 }
11301 EXPORT_SYMBOL(dump_trace);
11302diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack.c linux-3.0.7/arch/x86/kernel/dumpstack.c
11303--- linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11304+++ linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11305@@ -2,6 +2,9 @@
11306 * Copyright (C) 1991, 1992 Linus Torvalds
11307 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11308 */
11309+#ifdef CONFIG_GRKERNSEC_HIDESYM
11310+#define __INCLUDED_BY_HIDESYM 1
11311+#endif
11312 #include <linux/kallsyms.h>
11313 #include <linux/kprobes.h>
11314 #include <linux/uaccess.h>
11315@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11316 static void
11317 print_ftrace_graph_addr(unsigned long addr, void *data,
11318 const struct stacktrace_ops *ops,
11319- struct thread_info *tinfo, int *graph)
11320+ struct task_struct *task, int *graph)
11321 {
11322- struct task_struct *task = tinfo->task;
11323 unsigned long ret_addr;
11324 int index = task->curr_ret_stack;
11325
11326@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11327 static inline void
11328 print_ftrace_graph_addr(unsigned long addr, void *data,
11329 const struct stacktrace_ops *ops,
11330- struct thread_info *tinfo, int *graph)
11331+ struct task_struct *task, int *graph)
11332 { }
11333 #endif
11334
11335@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11336 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11337 */
11338
11339-static inline int valid_stack_ptr(struct thread_info *tinfo,
11340- void *p, unsigned int size, void *end)
11341+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11342 {
11343- void *t = tinfo;
11344 if (end) {
11345 if (p < end && p >= (end-THREAD_SIZE))
11346 return 1;
11347@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11348 }
11349
11350 unsigned long
11351-print_context_stack(struct thread_info *tinfo,
11352+print_context_stack(struct task_struct *task, void *stack_start,
11353 unsigned long *stack, unsigned long bp,
11354 const struct stacktrace_ops *ops, void *data,
11355 unsigned long *end, int *graph)
11356 {
11357 struct stack_frame *frame = (struct stack_frame *)bp;
11358
11359- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11360+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11361 unsigned long addr;
11362
11363 addr = *stack;
11364@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11365 } else {
11366 ops->address(data, addr, 0);
11367 }
11368- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11369+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11370 }
11371 stack++;
11372 }
11373@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11374 EXPORT_SYMBOL_GPL(print_context_stack);
11375
11376 unsigned long
11377-print_context_stack_bp(struct thread_info *tinfo,
11378+print_context_stack_bp(struct task_struct *task, void *stack_start,
11379 unsigned long *stack, unsigned long bp,
11380 const struct stacktrace_ops *ops, void *data,
11381 unsigned long *end, int *graph)
11382@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11383 struct stack_frame *frame = (struct stack_frame *)bp;
11384 unsigned long *ret_addr = &frame->return_address;
11385
11386- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11387+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11388 unsigned long addr = *ret_addr;
11389
11390 if (!__kernel_text_address(addr))
11391@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11392 ops->address(data, addr, 1);
11393 frame = frame->next_frame;
11394 ret_addr = &frame->return_address;
11395- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11396+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11397 }
11398
11399 return (unsigned long)frame;
11400@@ -186,7 +186,7 @@ void dump_stack(void)
11401
11402 bp = stack_frame(current, NULL);
11403 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11404- current->pid, current->comm, print_tainted(),
11405+ task_pid_nr(current), current->comm, print_tainted(),
11406 init_utsname()->release,
11407 (int)strcspn(init_utsname()->version, " "),
11408 init_utsname()->version);
11409@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11410 }
11411 EXPORT_SYMBOL_GPL(oops_begin);
11412
11413+extern void gr_handle_kernel_exploit(void);
11414+
11415 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11416 {
11417 if (regs && kexec_should_crash(current))
11418@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11419 panic("Fatal exception in interrupt");
11420 if (panic_on_oops)
11421 panic("Fatal exception");
11422- do_exit(signr);
11423+
11424+ gr_handle_kernel_exploit();
11425+
11426+ do_group_exit(signr);
11427 }
11428
11429 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11430@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11431
11432 show_registers(regs);
11433 #ifdef CONFIG_X86_32
11434- if (user_mode_vm(regs)) {
11435+ if (user_mode(regs)) {
11436 sp = regs->sp;
11437 ss = regs->ss & 0xffff;
11438 } else {
11439@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11440 unsigned long flags = oops_begin();
11441 int sig = SIGSEGV;
11442
11443- if (!user_mode_vm(regs))
11444+ if (!user_mode(regs))
11445 report_bug(regs->ip, regs);
11446
11447 if (__die(str, regs, err))
11448diff -urNp linux-3.0.7/arch/x86/kernel/early_printk.c linux-3.0.7/arch/x86/kernel/early_printk.c
11449--- linux-3.0.7/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11450+++ linux-3.0.7/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11451@@ -7,6 +7,7 @@
11452 #include <linux/pci_regs.h>
11453 #include <linux/pci_ids.h>
11454 #include <linux/errno.h>
11455+#include <linux/sched.h>
11456 #include <asm/io.h>
11457 #include <asm/processor.h>
11458 #include <asm/fcntl.h>
11459@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11460 int n;
11461 va_list ap;
11462
11463+ pax_track_stack();
11464+
11465 va_start(ap, fmt);
11466 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11467 early_console->write(early_console, buf, n);
11468diff -urNp linux-3.0.7/arch/x86/kernel/entry_32.S linux-3.0.7/arch/x86/kernel/entry_32.S
11469--- linux-3.0.7/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11470+++ linux-3.0.7/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11471@@ -185,13 +185,146 @@
11472 /*CFI_REL_OFFSET gs, PT_GS*/
11473 .endm
11474 .macro SET_KERNEL_GS reg
11475+
11476+#ifdef CONFIG_CC_STACKPROTECTOR
11477 movl $(__KERNEL_STACK_CANARY), \reg
11478+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11479+ movl $(__USER_DS), \reg
11480+#else
11481+ xorl \reg, \reg
11482+#endif
11483+
11484 movl \reg, %gs
11485 .endm
11486
11487 #endif /* CONFIG_X86_32_LAZY_GS */
11488
11489-.macro SAVE_ALL
11490+.macro pax_enter_kernel
11491+#ifdef CONFIG_PAX_KERNEXEC
11492+ call pax_enter_kernel
11493+#endif
11494+.endm
11495+
11496+.macro pax_exit_kernel
11497+#ifdef CONFIG_PAX_KERNEXEC
11498+ call pax_exit_kernel
11499+#endif
11500+.endm
11501+
11502+#ifdef CONFIG_PAX_KERNEXEC
11503+ENTRY(pax_enter_kernel)
11504+#ifdef CONFIG_PARAVIRT
11505+ pushl %eax
11506+ pushl %ecx
11507+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11508+ mov %eax, %esi
11509+#else
11510+ mov %cr0, %esi
11511+#endif
11512+ bts $16, %esi
11513+ jnc 1f
11514+ mov %cs, %esi
11515+ cmp $__KERNEL_CS, %esi
11516+ jz 3f
11517+ ljmp $__KERNEL_CS, $3f
11518+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11519+2:
11520+#ifdef CONFIG_PARAVIRT
11521+ mov %esi, %eax
11522+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11523+#else
11524+ mov %esi, %cr0
11525+#endif
11526+3:
11527+#ifdef CONFIG_PARAVIRT
11528+ popl %ecx
11529+ popl %eax
11530+#endif
11531+ ret
11532+ENDPROC(pax_enter_kernel)
11533+
11534+ENTRY(pax_exit_kernel)
11535+#ifdef CONFIG_PARAVIRT
11536+ pushl %eax
11537+ pushl %ecx
11538+#endif
11539+ mov %cs, %esi
11540+ cmp $__KERNEXEC_KERNEL_CS, %esi
11541+ jnz 2f
11542+#ifdef CONFIG_PARAVIRT
11543+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11544+ mov %eax, %esi
11545+#else
11546+ mov %cr0, %esi
11547+#endif
11548+ btr $16, %esi
11549+ ljmp $__KERNEL_CS, $1f
11550+1:
11551+#ifdef CONFIG_PARAVIRT
11552+ mov %esi, %eax
11553+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11554+#else
11555+ mov %esi, %cr0
11556+#endif
11557+2:
11558+#ifdef CONFIG_PARAVIRT
11559+ popl %ecx
11560+ popl %eax
11561+#endif
11562+ ret
11563+ENDPROC(pax_exit_kernel)
11564+#endif
11565+
11566+.macro pax_erase_kstack
11567+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11568+ call pax_erase_kstack
11569+#endif
11570+.endm
11571+
11572+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11573+/*
11574+ * ebp: thread_info
11575+ * ecx, edx: can be clobbered
11576+ */
11577+ENTRY(pax_erase_kstack)
11578+ pushl %edi
11579+ pushl %eax
11580+
11581+ mov TI_lowest_stack(%ebp), %edi
11582+ mov $-0xBEEF, %eax
11583+ std
11584+
11585+1: mov %edi, %ecx
11586+ and $THREAD_SIZE_asm - 1, %ecx
11587+ shr $2, %ecx
11588+ repne scasl
11589+ jecxz 2f
11590+
11591+ cmp $2*16, %ecx
11592+ jc 2f
11593+
11594+ mov $2*16, %ecx
11595+ repe scasl
11596+ jecxz 2f
11597+ jne 1b
11598+
11599+2: cld
11600+ mov %esp, %ecx
11601+ sub %edi, %ecx
11602+ shr $2, %ecx
11603+ rep stosl
11604+
11605+ mov TI_task_thread_sp0(%ebp), %edi
11606+ sub $128, %edi
11607+ mov %edi, TI_lowest_stack(%ebp)
11608+
11609+ popl %eax
11610+ popl %edi
11611+ ret
11612+ENDPROC(pax_erase_kstack)
11613+#endif
11614+
11615+.macro __SAVE_ALL _DS
11616 cld
11617 PUSH_GS
11618 pushl_cfi %fs
11619@@ -214,7 +347,7 @@
11620 CFI_REL_OFFSET ecx, 0
11621 pushl_cfi %ebx
11622 CFI_REL_OFFSET ebx, 0
11623- movl $(__USER_DS), %edx
11624+ movl $\_DS, %edx
11625 movl %edx, %ds
11626 movl %edx, %es
11627 movl $(__KERNEL_PERCPU), %edx
11628@@ -222,6 +355,15 @@
11629 SET_KERNEL_GS %edx
11630 .endm
11631
11632+.macro SAVE_ALL
11633+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11634+ __SAVE_ALL __KERNEL_DS
11635+ pax_enter_kernel
11636+#else
11637+ __SAVE_ALL __USER_DS
11638+#endif
11639+.endm
11640+
11641 .macro RESTORE_INT_REGS
11642 popl_cfi %ebx
11643 CFI_RESTORE ebx
11644@@ -332,7 +474,15 @@ check_userspace:
11645 movb PT_CS(%esp), %al
11646 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11647 cmpl $USER_RPL, %eax
11648+
11649+#ifdef CONFIG_PAX_KERNEXEC
11650+ jae resume_userspace
11651+
11652+ PAX_EXIT_KERNEL
11653+ jmp resume_kernel
11654+#else
11655 jb resume_kernel # not returning to v8086 or userspace
11656+#endif
11657
11658 ENTRY(resume_userspace)
11659 LOCKDEP_SYS_EXIT
11660@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11661 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11662 # int/exception return?
11663 jne work_pending
11664- jmp restore_all
11665+ jmp restore_all_pax
11666 END(ret_from_exception)
11667
11668 #ifdef CONFIG_PREEMPT
11669@@ -394,23 +544,34 @@ sysenter_past_esp:
11670 /*CFI_REL_OFFSET cs, 0*/
11671 /*
11672 * Push current_thread_info()->sysenter_return to the stack.
11673- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11674- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11675 */
11676- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11677+ pushl_cfi $0
11678 CFI_REL_OFFSET eip, 0
11679
11680 pushl_cfi %eax
11681 SAVE_ALL
11682+ GET_THREAD_INFO(%ebp)
11683+ movl TI_sysenter_return(%ebp),%ebp
11684+ movl %ebp,PT_EIP(%esp)
11685 ENABLE_INTERRUPTS(CLBR_NONE)
11686
11687 /*
11688 * Load the potential sixth argument from user stack.
11689 * Careful about security.
11690 */
11691+ movl PT_OLDESP(%esp),%ebp
11692+
11693+#ifdef CONFIG_PAX_MEMORY_UDEREF
11694+ mov PT_OLDSS(%esp),%ds
11695+1: movl %ds:(%ebp),%ebp
11696+ push %ss
11697+ pop %ds
11698+#else
11699 cmpl $__PAGE_OFFSET-3,%ebp
11700 jae syscall_fault
11701 1: movl (%ebp),%ebp
11702+#endif
11703+
11704 movl %ebp,PT_EBP(%esp)
11705 .section __ex_table,"a"
11706 .align 4
11707@@ -433,12 +594,24 @@ sysenter_do_call:
11708 testl $_TIF_ALLWORK_MASK, %ecx
11709 jne sysexit_audit
11710 sysenter_exit:
11711+
11712+#ifdef CONFIG_PAX_RANDKSTACK
11713+ pushl_cfi %eax
11714+ movl %esp, %eax
11715+ call pax_randomize_kstack
11716+ popl_cfi %eax
11717+#endif
11718+
11719+ pax_erase_kstack
11720+
11721 /* if something modifies registers it must also disable sysexit */
11722 movl PT_EIP(%esp), %edx
11723 movl PT_OLDESP(%esp), %ecx
11724 xorl %ebp,%ebp
11725 TRACE_IRQS_ON
11726 1: mov PT_FS(%esp), %fs
11727+2: mov PT_DS(%esp), %ds
11728+3: mov PT_ES(%esp), %es
11729 PTGS_TO_GS
11730 ENABLE_INTERRUPTS_SYSEXIT
11731
11732@@ -455,6 +628,9 @@ sysenter_audit:
11733 movl %eax,%edx /* 2nd arg: syscall number */
11734 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11735 call audit_syscall_entry
11736+
11737+ pax_erase_kstack
11738+
11739 pushl_cfi %ebx
11740 movl PT_EAX(%esp),%eax /* reload syscall number */
11741 jmp sysenter_do_call
11742@@ -481,11 +657,17 @@ sysexit_audit:
11743
11744 CFI_ENDPROC
11745 .pushsection .fixup,"ax"
11746-2: movl $0,PT_FS(%esp)
11747+4: movl $0,PT_FS(%esp)
11748+ jmp 1b
11749+5: movl $0,PT_DS(%esp)
11750+ jmp 1b
11751+6: movl $0,PT_ES(%esp)
11752 jmp 1b
11753 .section __ex_table,"a"
11754 .align 4
11755- .long 1b,2b
11756+ .long 1b,4b
11757+ .long 2b,5b
11758+ .long 3b,6b
11759 .popsection
11760 PTGS_TO_GS_EX
11761 ENDPROC(ia32_sysenter_target)
11762@@ -518,6 +700,15 @@ syscall_exit:
11763 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11764 jne syscall_exit_work
11765
11766+restore_all_pax:
11767+
11768+#ifdef CONFIG_PAX_RANDKSTACK
11769+ movl %esp, %eax
11770+ call pax_randomize_kstack
11771+#endif
11772+
11773+ pax_erase_kstack
11774+
11775 restore_all:
11776 TRACE_IRQS_IRET
11777 restore_all_notrace:
11778@@ -577,14 +768,34 @@ ldt_ss:
11779 * compensating for the offset by changing to the ESPFIX segment with
11780 * a base address that matches for the difference.
11781 */
11782-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11783+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11784 mov %esp, %edx /* load kernel esp */
11785 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11786 mov %dx, %ax /* eax: new kernel esp */
11787 sub %eax, %edx /* offset (low word is 0) */
11788+#ifdef CONFIG_SMP
11789+ movl PER_CPU_VAR(cpu_number), %ebx
11790+ shll $PAGE_SHIFT_asm, %ebx
11791+ addl $cpu_gdt_table, %ebx
11792+#else
11793+ movl $cpu_gdt_table, %ebx
11794+#endif
11795 shr $16, %edx
11796- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11797- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11798+
11799+#ifdef CONFIG_PAX_KERNEXEC
11800+ mov %cr0, %esi
11801+ btr $16, %esi
11802+ mov %esi, %cr0
11803+#endif
11804+
11805+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11806+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11807+
11808+#ifdef CONFIG_PAX_KERNEXEC
11809+ bts $16, %esi
11810+ mov %esi, %cr0
11811+#endif
11812+
11813 pushl_cfi $__ESPFIX_SS
11814 pushl_cfi %eax /* new kernel esp */
11815 /* Disable interrupts, but do not irqtrace this section: we
11816@@ -613,29 +824,23 @@ work_resched:
11817 movl TI_flags(%ebp), %ecx
11818 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11819 # than syscall tracing?
11820- jz restore_all
11821+ jz restore_all_pax
11822 testb $_TIF_NEED_RESCHED, %cl
11823 jnz work_resched
11824
11825 work_notifysig: # deal with pending signals and
11826 # notify-resume requests
11827+ movl %esp, %eax
11828 #ifdef CONFIG_VM86
11829 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11830- movl %esp, %eax
11831- jne work_notifysig_v86 # returning to kernel-space or
11832+ jz 1f # returning to kernel-space or
11833 # vm86-space
11834- xorl %edx, %edx
11835- call do_notify_resume
11836- jmp resume_userspace_sig
11837
11838- ALIGN
11839-work_notifysig_v86:
11840 pushl_cfi %ecx # save ti_flags for do_notify_resume
11841 call save_v86_state # %eax contains pt_regs pointer
11842 popl_cfi %ecx
11843 movl %eax, %esp
11844-#else
11845- movl %esp, %eax
11846+1:
11847 #endif
11848 xorl %edx, %edx
11849 call do_notify_resume
11850@@ -648,6 +853,9 @@ syscall_trace_entry:
11851 movl $-ENOSYS,PT_EAX(%esp)
11852 movl %esp, %eax
11853 call syscall_trace_enter
11854+
11855+ pax_erase_kstack
11856+
11857 /* What it returned is what we'll actually use. */
11858 cmpl $(nr_syscalls), %eax
11859 jnae syscall_call
11860@@ -670,6 +878,10 @@ END(syscall_exit_work)
11861
11862 RING0_INT_FRAME # can't unwind into user space anyway
11863 syscall_fault:
11864+#ifdef CONFIG_PAX_MEMORY_UDEREF
11865+ push %ss
11866+ pop %ds
11867+#endif
11868 GET_THREAD_INFO(%ebp)
11869 movl $-EFAULT,PT_EAX(%esp)
11870 jmp resume_userspace
11871@@ -752,6 +964,36 @@ ptregs_clone:
11872 CFI_ENDPROC
11873 ENDPROC(ptregs_clone)
11874
11875+ ALIGN;
11876+ENTRY(kernel_execve)
11877+ CFI_STARTPROC
11878+ pushl_cfi %ebp
11879+ sub $PT_OLDSS+4,%esp
11880+ pushl_cfi %edi
11881+ pushl_cfi %ecx
11882+ pushl_cfi %eax
11883+ lea 3*4(%esp),%edi
11884+ mov $PT_OLDSS/4+1,%ecx
11885+ xorl %eax,%eax
11886+ rep stosl
11887+ popl_cfi %eax
11888+ popl_cfi %ecx
11889+ popl_cfi %edi
11890+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11891+ pushl_cfi %esp
11892+ call sys_execve
11893+ add $4,%esp
11894+ CFI_ADJUST_CFA_OFFSET -4
11895+ GET_THREAD_INFO(%ebp)
11896+ test %eax,%eax
11897+ jz syscall_exit
11898+ add $PT_OLDSS+4,%esp
11899+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11900+ popl_cfi %ebp
11901+ ret
11902+ CFI_ENDPROC
11903+ENDPROC(kernel_execve)
11904+
11905 .macro FIXUP_ESPFIX_STACK
11906 /*
11907 * Switch back for ESPFIX stack to the normal zerobased stack
11908@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11909 * normal stack and adjusts ESP with the matching offset.
11910 */
11911 /* fixup the stack */
11912- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11913- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11914+#ifdef CONFIG_SMP
11915+ movl PER_CPU_VAR(cpu_number), %ebx
11916+ shll $PAGE_SHIFT_asm, %ebx
11917+ addl $cpu_gdt_table, %ebx
11918+#else
11919+ movl $cpu_gdt_table, %ebx
11920+#endif
11921+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11922+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11923 shl $16, %eax
11924 addl %esp, %eax /* the adjusted stack pointer */
11925 pushl_cfi $__KERNEL_DS
11926@@ -1213,7 +1462,6 @@ return_to_handler:
11927 jmp *%ecx
11928 #endif
11929
11930-.section .rodata,"a"
11931 #include "syscall_table_32.S"
11932
11933 syscall_table_size=(.-sys_call_table)
11934@@ -1259,9 +1507,12 @@ error_code:
11935 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11936 REG_TO_PTGS %ecx
11937 SET_KERNEL_GS %ecx
11938- movl $(__USER_DS), %ecx
11939+ movl $(__KERNEL_DS), %ecx
11940 movl %ecx, %ds
11941 movl %ecx, %es
11942+
11943+ pax_enter_kernel
11944+
11945 TRACE_IRQS_OFF
11946 movl %esp,%eax # pt_regs pointer
11947 call *%edi
11948@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11949 xorl %edx,%edx # zero error code
11950 movl %esp,%eax # pt_regs pointer
11951 call do_nmi
11952+
11953+ pax_exit_kernel
11954+
11955 jmp restore_all_notrace
11956 CFI_ENDPROC
11957
11958@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11959 FIXUP_ESPFIX_STACK # %eax == %esp
11960 xorl %edx,%edx # zero error code
11961 call do_nmi
11962+
11963+ pax_exit_kernel
11964+
11965 RESTORE_REGS
11966 lss 12+4(%esp), %esp # back to espfix stack
11967 CFI_ADJUST_CFA_OFFSET -24
11968diff -urNp linux-3.0.7/arch/x86/kernel/entry_64.S linux-3.0.7/arch/x86/kernel/entry_64.S
11969--- linux-3.0.7/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11970+++ linux-3.0.7/arch/x86/kernel/entry_64.S 2011-10-11 10:44:33.000000000 -0400
11971@@ -53,6 +53,8 @@
11972 #include <asm/paravirt.h>
11973 #include <asm/ftrace.h>
11974 #include <asm/percpu.h>
11975+#include <asm/pgtable.h>
11976+#include <asm/alternative-asm.h>
11977
11978 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11979 #include <linux/elf-em.h>
11980@@ -66,6 +68,7 @@
11981 #ifdef CONFIG_FUNCTION_TRACER
11982 #ifdef CONFIG_DYNAMIC_FTRACE
11983 ENTRY(mcount)
11984+ pax_force_retaddr
11985 retq
11986 END(mcount)
11987
11988@@ -90,6 +93,7 @@ GLOBAL(ftrace_graph_call)
11989 #endif
11990
11991 GLOBAL(ftrace_stub)
11992+ pax_force_retaddr
11993 retq
11994 END(ftrace_caller)
11995
11996@@ -110,6 +114,7 @@ ENTRY(mcount)
11997 #endif
11998
11999 GLOBAL(ftrace_stub)
12000+ pax_force_retaddr
12001 retq
12002
12003 trace:
12004@@ -119,6 +124,7 @@ trace:
12005 movq 8(%rbp), %rsi
12006 subq $MCOUNT_INSN_SIZE, %rdi
12007
12008+ pax_force_fptr ftrace_trace_function
12009 call *ftrace_trace_function
12010
12011 MCOUNT_RESTORE_FRAME
12012@@ -144,6 +150,7 @@ ENTRY(ftrace_graph_caller)
12013
12014 MCOUNT_RESTORE_FRAME
12015
12016+ pax_force_retaddr
12017 retq
12018 END(ftrace_graph_caller)
12019
12020@@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12021 movq 8(%rsp), %rdx
12022 movq (%rsp), %rax
12023 addq $24, %rsp
12024+ pax_force_fptr %rdi
12025 jmp *%rdi
12026 #endif
12027
12028@@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12029 ENDPROC(native_usergs_sysret64)
12030 #endif /* CONFIG_PARAVIRT */
12031
12032+ .macro ljmpq sel, off
12033+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12034+ .byte 0x48; ljmp *1234f(%rip)
12035+ .pushsection .rodata
12036+ .align 16
12037+ 1234: .quad \off; .word \sel
12038+ .popsection
12039+#else
12040+ pushq $\sel
12041+ pushq $\off
12042+ lretq
12043+#endif
12044+ .endm
12045+
12046+ .macro pax_enter_kernel
12047+#ifdef CONFIG_PAX_KERNEXEC
12048+ call pax_enter_kernel
12049+#endif
12050+ .endm
12051+
12052+ .macro pax_exit_kernel
12053+#ifdef CONFIG_PAX_KERNEXEC
12054+ call pax_exit_kernel
12055+#endif
12056+ .endm
12057+
12058+#ifdef CONFIG_PAX_KERNEXEC
12059+ENTRY(pax_enter_kernel)
12060+ pushq %rdi
12061+
12062+#ifdef CONFIG_PARAVIRT
12063+ PV_SAVE_REGS(CLBR_RDI)
12064+#endif
12065+
12066+ GET_CR0_INTO_RDI
12067+ bts $16,%rdi
12068+ jnc 1f
12069+ mov %cs,%edi
12070+ cmp $__KERNEL_CS,%edi
12071+ jz 3f
12072+ ljmpq __KERNEL_CS,3f
12073+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12074+2: SET_RDI_INTO_CR0
12075+3:
12076+
12077+#ifdef CONFIG_PARAVIRT
12078+ PV_RESTORE_REGS(CLBR_RDI)
12079+#endif
12080+
12081+ popq %rdi
12082+ pax_force_retaddr
12083+ retq
12084+ENDPROC(pax_enter_kernel)
12085+
12086+ENTRY(pax_exit_kernel)
12087+ pushq %rdi
12088+
12089+#ifdef CONFIG_PARAVIRT
12090+ PV_SAVE_REGS(CLBR_RDI)
12091+#endif
12092+
12093+ mov %cs,%rdi
12094+ cmp $__KERNEXEC_KERNEL_CS,%edi
12095+ jnz 2f
12096+ GET_CR0_INTO_RDI
12097+ btr $16,%rdi
12098+ ljmpq __KERNEL_CS,1f
12099+1: SET_RDI_INTO_CR0
12100+2:
12101+
12102+#ifdef CONFIG_PARAVIRT
12103+ PV_RESTORE_REGS(CLBR_RDI);
12104+#endif
12105+
12106+ popq %rdi
12107+ pax_force_retaddr
12108+ retq
12109+ENDPROC(pax_exit_kernel)
12110+#endif
12111+
12112+ .macro pax_enter_kernel_user
12113+#ifdef CONFIG_PAX_MEMORY_UDEREF
12114+ call pax_enter_kernel_user
12115+#endif
12116+ .endm
12117+
12118+ .macro pax_exit_kernel_user
12119+#ifdef CONFIG_PAX_MEMORY_UDEREF
12120+ call pax_exit_kernel_user
12121+#endif
12122+#ifdef CONFIG_PAX_RANDKSTACK
12123+ push %rax
12124+ call pax_randomize_kstack
12125+ pop %rax
12126+#endif
12127+ .endm
12128+
12129+#ifdef CONFIG_PAX_MEMORY_UDEREF
12130+ENTRY(pax_enter_kernel_user)
12131+ pushq %rdi
12132+ pushq %rbx
12133+
12134+#ifdef CONFIG_PARAVIRT
12135+ PV_SAVE_REGS(CLBR_RDI)
12136+#endif
12137+
12138+ GET_CR3_INTO_RDI
12139+ mov %rdi,%rbx
12140+ add $__START_KERNEL_map,%rbx
12141+ sub phys_base(%rip),%rbx
12142+
12143+#ifdef CONFIG_PARAVIRT
12144+ pushq %rdi
12145+ cmpl $0, pv_info+PARAVIRT_enabled
12146+ jz 1f
12147+ i = 0
12148+ .rept USER_PGD_PTRS
12149+ mov i*8(%rbx),%rsi
12150+ mov $0,%sil
12151+ lea i*8(%rbx),%rdi
12152+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12153+ i = i + 1
12154+ .endr
12155+ jmp 2f
12156+1:
12157+#endif
12158+
12159+ i = 0
12160+ .rept USER_PGD_PTRS
12161+ movb $0,i*8(%rbx)
12162+ i = i + 1
12163+ .endr
12164+
12165+#ifdef CONFIG_PARAVIRT
12166+2: popq %rdi
12167+#endif
12168+ SET_RDI_INTO_CR3
12169+
12170+#ifdef CONFIG_PAX_KERNEXEC
12171+ GET_CR0_INTO_RDI
12172+ bts $16,%rdi
12173+ SET_RDI_INTO_CR0
12174+#endif
12175+
12176+#ifdef CONFIG_PARAVIRT
12177+ PV_RESTORE_REGS(CLBR_RDI)
12178+#endif
12179+
12180+ popq %rbx
12181+ popq %rdi
12182+ pax_force_retaddr
12183+ retq
12184+ENDPROC(pax_enter_kernel_user)
12185+
12186+ENTRY(pax_exit_kernel_user)
12187+ push %rdi
12188+
12189+#ifdef CONFIG_PARAVIRT
12190+ pushq %rbx
12191+ PV_SAVE_REGS(CLBR_RDI)
12192+#endif
12193+
12194+#ifdef CONFIG_PAX_KERNEXEC
12195+ GET_CR0_INTO_RDI
12196+ btr $16,%rdi
12197+ SET_RDI_INTO_CR0
12198+#endif
12199+
12200+ GET_CR3_INTO_RDI
12201+ add $__START_KERNEL_map,%rdi
12202+ sub phys_base(%rip),%rdi
12203+
12204+#ifdef CONFIG_PARAVIRT
12205+ cmpl $0, pv_info+PARAVIRT_enabled
12206+ jz 1f
12207+ mov %rdi,%rbx
12208+ i = 0
12209+ .rept USER_PGD_PTRS
12210+ mov i*8(%rbx),%rsi
12211+ mov $0x67,%sil
12212+ lea i*8(%rbx),%rdi
12213+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12214+ i = i + 1
12215+ .endr
12216+ jmp 2f
12217+1:
12218+#endif
12219+
12220+ i = 0
12221+ .rept USER_PGD_PTRS
12222+ movb $0x67,i*8(%rdi)
12223+ i = i + 1
12224+ .endr
12225+
12226+#ifdef CONFIG_PARAVIRT
12227+2: PV_RESTORE_REGS(CLBR_RDI)
12228+ popq %rbx
12229+#endif
12230+
12231+ popq %rdi
12232+ pax_force_retaddr
12233+ retq
12234+ENDPROC(pax_exit_kernel_user)
12235+#endif
12236+
12237+ .macro pax_erase_kstack
12238+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12239+ call pax_erase_kstack
12240+#endif
12241+ .endm
12242+
12243+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12244+/*
12245+ * r10: thread_info
12246+ * rcx, rdx: can be clobbered
12247+ */
12248+ENTRY(pax_erase_kstack)
12249+ pushq %rdi
12250+ pushq %rax
12251+ pushq %r10
12252+
12253+ GET_THREAD_INFO(%r10)
12254+ mov TI_lowest_stack(%r10), %rdi
12255+ mov $-0xBEEF, %rax
12256+ std
12257+
12258+1: mov %edi, %ecx
12259+ and $THREAD_SIZE_asm - 1, %ecx
12260+ shr $3, %ecx
12261+ repne scasq
12262+ jecxz 2f
12263+
12264+ cmp $2*8, %ecx
12265+ jc 2f
12266+
12267+ mov $2*8, %ecx
12268+ repe scasq
12269+ jecxz 2f
12270+ jne 1b
12271+
12272+2: cld
12273+ mov %esp, %ecx
12274+ sub %edi, %ecx
12275+
12276+ cmp $THREAD_SIZE_asm, %rcx
12277+ jb 3f
12278+ ud2
12279+3:
12280+
12281+ shr $3, %ecx
12282+ rep stosq
12283+
12284+ mov TI_task_thread_sp0(%r10), %rdi
12285+ sub $256, %rdi
12286+ mov %rdi, TI_lowest_stack(%r10)
12287+
12288+ popq %r10
12289+ popq %rax
12290+ popq %rdi
12291+ pax_force_retaddr
12292+ ret
12293+ENDPROC(pax_erase_kstack)
12294+#endif
12295
12296 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12297 #ifdef CONFIG_TRACE_IRQFLAGS
12298@@ -318,7 +589,7 @@ ENTRY(save_args)
12299 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12300 movq_cfi rbp, 8 /* push %rbp */
12301 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12302- testl $3, CS(%rdi)
12303+ testb $3, CS(%rdi)
12304 je 1f
12305 SWAPGS
12306 /*
12307@@ -338,6 +609,7 @@ ENTRY(save_args)
12308 * We entered an interrupt context - irqs are off:
12309 */
12310 2: TRACE_IRQS_OFF
12311+ pax_force_retaddr
12312 ret
12313 CFI_ENDPROC
12314 END(save_args)
12315@@ -354,6 +626,7 @@ ENTRY(save_rest)
12316 movq_cfi r15, R15+16
12317 movq %r11, 8(%rsp) /* return address */
12318 FIXUP_TOP_OF_STACK %r11, 16
12319+ pax_force_retaddr
12320 ret
12321 CFI_ENDPROC
12322 END(save_rest)
12323@@ -385,7 +658,8 @@ ENTRY(save_paranoid)
12324 js 1f /* negative -> in kernel */
12325 SWAPGS
12326 xorl %ebx,%ebx
12327-1: ret
12328+1: pax_force_retaddr
12329+ ret
12330 CFI_ENDPROC
12331 END(save_paranoid)
12332 .popsection
12333@@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12334
12335 RESTORE_REST
12336
12337- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12338+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12339 je int_ret_from_sys_call
12340
12341 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12342@@ -455,7 +729,7 @@ END(ret_from_fork)
12343 ENTRY(system_call)
12344 CFI_STARTPROC simple
12345 CFI_SIGNAL_FRAME
12346- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12347+ CFI_DEF_CFA rsp,0
12348 CFI_REGISTER rip,rcx
12349 /*CFI_REGISTER rflags,r11*/
12350 SWAPGS_UNSAFE_STACK
12351@@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12352
12353 movq %rsp,PER_CPU_VAR(old_rsp)
12354 movq PER_CPU_VAR(kernel_stack),%rsp
12355+ pax_enter_kernel_user
12356 /*
12357 * No need to follow this irqs off/on section - it's straight
12358 * and short:
12359 */
12360 ENABLE_INTERRUPTS(CLBR_NONE)
12361- SAVE_ARGS 8,1
12362+ SAVE_ARGS 8*6,1
12363 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12364 movq %rcx,RIP-ARGOFFSET(%rsp)
12365 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12366@@ -502,6 +777,8 @@ sysret_check:
12367 andl %edi,%edx
12368 jnz sysret_careful
12369 CFI_REMEMBER_STATE
12370+ pax_exit_kernel_user
12371+ pax_erase_kstack
12372 /*
12373 * sysretq will re-enable interrupts:
12374 */
12375@@ -560,6 +837,9 @@ auditsys:
12376 movq %rax,%rsi /* 2nd arg: syscall number */
12377 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12378 call audit_syscall_entry
12379+
12380+ pax_erase_kstack
12381+
12382 LOAD_ARGS 0 /* reload call-clobbered registers */
12383 jmp system_call_fastpath
12384
12385@@ -590,6 +870,9 @@ tracesys:
12386 FIXUP_TOP_OF_STACK %rdi
12387 movq %rsp,%rdi
12388 call syscall_trace_enter
12389+
12390+ pax_erase_kstack
12391+
12392 /*
12393 * Reload arg registers from stack in case ptrace changed them.
12394 * We don't reload %rax because syscall_trace_enter() returned
12395@@ -611,7 +894,7 @@ tracesys:
12396 GLOBAL(int_ret_from_sys_call)
12397 DISABLE_INTERRUPTS(CLBR_NONE)
12398 TRACE_IRQS_OFF
12399- testl $3,CS-ARGOFFSET(%rsp)
12400+ testb $3,CS-ARGOFFSET(%rsp)
12401 je retint_restore_args
12402 movl $_TIF_ALLWORK_MASK,%edi
12403 /* edi: mask to check */
12404@@ -702,6 +985,7 @@ ENTRY(ptregscall_common)
12405 movq_cfi_restore R12+8, r12
12406 movq_cfi_restore RBP+8, rbp
12407 movq_cfi_restore RBX+8, rbx
12408+ pax_force_retaddr
12409 ret $REST_SKIP /* pop extended registers */
12410 CFI_ENDPROC
12411 END(ptregscall_common)
12412@@ -793,6 +1077,16 @@ END(interrupt)
12413 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12414 call save_args
12415 PARTIAL_FRAME 0
12416+#ifdef CONFIG_PAX_MEMORY_UDEREF
12417+ testb $3, CS(%rdi)
12418+ jnz 1f
12419+ pax_enter_kernel
12420+ jmp 2f
12421+1: pax_enter_kernel_user
12422+2:
12423+#else
12424+ pax_enter_kernel
12425+#endif
12426 call \func
12427 .endm
12428
12429@@ -825,7 +1119,7 @@ ret_from_intr:
12430 CFI_ADJUST_CFA_OFFSET -8
12431 exit_intr:
12432 GET_THREAD_INFO(%rcx)
12433- testl $3,CS-ARGOFFSET(%rsp)
12434+ testb $3,CS-ARGOFFSET(%rsp)
12435 je retint_kernel
12436
12437 /* Interrupt came from user space */
12438@@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12439 * The iretq could re-enable interrupts:
12440 */
12441 DISABLE_INTERRUPTS(CLBR_ANY)
12442+ pax_exit_kernel_user
12443+ pax_erase_kstack
12444 TRACE_IRQS_IRETQ
12445 SWAPGS
12446 jmp restore_args
12447
12448 retint_restore_args: /* return to kernel space */
12449 DISABLE_INTERRUPTS(CLBR_ANY)
12450+ pax_exit_kernel
12451+ pax_force_retaddr RIP-ARGOFFSET
12452 /*
12453 * The iretq could re-enable interrupts:
12454 */
12455@@ -1027,6 +1325,16 @@ ENTRY(\sym)
12456 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12457 call error_entry
12458 DEFAULT_FRAME 0
12459+#ifdef CONFIG_PAX_MEMORY_UDEREF
12460+ testb $3, CS(%rsp)
12461+ jnz 1f
12462+ pax_enter_kernel
12463+ jmp 2f
12464+1: pax_enter_kernel_user
12465+2:
12466+#else
12467+ pax_enter_kernel
12468+#endif
12469 movq %rsp,%rdi /* pt_regs pointer */
12470 xorl %esi,%esi /* no error code */
12471 call \do_sym
12472@@ -1044,6 +1352,16 @@ ENTRY(\sym)
12473 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12474 call save_paranoid
12475 TRACE_IRQS_OFF
12476+#ifdef CONFIG_PAX_MEMORY_UDEREF
12477+ testb $3, CS(%rsp)
12478+ jnz 1f
12479+ pax_enter_kernel
12480+ jmp 2f
12481+1: pax_enter_kernel_user
12482+2:
12483+#else
12484+ pax_enter_kernel
12485+#endif
12486 movq %rsp,%rdi /* pt_regs pointer */
12487 xorl %esi,%esi /* no error code */
12488 call \do_sym
12489@@ -1052,7 +1370,7 @@ ENTRY(\sym)
12490 END(\sym)
12491 .endm
12492
12493-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12494+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12495 .macro paranoidzeroentry_ist sym do_sym ist
12496 ENTRY(\sym)
12497 INTR_FRAME
12498@@ -1062,8 +1380,24 @@ ENTRY(\sym)
12499 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12500 call save_paranoid
12501 TRACE_IRQS_OFF
12502+#ifdef CONFIG_PAX_MEMORY_UDEREF
12503+ testb $3, CS(%rsp)
12504+ jnz 1f
12505+ pax_enter_kernel
12506+ jmp 2f
12507+1: pax_enter_kernel_user
12508+2:
12509+#else
12510+ pax_enter_kernel
12511+#endif
12512 movq %rsp,%rdi /* pt_regs pointer */
12513 xorl %esi,%esi /* no error code */
12514+#ifdef CONFIG_SMP
12515+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12516+ lea init_tss(%r12), %r12
12517+#else
12518+ lea init_tss(%rip), %r12
12519+#endif
12520 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12521 call \do_sym
12522 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12523@@ -1080,6 +1414,16 @@ ENTRY(\sym)
12524 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12525 call error_entry
12526 DEFAULT_FRAME 0
12527+#ifdef CONFIG_PAX_MEMORY_UDEREF
12528+ testb $3, CS(%rsp)
12529+ jnz 1f
12530+ pax_enter_kernel
12531+ jmp 2f
12532+1: pax_enter_kernel_user
12533+2:
12534+#else
12535+ pax_enter_kernel
12536+#endif
12537 movq %rsp,%rdi /* pt_regs pointer */
12538 movq ORIG_RAX(%rsp),%rsi /* get error code */
12539 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12540@@ -1099,6 +1443,16 @@ ENTRY(\sym)
12541 call save_paranoid
12542 DEFAULT_FRAME 0
12543 TRACE_IRQS_OFF
12544+#ifdef CONFIG_PAX_MEMORY_UDEREF
12545+ testb $3, CS(%rsp)
12546+ jnz 1f
12547+ pax_enter_kernel
12548+ jmp 2f
12549+1: pax_enter_kernel_user
12550+2:
12551+#else
12552+ pax_enter_kernel
12553+#endif
12554 movq %rsp,%rdi /* pt_regs pointer */
12555 movq ORIG_RAX(%rsp),%rsi /* get error code */
12556 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12557@@ -1134,6 +1488,7 @@ gs_change:
12558 2: mfence /* workaround */
12559 SWAPGS
12560 popfq_cfi
12561+ pax_force_retaddr
12562 ret
12563 CFI_ENDPROC
12564 END(native_load_gs_index)
12565@@ -1158,6 +1513,7 @@ ENTRY(kernel_thread_helper)
12566 * Here we are in the child and the registers are set as they were
12567 * at kernel_thread() invocation in the parent.
12568 */
12569+ pax_force_fptr %rsi
12570 call *%rsi
12571 # exit
12572 mov %eax, %edi
12573@@ -1193,9 +1549,10 @@ ENTRY(kernel_execve)
12574 je int_ret_from_sys_call
12575 RESTORE_ARGS
12576 UNFAKE_STACK_FRAME
12577+ pax_force_retaddr
12578 ret
12579 CFI_ENDPROC
12580-END(kernel_execve)
12581+ENDPROC(kernel_execve)
12582
12583 /* Call softirq on interrupt stack. Interrupts are off. */
12584 ENTRY(call_softirq)
12585@@ -1213,9 +1570,10 @@ ENTRY(call_softirq)
12586 CFI_DEF_CFA_REGISTER rsp
12587 CFI_ADJUST_CFA_OFFSET -8
12588 decl PER_CPU_VAR(irq_count)
12589+ pax_force_retaddr
12590 ret
12591 CFI_ENDPROC
12592-END(call_softirq)
12593+ENDPROC(call_softirq)
12594
12595 #ifdef CONFIG_XEN
12596 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
12597@@ -1361,16 +1719,31 @@ ENTRY(paranoid_exit)
12598 TRACE_IRQS_OFF
12599 testl %ebx,%ebx /* swapgs needed? */
12600 jnz paranoid_restore
12601- testl $3,CS(%rsp)
12602+ testb $3,CS(%rsp)
12603 jnz paranoid_userspace
12604+#ifdef CONFIG_PAX_MEMORY_UDEREF
12605+ pax_exit_kernel
12606+ TRACE_IRQS_IRETQ 0
12607+ SWAPGS_UNSAFE_STACK
12608+ RESTORE_ALL 8
12609+ pax_force_retaddr
12610+ jmp irq_return
12611+#endif
12612 paranoid_swapgs:
12613+#ifdef CONFIG_PAX_MEMORY_UDEREF
12614+ pax_exit_kernel_user
12615+#else
12616+ pax_exit_kernel
12617+#endif
12618 TRACE_IRQS_IRETQ 0
12619 SWAPGS_UNSAFE_STACK
12620 RESTORE_ALL 8
12621 jmp irq_return
12622 paranoid_restore:
12623+ pax_exit_kernel
12624 TRACE_IRQS_IRETQ 0
12625 RESTORE_ALL 8
12626+ pax_force_retaddr
12627 jmp irq_return
12628 paranoid_userspace:
12629 GET_THREAD_INFO(%rcx)
12630@@ -1426,12 +1799,13 @@ ENTRY(error_entry)
12631 movq_cfi r14, R14+8
12632 movq_cfi r15, R15+8
12633 xorl %ebx,%ebx
12634- testl $3,CS+8(%rsp)
12635+ testb $3,CS+8(%rsp)
12636 je error_kernelspace
12637 error_swapgs:
12638 SWAPGS
12639 error_sti:
12640 TRACE_IRQS_OFF
12641+ pax_force_retaddr
12642 ret
12643
12644 /*
12645@@ -1490,6 +1864,16 @@ ENTRY(nmi)
12646 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12647 call save_paranoid
12648 DEFAULT_FRAME 0
12649+#ifdef CONFIG_PAX_MEMORY_UDEREF
12650+ testb $3, CS(%rsp)
12651+ jnz 1f
12652+ pax_enter_kernel
12653+ jmp 2f
12654+1: pax_enter_kernel_user
12655+2:
12656+#else
12657+ pax_enter_kernel
12658+#endif
12659 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12660 movq %rsp,%rdi
12661 movq $-1,%rsi
12662@@ -1500,12 +1884,28 @@ ENTRY(nmi)
12663 DISABLE_INTERRUPTS(CLBR_NONE)
12664 testl %ebx,%ebx /* swapgs needed? */
12665 jnz nmi_restore
12666- testl $3,CS(%rsp)
12667+ testb $3,CS(%rsp)
12668 jnz nmi_userspace
12669+#ifdef CONFIG_PAX_MEMORY_UDEREF
12670+ pax_exit_kernel
12671+ SWAPGS_UNSAFE_STACK
12672+ RESTORE_ALL 8
12673+ pax_force_retaddr
12674+ jmp irq_return
12675+#endif
12676 nmi_swapgs:
12677+#ifdef CONFIG_PAX_MEMORY_UDEREF
12678+ pax_exit_kernel_user
12679+#else
12680+ pax_exit_kernel
12681+#endif
12682 SWAPGS_UNSAFE_STACK
12683+ RESTORE_ALL 8
12684+ jmp irq_return
12685 nmi_restore:
12686+ pax_exit_kernel
12687 RESTORE_ALL 8
12688+ pax_force_retaddr
12689 jmp irq_return
12690 nmi_userspace:
12691 GET_THREAD_INFO(%rcx)
12692diff -urNp linux-3.0.7/arch/x86/kernel/ftrace.c linux-3.0.7/arch/x86/kernel/ftrace.c
12693--- linux-3.0.7/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12694+++ linux-3.0.7/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12695@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12696 static const void *mod_code_newcode; /* holds the text to write to the IP */
12697
12698 static unsigned nmi_wait_count;
12699-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12700+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12701
12702 int ftrace_arch_read_dyn_info(char *buf, int size)
12703 {
12704@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12705
12706 r = snprintf(buf, size, "%u %u",
12707 nmi_wait_count,
12708- atomic_read(&nmi_update_count));
12709+ atomic_read_unchecked(&nmi_update_count));
12710 return r;
12711 }
12712
12713@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12714
12715 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12716 smp_rmb();
12717+ pax_open_kernel();
12718 ftrace_mod_code();
12719- atomic_inc(&nmi_update_count);
12720+ pax_close_kernel();
12721+ atomic_inc_unchecked(&nmi_update_count);
12722 }
12723 /* Must have previous changes seen before executions */
12724 smp_mb();
12725@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12726 {
12727 unsigned char replaced[MCOUNT_INSN_SIZE];
12728
12729+ ip = ktla_ktva(ip);
12730+
12731 /*
12732 * Note: Due to modules and __init, code can
12733 * disappear and change, we need to protect against faulting
12734@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12735 unsigned char old[MCOUNT_INSN_SIZE], *new;
12736 int ret;
12737
12738- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12739+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12740 new = ftrace_call_replace(ip, (unsigned long)func);
12741 ret = ftrace_modify_code(ip, old, new);
12742
12743@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12744 {
12745 unsigned char code[MCOUNT_INSN_SIZE];
12746
12747+ ip = ktla_ktva(ip);
12748+
12749 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12750 return -EFAULT;
12751
12752diff -urNp linux-3.0.7/arch/x86/kernel/head32.c linux-3.0.7/arch/x86/kernel/head32.c
12753--- linux-3.0.7/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12754+++ linux-3.0.7/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12755@@ -19,6 +19,7 @@
12756 #include <asm/io_apic.h>
12757 #include <asm/bios_ebda.h>
12758 #include <asm/tlbflush.h>
12759+#include <asm/boot.h>
12760
12761 static void __init i386_default_early_setup(void)
12762 {
12763@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12764 {
12765 memblock_init();
12766
12767- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12768+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12769
12770 #ifdef CONFIG_BLK_DEV_INITRD
12771 /* Reserve INITRD */
12772diff -urNp linux-3.0.7/arch/x86/kernel/head_32.S linux-3.0.7/arch/x86/kernel/head_32.S
12773--- linux-3.0.7/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12774+++ linux-3.0.7/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12775@@ -25,6 +25,12 @@
12776 /* Physical address */
12777 #define pa(X) ((X) - __PAGE_OFFSET)
12778
12779+#ifdef CONFIG_PAX_KERNEXEC
12780+#define ta(X) (X)
12781+#else
12782+#define ta(X) ((X) - __PAGE_OFFSET)
12783+#endif
12784+
12785 /*
12786 * References to members of the new_cpu_data structure.
12787 */
12788@@ -54,11 +60,7 @@
12789 * and small than max_low_pfn, otherwise will waste some page table entries
12790 */
12791
12792-#if PTRS_PER_PMD > 1
12793-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12794-#else
12795-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12796-#endif
12797+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12798
12799 /* Number of possible pages in the lowmem region */
12800 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12801@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12802 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12803
12804 /*
12805+ * Real beginning of normal "text" segment
12806+ */
12807+ENTRY(stext)
12808+ENTRY(_stext)
12809+
12810+/*
12811 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12812 * %esi points to the real-mode code as a 32-bit pointer.
12813 * CS and DS must be 4 GB flat segments, but we don't depend on
12814@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12815 * can.
12816 */
12817 __HEAD
12818+
12819+#ifdef CONFIG_PAX_KERNEXEC
12820+ jmp startup_32
12821+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12822+.fill PAGE_SIZE-5,1,0xcc
12823+#endif
12824+
12825 ENTRY(startup_32)
12826 movl pa(stack_start),%ecx
12827
12828@@ -105,6 +120,57 @@ ENTRY(startup_32)
12829 2:
12830 leal -__PAGE_OFFSET(%ecx),%esp
12831
12832+#ifdef CONFIG_SMP
12833+ movl $pa(cpu_gdt_table),%edi
12834+ movl $__per_cpu_load,%eax
12835+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12836+ rorl $16,%eax
12837+ movb %al,__KERNEL_PERCPU + 4(%edi)
12838+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12839+ movl $__per_cpu_end - 1,%eax
12840+ subl $__per_cpu_start,%eax
12841+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12842+#endif
12843+
12844+#ifdef CONFIG_PAX_MEMORY_UDEREF
12845+ movl $NR_CPUS,%ecx
12846+ movl $pa(cpu_gdt_table),%edi
12847+1:
12848+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12849+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12850+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12851+ addl $PAGE_SIZE_asm,%edi
12852+ loop 1b
12853+#endif
12854+
12855+#ifdef CONFIG_PAX_KERNEXEC
12856+ movl $pa(boot_gdt),%edi
12857+ movl $__LOAD_PHYSICAL_ADDR,%eax
12858+ movw %ax,__BOOT_CS + 2(%edi)
12859+ rorl $16,%eax
12860+ movb %al,__BOOT_CS + 4(%edi)
12861+ movb %ah,__BOOT_CS + 7(%edi)
12862+ rorl $16,%eax
12863+
12864+ ljmp $(__BOOT_CS),$1f
12865+1:
12866+
12867+ movl $NR_CPUS,%ecx
12868+ movl $pa(cpu_gdt_table),%edi
12869+ addl $__PAGE_OFFSET,%eax
12870+1:
12871+ movw %ax,__KERNEL_CS + 2(%edi)
12872+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12873+ rorl $16,%eax
12874+ movb %al,__KERNEL_CS + 4(%edi)
12875+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12876+ movb %ah,__KERNEL_CS + 7(%edi)
12877+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12878+ rorl $16,%eax
12879+ addl $PAGE_SIZE_asm,%edi
12880+ loop 1b
12881+#endif
12882+
12883 /*
12884 * Clear BSS first so that there are no surprises...
12885 */
12886@@ -195,8 +261,11 @@ ENTRY(startup_32)
12887 movl %eax, pa(max_pfn_mapped)
12888
12889 /* Do early initialization of the fixmap area */
12890- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12891- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12892+#ifdef CONFIG_COMPAT_VDSO
12893+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12894+#else
12895+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12896+#endif
12897 #else /* Not PAE */
12898
12899 page_pde_offset = (__PAGE_OFFSET >> 20);
12900@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12901 movl %eax, pa(max_pfn_mapped)
12902
12903 /* Do early initialization of the fixmap area */
12904- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12905- movl %eax,pa(initial_page_table+0xffc)
12906+#ifdef CONFIG_COMPAT_VDSO
12907+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12908+#else
12909+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12910+#endif
12911 #endif
12912
12913 #ifdef CONFIG_PARAVIRT
12914@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12915 cmpl $num_subarch_entries, %eax
12916 jae bad_subarch
12917
12918- movl pa(subarch_entries)(,%eax,4), %eax
12919- subl $__PAGE_OFFSET, %eax
12920- jmp *%eax
12921+ jmp *pa(subarch_entries)(,%eax,4)
12922
12923 bad_subarch:
12924 WEAK(lguest_entry)
12925@@ -255,10 +325,10 @@ WEAK(xen_entry)
12926 __INITDATA
12927
12928 subarch_entries:
12929- .long default_entry /* normal x86/PC */
12930- .long lguest_entry /* lguest hypervisor */
12931- .long xen_entry /* Xen hypervisor */
12932- .long default_entry /* Moorestown MID */
12933+ .long ta(default_entry) /* normal x86/PC */
12934+ .long ta(lguest_entry) /* lguest hypervisor */
12935+ .long ta(xen_entry) /* Xen hypervisor */
12936+ .long ta(default_entry) /* Moorestown MID */
12937 num_subarch_entries = (. - subarch_entries) / 4
12938 .previous
12939 #else
12940@@ -312,6 +382,7 @@ default_entry:
12941 orl %edx,%eax
12942 movl %eax,%cr4
12943
12944+#ifdef CONFIG_X86_PAE
12945 testb $X86_CR4_PAE, %al # check if PAE is enabled
12946 jz 6f
12947
12948@@ -340,6 +411,9 @@ default_entry:
12949 /* Make changes effective */
12950 wrmsr
12951
12952+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12953+#endif
12954+
12955 6:
12956
12957 /*
12958@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12959 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12960 movl %eax,%ss # after changing gdt.
12961
12962- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12963+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12964 movl %eax,%ds
12965 movl %eax,%es
12966
12967@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12968 */
12969 cmpb $0,ready
12970 jne 1f
12971- movl $gdt_page,%eax
12972+ movl $cpu_gdt_table,%eax
12973 movl $stack_canary,%ecx
12974+#ifdef CONFIG_SMP
12975+ addl $__per_cpu_load,%ecx
12976+#endif
12977 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12978 shrl $16, %ecx
12979 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12980 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12981 1:
12982-#endif
12983 movl $(__KERNEL_STACK_CANARY),%eax
12984+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12985+ movl $(__USER_DS),%eax
12986+#else
12987+ xorl %eax,%eax
12988+#endif
12989 movl %eax,%gs
12990
12991 xorl %eax,%eax # Clear LDT
12992@@ -558,22 +639,22 @@ early_page_fault:
12993 jmp early_fault
12994
12995 early_fault:
12996- cld
12997 #ifdef CONFIG_PRINTK
12998+ cmpl $1,%ss:early_recursion_flag
12999+ je hlt_loop
13000+ incl %ss:early_recursion_flag
13001+ cld
13002 pusha
13003 movl $(__KERNEL_DS),%eax
13004 movl %eax,%ds
13005 movl %eax,%es
13006- cmpl $2,early_recursion_flag
13007- je hlt_loop
13008- incl early_recursion_flag
13009 movl %cr2,%eax
13010 pushl %eax
13011 pushl %edx /* trapno */
13012 pushl $fault_msg
13013 call printk
13014+; call dump_stack
13015 #endif
13016- call dump_stack
13017 hlt_loop:
13018 hlt
13019 jmp hlt_loop
13020@@ -581,8 +662,11 @@ hlt_loop:
13021 /* This is the default interrupt "handler" :-) */
13022 ALIGN
13023 ignore_int:
13024- cld
13025 #ifdef CONFIG_PRINTK
13026+ cmpl $2,%ss:early_recursion_flag
13027+ je hlt_loop
13028+ incl %ss:early_recursion_flag
13029+ cld
13030 pushl %eax
13031 pushl %ecx
13032 pushl %edx
13033@@ -591,9 +675,6 @@ ignore_int:
13034 movl $(__KERNEL_DS),%eax
13035 movl %eax,%ds
13036 movl %eax,%es
13037- cmpl $2,early_recursion_flag
13038- je hlt_loop
13039- incl early_recursion_flag
13040 pushl 16(%esp)
13041 pushl 24(%esp)
13042 pushl 32(%esp)
13043@@ -622,29 +703,43 @@ ENTRY(initial_code)
13044 /*
13045 * BSS section
13046 */
13047-__PAGE_ALIGNED_BSS
13048- .align PAGE_SIZE
13049 #ifdef CONFIG_X86_PAE
13050+.section .initial_pg_pmd,"a",@progbits
13051 initial_pg_pmd:
13052 .fill 1024*KPMDS,4,0
13053 #else
13054+.section .initial_page_table,"a",@progbits
13055 ENTRY(initial_page_table)
13056 .fill 1024,4,0
13057 #endif
13058+.section .initial_pg_fixmap,"a",@progbits
13059 initial_pg_fixmap:
13060 .fill 1024,4,0
13061+.section .empty_zero_page,"a",@progbits
13062 ENTRY(empty_zero_page)
13063 .fill 4096,1,0
13064+.section .swapper_pg_dir,"a",@progbits
13065 ENTRY(swapper_pg_dir)
13066+#ifdef CONFIG_X86_PAE
13067+ .fill 4,8,0
13068+#else
13069 .fill 1024,4,0
13070+#endif
13071+
13072+/*
13073+ * The IDT has to be page-aligned to simplify the Pentium
13074+ * F0 0F bug workaround.. We have a special link segment
13075+ * for this.
13076+ */
13077+.section .idt,"a",@progbits
13078+ENTRY(idt_table)
13079+ .fill 256,8,0
13080
13081 /*
13082 * This starts the data section.
13083 */
13084 #ifdef CONFIG_X86_PAE
13085-__PAGE_ALIGNED_DATA
13086- /* Page-aligned for the benefit of paravirt? */
13087- .align PAGE_SIZE
13088+.section .initial_page_table,"a",@progbits
13089 ENTRY(initial_page_table)
13090 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13091 # if KPMDS == 3
13092@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13093 # error "Kernel PMDs should be 1, 2 or 3"
13094 # endif
13095 .align PAGE_SIZE /* needs to be page-sized too */
13096+
13097+#ifdef CONFIG_PAX_PER_CPU_PGD
13098+ENTRY(cpu_pgd)
13099+ .rept NR_CPUS
13100+ .fill 4,8,0
13101+ .endr
13102+#endif
13103+
13104 #endif
13105
13106 .data
13107 .balign 4
13108 ENTRY(stack_start)
13109- .long init_thread_union+THREAD_SIZE
13110+ .long init_thread_union+THREAD_SIZE-8
13111+
13112+ready: .byte 0
13113
13114+.section .rodata,"a",@progbits
13115 early_recursion_flag:
13116 .long 0
13117
13118-ready: .byte 0
13119-
13120 int_msg:
13121 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13122
13123@@ -707,7 +811,7 @@ fault_msg:
13124 .word 0 # 32 bit align gdt_desc.address
13125 boot_gdt_descr:
13126 .word __BOOT_DS+7
13127- .long boot_gdt - __PAGE_OFFSET
13128+ .long pa(boot_gdt)
13129
13130 .word 0 # 32-bit align idt_desc.address
13131 idt_descr:
13132@@ -718,7 +822,7 @@ idt_descr:
13133 .word 0 # 32 bit align gdt_desc.address
13134 ENTRY(early_gdt_descr)
13135 .word GDT_ENTRIES*8-1
13136- .long gdt_page /* Overwritten for secondary CPUs */
13137+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13138
13139 /*
13140 * The boot_gdt must mirror the equivalent in setup.S and is
13141@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13142 .align L1_CACHE_BYTES
13143 ENTRY(boot_gdt)
13144 .fill GDT_ENTRY_BOOT_CS,8,0
13145- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13146- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13147+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13148+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13149+
13150+ .align PAGE_SIZE_asm
13151+ENTRY(cpu_gdt_table)
13152+ .rept NR_CPUS
13153+ .quad 0x0000000000000000 /* NULL descriptor */
13154+ .quad 0x0000000000000000 /* 0x0b reserved */
13155+ .quad 0x0000000000000000 /* 0x13 reserved */
13156+ .quad 0x0000000000000000 /* 0x1b reserved */
13157+
13158+#ifdef CONFIG_PAX_KERNEXEC
13159+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13160+#else
13161+ .quad 0x0000000000000000 /* 0x20 unused */
13162+#endif
13163+
13164+ .quad 0x0000000000000000 /* 0x28 unused */
13165+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13166+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13167+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13168+ .quad 0x0000000000000000 /* 0x4b reserved */
13169+ .quad 0x0000000000000000 /* 0x53 reserved */
13170+ .quad 0x0000000000000000 /* 0x5b reserved */
13171+
13172+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13173+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13174+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13175+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13176+
13177+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13178+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13179+
13180+ /*
13181+ * Segments used for calling PnP BIOS have byte granularity.
13182+ * The code segments and data segments have fixed 64k limits,
13183+ * the transfer segment sizes are set at run time.
13184+ */
13185+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13186+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13187+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13188+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13189+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13190+
13191+ /*
13192+ * The APM segments have byte granularity and their bases
13193+ * are set at run time. All have 64k limits.
13194+ */
13195+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13196+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13197+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13198+
13199+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13200+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13201+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13202+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13203+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13204+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13205+
13206+ /* Be sure this is zeroed to avoid false validations in Xen */
13207+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13208+ .endr
13209diff -urNp linux-3.0.7/arch/x86/kernel/head_64.S linux-3.0.7/arch/x86/kernel/head_64.S
13210--- linux-3.0.7/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13211+++ linux-3.0.7/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13212@@ -19,6 +19,7 @@
13213 #include <asm/cache.h>
13214 #include <asm/processor-flags.h>
13215 #include <asm/percpu.h>
13216+#include <asm/cpufeature.h>
13217
13218 #ifdef CONFIG_PARAVIRT
13219 #include <asm/asm-offsets.h>
13220@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13221 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13222 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13223 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13224+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13225+L3_VMALLOC_START = pud_index(VMALLOC_START)
13226+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13227+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13228
13229 .text
13230 __HEAD
13231@@ -85,35 +90,22 @@ startup_64:
13232 */
13233 addq %rbp, init_level4_pgt + 0(%rip)
13234 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13235+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13236+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13237 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13238
13239 addq %rbp, level3_ident_pgt + 0(%rip)
13240+#ifndef CONFIG_XEN
13241+ addq %rbp, level3_ident_pgt + 8(%rip)
13242+#endif
13243
13244- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13245- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13246+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13247
13248- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13249+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13250+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13251
13252- /* Add an Identity mapping if I am above 1G */
13253- leaq _text(%rip), %rdi
13254- andq $PMD_PAGE_MASK, %rdi
13255-
13256- movq %rdi, %rax
13257- shrq $PUD_SHIFT, %rax
13258- andq $(PTRS_PER_PUD - 1), %rax
13259- jz ident_complete
13260-
13261- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13262- leaq level3_ident_pgt(%rip), %rbx
13263- movq %rdx, 0(%rbx, %rax, 8)
13264-
13265- movq %rdi, %rax
13266- shrq $PMD_SHIFT, %rax
13267- andq $(PTRS_PER_PMD - 1), %rax
13268- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13269- leaq level2_spare_pgt(%rip), %rbx
13270- movq %rdx, 0(%rbx, %rax, 8)
13271-ident_complete:
13272+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13273+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13274
13275 /*
13276 * Fixup the kernel text+data virtual addresses. Note that
13277@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13278 * after the boot processor executes this code.
13279 */
13280
13281- /* Enable PAE mode and PGE */
13282- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13283+ /* Enable PAE mode and PSE/PGE */
13284+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13285 movq %rax, %cr4
13286
13287 /* Setup early boot stage 4 level pagetables. */
13288@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13289 movl $MSR_EFER, %ecx
13290 rdmsr
13291 btsl $_EFER_SCE, %eax /* Enable System Call */
13292- btl $20,%edi /* No Execute supported? */
13293+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13294 jnc 1f
13295 btsl $_EFER_NX, %eax
13296+ leaq init_level4_pgt(%rip), %rdi
13297+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13298+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13299+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13300+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13301 1: wrmsr /* Make changes effective */
13302
13303 /* Setup cr0 */
13304@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13305 bad_address:
13306 jmp bad_address
13307
13308- .section ".init.text","ax"
13309+ __INIT
13310 #ifdef CONFIG_EARLY_PRINTK
13311 .globl early_idt_handlers
13312 early_idt_handlers:
13313@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13314 #endif /* EARLY_PRINTK */
13315 1: hlt
13316 jmp 1b
13317+ .previous
13318
13319 #ifdef CONFIG_EARLY_PRINTK
13320+ __INITDATA
13321 early_recursion_flag:
13322 .long 0
13323+ .previous
13324
13325+ .section .rodata,"a",@progbits
13326 early_idt_msg:
13327 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13328 early_idt_ripmsg:
13329 .asciz "RIP %s\n"
13330-#endif /* CONFIG_EARLY_PRINTK */
13331 .previous
13332+#endif /* CONFIG_EARLY_PRINTK */
13333
13334+ .section .rodata,"a",@progbits
13335 #define NEXT_PAGE(name) \
13336 .balign PAGE_SIZE; \
13337 ENTRY(name)
13338@@ -338,7 +340,6 @@ ENTRY(name)
13339 i = i + 1 ; \
13340 .endr
13341
13342- .data
13343 /*
13344 * This default setting generates an ident mapping at address 0x100000
13345 * and a mapping for the kernel that precisely maps virtual address
13346@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13347 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13348 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13349 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13350+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13351+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13352+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13353+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13354 .org init_level4_pgt + L4_START_KERNEL*8, 0
13355 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13356 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13357
13358+#ifdef CONFIG_PAX_PER_CPU_PGD
13359+NEXT_PAGE(cpu_pgd)
13360+ .rept NR_CPUS
13361+ .fill 512,8,0
13362+ .endr
13363+#endif
13364+
13365 NEXT_PAGE(level3_ident_pgt)
13366 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13367+#ifdef CONFIG_XEN
13368 .fill 511,8,0
13369+#else
13370+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13371+ .fill 510,8,0
13372+#endif
13373+
13374+NEXT_PAGE(level3_vmalloc_pgt)
13375+ .fill 512,8,0
13376+
13377+NEXT_PAGE(level3_vmemmap_pgt)
13378+ .fill L3_VMEMMAP_START,8,0
13379+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13380
13381 NEXT_PAGE(level3_kernel_pgt)
13382 .fill L3_START_KERNEL,8,0
13383@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13384 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13385 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13386
13387+NEXT_PAGE(level2_vmemmap_pgt)
13388+ .fill 512,8,0
13389+
13390 NEXT_PAGE(level2_fixmap_pgt)
13391- .fill 506,8,0
13392- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13393- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13394- .fill 5,8,0
13395+ .fill 507,8,0
13396+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13397+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13398+ .fill 4,8,0
13399
13400-NEXT_PAGE(level1_fixmap_pgt)
13401+NEXT_PAGE(level1_vsyscall_pgt)
13402 .fill 512,8,0
13403
13404-NEXT_PAGE(level2_ident_pgt)
13405- /* Since I easily can, map the first 1G.
13406+ /* Since I easily can, map the first 2G.
13407 * Don't set NX because code runs from these pages.
13408 */
13409- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13410+NEXT_PAGE(level2_ident_pgt)
13411+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13412
13413 NEXT_PAGE(level2_kernel_pgt)
13414 /*
13415@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13416 * If you want to increase this then increase MODULES_VADDR
13417 * too.)
13418 */
13419- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13420- KERNEL_IMAGE_SIZE/PMD_SIZE)
13421-
13422-NEXT_PAGE(level2_spare_pgt)
13423- .fill 512, 8, 0
13424+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13425
13426 #undef PMDS
13427 #undef NEXT_PAGE
13428
13429- .data
13430+ .align PAGE_SIZE
13431+ENTRY(cpu_gdt_table)
13432+ .rept NR_CPUS
13433+ .quad 0x0000000000000000 /* NULL descriptor */
13434+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13435+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13436+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13437+ .quad 0x00cffb000000ffff /* __USER32_CS */
13438+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13439+ .quad 0x00affb000000ffff /* __USER_CS */
13440+
13441+#ifdef CONFIG_PAX_KERNEXEC
13442+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13443+#else
13444+ .quad 0x0 /* unused */
13445+#endif
13446+
13447+ .quad 0,0 /* TSS */
13448+ .quad 0,0 /* LDT */
13449+ .quad 0,0,0 /* three TLS descriptors */
13450+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13451+ /* asm/segment.h:GDT_ENTRIES must match this */
13452+
13453+ /* zero the remaining page */
13454+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13455+ .endr
13456+
13457 .align 16
13458 .globl early_gdt_descr
13459 early_gdt_descr:
13460 .word GDT_ENTRIES*8-1
13461 early_gdt_descr_base:
13462- .quad INIT_PER_CPU_VAR(gdt_page)
13463+ .quad cpu_gdt_table
13464
13465 ENTRY(phys_base)
13466 /* This must match the first entry in level2_kernel_pgt */
13467 .quad 0x0000000000000000
13468
13469 #include "../../x86/xen/xen-head.S"
13470-
13471- .section .bss, "aw", @nobits
13472+
13473+ .section .rodata,"a",@progbits
13474 .align L1_CACHE_BYTES
13475 ENTRY(idt_table)
13476- .skip IDT_ENTRIES * 16
13477+ .fill 512,8,0
13478
13479 __PAGE_ALIGNED_BSS
13480 .align PAGE_SIZE
13481diff -urNp linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c
13482--- linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13483+++ linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13484@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13485 EXPORT_SYMBOL(cmpxchg8b_emu);
13486 #endif
13487
13488+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13489+
13490 /* Networking helper routines. */
13491 EXPORT_SYMBOL(csum_partial_copy_generic);
13492+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13493+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13494
13495 EXPORT_SYMBOL(__get_user_1);
13496 EXPORT_SYMBOL(__get_user_2);
13497@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13498
13499 EXPORT_SYMBOL(csum_partial);
13500 EXPORT_SYMBOL(empty_zero_page);
13501+
13502+#ifdef CONFIG_PAX_KERNEXEC
13503+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13504+#endif
13505diff -urNp linux-3.0.7/arch/x86/kernel/i8259.c linux-3.0.7/arch/x86/kernel/i8259.c
13506--- linux-3.0.7/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13507+++ linux-3.0.7/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13508@@ -210,7 +210,7 @@ spurious_8259A_irq:
13509 "spurious 8259A interrupt: IRQ%d.\n", irq);
13510 spurious_irq_mask |= irqmask;
13511 }
13512- atomic_inc(&irq_err_count);
13513+ atomic_inc_unchecked(&irq_err_count);
13514 /*
13515 * Theoretically we do not have to handle this IRQ,
13516 * but in Linux this does not cause problems and is
13517diff -urNp linux-3.0.7/arch/x86/kernel/init_task.c linux-3.0.7/arch/x86/kernel/init_task.c
13518--- linux-3.0.7/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13519+++ linux-3.0.7/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13520@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13521 * way process stacks are handled. This is done by having a special
13522 * "init_task" linker map entry..
13523 */
13524-union thread_union init_thread_union __init_task_data =
13525- { INIT_THREAD_INFO(init_task) };
13526+union thread_union init_thread_union __init_task_data;
13527
13528 /*
13529 * Initial task structure.
13530@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13531 * section. Since TSS's are completely CPU-local, we want them
13532 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13533 */
13534-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13535-
13536+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13537+EXPORT_SYMBOL(init_tss);
13538diff -urNp linux-3.0.7/arch/x86/kernel/ioport.c linux-3.0.7/arch/x86/kernel/ioport.c
13539--- linux-3.0.7/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13540+++ linux-3.0.7/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13541@@ -6,6 +6,7 @@
13542 #include <linux/sched.h>
13543 #include <linux/kernel.h>
13544 #include <linux/capability.h>
13545+#include <linux/security.h>
13546 #include <linux/errno.h>
13547 #include <linux/types.h>
13548 #include <linux/ioport.h>
13549@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13550
13551 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13552 return -EINVAL;
13553+#ifdef CONFIG_GRKERNSEC_IO
13554+ if (turn_on && grsec_disable_privio) {
13555+ gr_handle_ioperm();
13556+ return -EPERM;
13557+ }
13558+#endif
13559 if (turn_on && !capable(CAP_SYS_RAWIO))
13560 return -EPERM;
13561
13562@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13563 * because the ->io_bitmap_max value must match the bitmap
13564 * contents:
13565 */
13566- tss = &per_cpu(init_tss, get_cpu());
13567+ tss = init_tss + get_cpu();
13568
13569 if (turn_on)
13570 bitmap_clear(t->io_bitmap_ptr, from, num);
13571@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13572 return -EINVAL;
13573 /* Trying to gain more privileges? */
13574 if (level > old) {
13575+#ifdef CONFIG_GRKERNSEC_IO
13576+ if (grsec_disable_privio) {
13577+ gr_handle_iopl();
13578+ return -EPERM;
13579+ }
13580+#endif
13581 if (!capable(CAP_SYS_RAWIO))
13582 return -EPERM;
13583 }
13584diff -urNp linux-3.0.7/arch/x86/kernel/irq_32.c linux-3.0.7/arch/x86/kernel/irq_32.c
13585--- linux-3.0.7/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13586+++ linux-3.0.7/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13587@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13588 __asm__ __volatile__("andl %%esp,%0" :
13589 "=r" (sp) : "0" (THREAD_SIZE - 1));
13590
13591- return sp < (sizeof(struct thread_info) + STACK_WARN);
13592+ return sp < STACK_WARN;
13593 }
13594
13595 static void print_stack_overflow(void)
13596@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13597 * per-CPU IRQ handling contexts (thread information and stack)
13598 */
13599 union irq_ctx {
13600- struct thread_info tinfo;
13601- u32 stack[THREAD_SIZE/sizeof(u32)];
13602+ unsigned long previous_esp;
13603+ u32 stack[THREAD_SIZE/sizeof(u32)];
13604 } __attribute__((aligned(THREAD_SIZE)));
13605
13606 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13607@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13608 static inline int
13609 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13610 {
13611- union irq_ctx *curctx, *irqctx;
13612+ union irq_ctx *irqctx;
13613 u32 *isp, arg1, arg2;
13614
13615- curctx = (union irq_ctx *) current_thread_info();
13616 irqctx = __this_cpu_read(hardirq_ctx);
13617
13618 /*
13619@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13620 * handler) we can't do that and just have to keep using the
13621 * current stack (which is the irq stack already after all)
13622 */
13623- if (unlikely(curctx == irqctx))
13624+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13625 return 0;
13626
13627 /* build the stack frame on the IRQ stack */
13628- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13629- irqctx->tinfo.task = curctx->tinfo.task;
13630- irqctx->tinfo.previous_esp = current_stack_pointer;
13631+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13632+ irqctx->previous_esp = current_stack_pointer;
13633
13634- /*
13635- * Copy the softirq bits in preempt_count so that the
13636- * softirq checks work in the hardirq context.
13637- */
13638- irqctx->tinfo.preempt_count =
13639- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13640- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13641+#ifdef CONFIG_PAX_MEMORY_UDEREF
13642+ __set_fs(MAKE_MM_SEG(0));
13643+#endif
13644
13645 if (unlikely(overflow))
13646 call_on_stack(print_stack_overflow, isp);
13647@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13648 : "0" (irq), "1" (desc), "2" (isp),
13649 "D" (desc->handle_irq)
13650 : "memory", "cc", "ecx");
13651+
13652+#ifdef CONFIG_PAX_MEMORY_UDEREF
13653+ __set_fs(current_thread_info()->addr_limit);
13654+#endif
13655+
13656 return 1;
13657 }
13658
13659@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13660 */
13661 void __cpuinit irq_ctx_init(int cpu)
13662 {
13663- union irq_ctx *irqctx;
13664-
13665 if (per_cpu(hardirq_ctx, cpu))
13666 return;
13667
13668- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13669- THREAD_FLAGS,
13670- THREAD_ORDER));
13671- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13672- irqctx->tinfo.cpu = cpu;
13673- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13674- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13675-
13676- per_cpu(hardirq_ctx, cpu) = irqctx;
13677-
13678- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13679- THREAD_FLAGS,
13680- THREAD_ORDER));
13681- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13682- irqctx->tinfo.cpu = cpu;
13683- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13684-
13685- per_cpu(softirq_ctx, cpu) = irqctx;
13686+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13687+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13688
13689 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13690 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13691@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13692 asmlinkage void do_softirq(void)
13693 {
13694 unsigned long flags;
13695- struct thread_info *curctx;
13696 union irq_ctx *irqctx;
13697 u32 *isp;
13698
13699@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13700 local_irq_save(flags);
13701
13702 if (local_softirq_pending()) {
13703- curctx = current_thread_info();
13704 irqctx = __this_cpu_read(softirq_ctx);
13705- irqctx->tinfo.task = curctx->task;
13706- irqctx->tinfo.previous_esp = current_stack_pointer;
13707+ irqctx->previous_esp = current_stack_pointer;
13708
13709 /* build the stack frame on the softirq stack */
13710- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13711+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13712+
13713+#ifdef CONFIG_PAX_MEMORY_UDEREF
13714+ __set_fs(MAKE_MM_SEG(0));
13715+#endif
13716
13717 call_on_stack(__do_softirq, isp);
13718+
13719+#ifdef CONFIG_PAX_MEMORY_UDEREF
13720+ __set_fs(current_thread_info()->addr_limit);
13721+#endif
13722+
13723 /*
13724 * Shouldn't happen, we returned above if in_interrupt():
13725 */
13726diff -urNp linux-3.0.7/arch/x86/kernel/irq.c linux-3.0.7/arch/x86/kernel/irq.c
13727--- linux-3.0.7/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13728+++ linux-3.0.7/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13729@@ -17,7 +17,7 @@
13730 #include <asm/mce.h>
13731 #include <asm/hw_irq.h>
13732
13733-atomic_t irq_err_count;
13734+atomic_unchecked_t irq_err_count;
13735
13736 /* Function pointer for generic interrupt vector handling */
13737 void (*x86_platform_ipi_callback)(void) = NULL;
13738@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13739 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13740 seq_printf(p, " Machine check polls\n");
13741 #endif
13742- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13743+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13744 #if defined(CONFIG_X86_IO_APIC)
13745- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13746+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13747 #endif
13748 return 0;
13749 }
13750@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13751
13752 u64 arch_irq_stat(void)
13753 {
13754- u64 sum = atomic_read(&irq_err_count);
13755+ u64 sum = atomic_read_unchecked(&irq_err_count);
13756
13757 #ifdef CONFIG_X86_IO_APIC
13758- sum += atomic_read(&irq_mis_count);
13759+ sum += atomic_read_unchecked(&irq_mis_count);
13760 #endif
13761 return sum;
13762 }
13763diff -urNp linux-3.0.7/arch/x86/kernel/kgdb.c linux-3.0.7/arch/x86/kernel/kgdb.c
13764--- linux-3.0.7/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13765+++ linux-3.0.7/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13766@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13767 #ifdef CONFIG_X86_32
13768 switch (regno) {
13769 case GDB_SS:
13770- if (!user_mode_vm(regs))
13771+ if (!user_mode(regs))
13772 *(unsigned long *)mem = __KERNEL_DS;
13773 break;
13774 case GDB_SP:
13775- if (!user_mode_vm(regs))
13776+ if (!user_mode(regs))
13777 *(unsigned long *)mem = kernel_stack_pointer(regs);
13778 break;
13779 case GDB_GS:
13780@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13781 case 'k':
13782 /* clear the trace bit */
13783 linux_regs->flags &= ~X86_EFLAGS_TF;
13784- atomic_set(&kgdb_cpu_doing_single_step, -1);
13785+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13786
13787 /* set the trace bit if we're stepping */
13788 if (remcomInBuffer[0] == 's') {
13789 linux_regs->flags |= X86_EFLAGS_TF;
13790- atomic_set(&kgdb_cpu_doing_single_step,
13791+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13792 raw_smp_processor_id());
13793 }
13794
13795@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13796 return NOTIFY_DONE;
13797
13798 case DIE_DEBUG:
13799- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13800+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13801 if (user_mode(regs))
13802 return single_step_cont(regs, args);
13803 break;
13804diff -urNp linux-3.0.7/arch/x86/kernel/kprobes.c linux-3.0.7/arch/x86/kernel/kprobes.c
13805--- linux-3.0.7/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13806+++ linux-3.0.7/arch/x86/kernel/kprobes.c 2011-10-11 10:44:33.000000000 -0400
13807@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13808 } __attribute__((packed)) *insn;
13809
13810 insn = (struct __arch_relative_insn *)from;
13811+
13812+ pax_open_kernel();
13813 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13814 insn->op = op;
13815+ pax_close_kernel();
13816 }
13817
13818 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13819@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13820 kprobe_opcode_t opcode;
13821 kprobe_opcode_t *orig_opcodes = opcodes;
13822
13823- if (search_exception_tables((unsigned long)opcodes))
13824+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13825 return 0; /* Page fault may occur on this address. */
13826
13827 retry:
13828@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13829 }
13830 }
13831 insn_get_length(&insn);
13832+ pax_open_kernel();
13833 memcpy(dest, insn.kaddr, insn.length);
13834+ pax_close_kernel();
13835
13836 #ifdef CONFIG_X86_64
13837 if (insn_rip_relative(&insn)) {
13838@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13839 (u8 *) dest;
13840 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13841 disp = (u8 *) dest + insn_offset_displacement(&insn);
13842+ pax_open_kernel();
13843 *(s32 *) disp = (s32) newdisp;
13844+ pax_close_kernel();
13845 }
13846 #endif
13847 return insn.length;
13848@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13849 */
13850 __copy_instruction(p->ainsn.insn, p->addr, 0);
13851
13852- if (can_boost(p->addr))
13853+ if (can_boost(ktla_ktva(p->addr)))
13854 p->ainsn.boostable = 0;
13855 else
13856 p->ainsn.boostable = -1;
13857
13858- p->opcode = *p->addr;
13859+ p->opcode = *(ktla_ktva(p->addr));
13860 }
13861
13862 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13863@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13864 * nor set current_kprobe, because it doesn't use single
13865 * stepping.
13866 */
13867- regs->ip = (unsigned long)p->ainsn.insn;
13868+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13869 preempt_enable_no_resched();
13870 return;
13871 }
13872@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13873 if (p->opcode == BREAKPOINT_INSTRUCTION)
13874 regs->ip = (unsigned long)p->addr;
13875 else
13876- regs->ip = (unsigned long)p->ainsn.insn;
13877+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13878 }
13879
13880 /*
13881@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13882 setup_singlestep(p, regs, kcb, 0);
13883 return 1;
13884 }
13885- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13886+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13887 /*
13888 * The breakpoint instruction was removed right
13889 * after we hit it. Another cpu has removed
13890@@ -680,6 +687,9 @@ static void __used __kprobes kretprobe_t
13891 " movq %rax, 152(%rsp)\n"
13892 RESTORE_REGS_STRING
13893 " popfq\n"
13894+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
13895+ " btsq $63,(%rsp)\n"
13896+#endif
13897 #else
13898 " pushf\n"
13899 SAVE_REGS_STRING
13900@@ -817,7 +827,7 @@ static void __kprobes resume_execution(s
13901 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13902 {
13903 unsigned long *tos = stack_addr(regs);
13904- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13905+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13906 unsigned long orig_ip = (unsigned long)p->addr;
13907 kprobe_opcode_t *insn = p->ainsn.insn;
13908
13909@@ -999,7 +1009,7 @@ int __kprobes kprobe_exceptions_notify(s
13910 struct die_args *args = data;
13911 int ret = NOTIFY_DONE;
13912
13913- if (args->regs && user_mode_vm(args->regs))
13914+ if (args->regs && user_mode(args->regs))
13915 return ret;
13916
13917 switch (val) {
13918@@ -1381,7 +1391,7 @@ int __kprobes arch_prepare_optimized_kpr
13919 * Verify if the address gap is in 2GB range, because this uses
13920 * a relative jump.
13921 */
13922- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13923+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13924 if (abs(rel) > 0x7fffffff)
13925 return -ERANGE;
13926
13927@@ -1402,11 +1412,11 @@ int __kprobes arch_prepare_optimized_kpr
13928 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13929
13930 /* Set probe function call */
13931- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13932+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13933
13934 /* Set returning jmp instruction at the tail of out-of-line buffer */
13935 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13936- (u8 *)op->kp.addr + op->optinsn.size);
13937+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13938
13939 flush_icache_range((unsigned long) buf,
13940 (unsigned long) buf + TMPL_END_IDX +
13941@@ -1428,7 +1438,7 @@ static void __kprobes setup_optimize_kpr
13942 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13943
13944 /* Backup instructions which will be replaced by jump address */
13945- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13946+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13947 RELATIVE_ADDR_SIZE);
13948
13949 insn_buf[0] = RELATIVEJUMP_OPCODE;
13950diff -urNp linux-3.0.7/arch/x86/kernel/kvm.c linux-3.0.7/arch/x86/kernel/kvm.c
13951--- linux-3.0.7/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13952+++ linux-3.0.7/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13953@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13954 pv_mmu_ops.set_pud = kvm_set_pud;
13955 #if PAGETABLE_LEVELS == 4
13956 pv_mmu_ops.set_pgd = kvm_set_pgd;
13957+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13958 #endif
13959 #endif
13960 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13961diff -urNp linux-3.0.7/arch/x86/kernel/ldt.c linux-3.0.7/arch/x86/kernel/ldt.c
13962--- linux-3.0.7/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13963+++ linux-3.0.7/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13964@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13965 if (reload) {
13966 #ifdef CONFIG_SMP
13967 preempt_disable();
13968- load_LDT(pc);
13969+ load_LDT_nolock(pc);
13970 if (!cpumask_equal(mm_cpumask(current->mm),
13971 cpumask_of(smp_processor_id())))
13972 smp_call_function(flush_ldt, current->mm, 1);
13973 preempt_enable();
13974 #else
13975- load_LDT(pc);
13976+ load_LDT_nolock(pc);
13977 #endif
13978 }
13979 if (oldsize) {
13980@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13981 return err;
13982
13983 for (i = 0; i < old->size; i++)
13984- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13985+ write_ldt_entry(new->ldt, i, old->ldt + i);
13986 return 0;
13987 }
13988
13989@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13990 retval = copy_ldt(&mm->context, &old_mm->context);
13991 mutex_unlock(&old_mm->context.lock);
13992 }
13993+
13994+ if (tsk == current) {
13995+ mm->context.vdso = 0;
13996+
13997+#ifdef CONFIG_X86_32
13998+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13999+ mm->context.user_cs_base = 0UL;
14000+ mm->context.user_cs_limit = ~0UL;
14001+
14002+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14003+ cpus_clear(mm->context.cpu_user_cs_mask);
14004+#endif
14005+
14006+#endif
14007+#endif
14008+
14009+ }
14010+
14011 return retval;
14012 }
14013
14014@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14015 }
14016 }
14017
14018+#ifdef CONFIG_PAX_SEGMEXEC
14019+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14020+ error = -EINVAL;
14021+ goto out_unlock;
14022+ }
14023+#endif
14024+
14025 fill_ldt(&ldt, &ldt_info);
14026 if (oldmode)
14027 ldt.avl = 0;
14028diff -urNp linux-3.0.7/arch/x86/kernel/machine_kexec_32.c linux-3.0.7/arch/x86/kernel/machine_kexec_32.c
14029--- linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
14030+++ linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
14031@@ -27,7 +27,7 @@
14032 #include <asm/cacheflush.h>
14033 #include <asm/debugreg.h>
14034
14035-static void set_idt(void *newidt, __u16 limit)
14036+static void set_idt(struct desc_struct *newidt, __u16 limit)
14037 {
14038 struct desc_ptr curidt;
14039
14040@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14041 }
14042
14043
14044-static void set_gdt(void *newgdt, __u16 limit)
14045+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14046 {
14047 struct desc_ptr curgdt;
14048
14049@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14050 }
14051
14052 control_page = page_address(image->control_code_page);
14053- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14054+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14055
14056 relocate_kernel_ptr = control_page;
14057 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14058diff -urNp linux-3.0.7/arch/x86/kernel/microcode_intel.c linux-3.0.7/arch/x86/kernel/microcode_intel.c
14059--- linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14060+++ linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14061@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14062
14063 static int get_ucode_user(void *to, const void *from, size_t n)
14064 {
14065- return copy_from_user(to, from, n);
14066+ return copy_from_user(to, (const void __force_user *)from, n);
14067 }
14068
14069 static enum ucode_state
14070 request_microcode_user(int cpu, const void __user *buf, size_t size)
14071 {
14072- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14073+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14074 }
14075
14076 static void microcode_fini_cpu(int cpu)
14077diff -urNp linux-3.0.7/arch/x86/kernel/module.c linux-3.0.7/arch/x86/kernel/module.c
14078--- linux-3.0.7/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14079+++ linux-3.0.7/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14080@@ -36,21 +36,66 @@
14081 #define DEBUGP(fmt...)
14082 #endif
14083
14084-void *module_alloc(unsigned long size)
14085+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14086 {
14087 if (PAGE_ALIGN(size) > MODULES_LEN)
14088 return NULL;
14089 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14090- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14091+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14092 -1, __builtin_return_address(0));
14093 }
14094
14095+void *module_alloc(unsigned long size)
14096+{
14097+
14098+#ifdef CONFIG_PAX_KERNEXEC
14099+ return __module_alloc(size, PAGE_KERNEL);
14100+#else
14101+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14102+#endif
14103+
14104+}
14105+
14106 /* Free memory returned from module_alloc */
14107 void module_free(struct module *mod, void *module_region)
14108 {
14109 vfree(module_region);
14110 }
14111
14112+#ifdef CONFIG_PAX_KERNEXEC
14113+#ifdef CONFIG_X86_32
14114+void *module_alloc_exec(unsigned long size)
14115+{
14116+ struct vm_struct *area;
14117+
14118+ if (size == 0)
14119+ return NULL;
14120+
14121+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14122+ return area ? area->addr : NULL;
14123+}
14124+EXPORT_SYMBOL(module_alloc_exec);
14125+
14126+void module_free_exec(struct module *mod, void *module_region)
14127+{
14128+ vunmap(module_region);
14129+}
14130+EXPORT_SYMBOL(module_free_exec);
14131+#else
14132+void module_free_exec(struct module *mod, void *module_region)
14133+{
14134+ module_free(mod, module_region);
14135+}
14136+EXPORT_SYMBOL(module_free_exec);
14137+
14138+void *module_alloc_exec(unsigned long size)
14139+{
14140+ return __module_alloc(size, PAGE_KERNEL_RX);
14141+}
14142+EXPORT_SYMBOL(module_alloc_exec);
14143+#endif
14144+#endif
14145+
14146 /* We don't need anything special. */
14147 int module_frob_arch_sections(Elf_Ehdr *hdr,
14148 Elf_Shdr *sechdrs,
14149@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14150 unsigned int i;
14151 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14152 Elf32_Sym *sym;
14153- uint32_t *location;
14154+ uint32_t *plocation, location;
14155
14156 DEBUGP("Applying relocate section %u to %u\n", relsec,
14157 sechdrs[relsec].sh_info);
14158 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14159 /* This is where to make the change */
14160- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14161- + rel[i].r_offset;
14162+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14163+ location = (uint32_t)plocation;
14164+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14165+ plocation = ktla_ktva((void *)plocation);
14166 /* This is the symbol it is referring to. Note that all
14167 undefined symbols have been resolved. */
14168 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14169@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14170 switch (ELF32_R_TYPE(rel[i].r_info)) {
14171 case R_386_32:
14172 /* We add the value into the location given */
14173- *location += sym->st_value;
14174+ pax_open_kernel();
14175+ *plocation += sym->st_value;
14176+ pax_close_kernel();
14177 break;
14178 case R_386_PC32:
14179 /* Add the value, subtract its postition */
14180- *location += sym->st_value - (uint32_t)location;
14181+ pax_open_kernel();
14182+ *plocation += sym->st_value - location;
14183+ pax_close_kernel();
14184 break;
14185 default:
14186 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14187@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14188 case R_X86_64_NONE:
14189 break;
14190 case R_X86_64_64:
14191+ pax_open_kernel();
14192 *(u64 *)loc = val;
14193+ pax_close_kernel();
14194 break;
14195 case R_X86_64_32:
14196+ pax_open_kernel();
14197 *(u32 *)loc = val;
14198+ pax_close_kernel();
14199 if (val != *(u32 *)loc)
14200 goto overflow;
14201 break;
14202 case R_X86_64_32S:
14203+ pax_open_kernel();
14204 *(s32 *)loc = val;
14205+ pax_close_kernel();
14206 if ((s64)val != *(s32 *)loc)
14207 goto overflow;
14208 break;
14209 case R_X86_64_PC32:
14210 val -= (u64)loc;
14211+ pax_open_kernel();
14212 *(u32 *)loc = val;
14213+ pax_close_kernel();
14214+
14215 #if 0
14216 if ((s64)val != *(s32 *)loc)
14217 goto overflow;
14218diff -urNp linux-3.0.7/arch/x86/kernel/paravirt.c linux-3.0.7/arch/x86/kernel/paravirt.c
14219--- linux-3.0.7/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14220+++ linux-3.0.7/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14221@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14222 {
14223 return x;
14224 }
14225+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14226+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14227+#endif
14228
14229 void __init default_banner(void)
14230 {
14231@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14232 * corresponding structure. */
14233 static void *get_call_destination(u8 type)
14234 {
14235- struct paravirt_patch_template tmpl = {
14236+ const struct paravirt_patch_template tmpl = {
14237 .pv_init_ops = pv_init_ops,
14238 .pv_time_ops = pv_time_ops,
14239 .pv_cpu_ops = pv_cpu_ops,
14240@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14241 .pv_lock_ops = pv_lock_ops,
14242 #endif
14243 };
14244+
14245+ pax_track_stack();
14246+
14247 return *((void **)&tmpl + type);
14248 }
14249
14250@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14251 if (opfunc == NULL)
14252 /* If there's no function, patch it with a ud2a (BUG) */
14253 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14254- else if (opfunc == _paravirt_nop)
14255+ else if (opfunc == (void *)_paravirt_nop)
14256 /* If the operation is a nop, then nop the callsite */
14257 ret = paravirt_patch_nop();
14258
14259 /* identity functions just return their single argument */
14260- else if (opfunc == _paravirt_ident_32)
14261+ else if (opfunc == (void *)_paravirt_ident_32)
14262 ret = paravirt_patch_ident_32(insnbuf, len);
14263- else if (opfunc == _paravirt_ident_64)
14264+ else if (opfunc == (void *)_paravirt_ident_64)
14265 ret = paravirt_patch_ident_64(insnbuf, len);
14266+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14267+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14268+ ret = paravirt_patch_ident_64(insnbuf, len);
14269+#endif
14270
14271 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14272 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14273@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14274 if (insn_len > len || start == NULL)
14275 insn_len = len;
14276 else
14277- memcpy(insnbuf, start, insn_len);
14278+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14279
14280 return insn_len;
14281 }
14282@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14283 preempt_enable();
14284 }
14285
14286-struct pv_info pv_info = {
14287+struct pv_info pv_info __read_only = {
14288 .name = "bare hardware",
14289 .paravirt_enabled = 0,
14290 .kernel_rpl = 0,
14291 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14292 };
14293
14294-struct pv_init_ops pv_init_ops = {
14295+struct pv_init_ops pv_init_ops __read_only = {
14296 .patch = native_patch,
14297 };
14298
14299-struct pv_time_ops pv_time_ops = {
14300+struct pv_time_ops pv_time_ops __read_only = {
14301 .sched_clock = native_sched_clock,
14302 };
14303
14304-struct pv_irq_ops pv_irq_ops = {
14305+struct pv_irq_ops pv_irq_ops __read_only = {
14306 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14307 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14308 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14309@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14310 #endif
14311 };
14312
14313-struct pv_cpu_ops pv_cpu_ops = {
14314+struct pv_cpu_ops pv_cpu_ops __read_only = {
14315 .cpuid = native_cpuid,
14316 .get_debugreg = native_get_debugreg,
14317 .set_debugreg = native_set_debugreg,
14318@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14319 .end_context_switch = paravirt_nop,
14320 };
14321
14322-struct pv_apic_ops pv_apic_ops = {
14323+struct pv_apic_ops pv_apic_ops __read_only = {
14324 #ifdef CONFIG_X86_LOCAL_APIC
14325 .startup_ipi_hook = paravirt_nop,
14326 #endif
14327 };
14328
14329-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14330+#ifdef CONFIG_X86_32
14331+#ifdef CONFIG_X86_PAE
14332+/* 64-bit pagetable entries */
14333+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14334+#else
14335 /* 32-bit pagetable entries */
14336 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14337+#endif
14338 #else
14339 /* 64-bit pagetable entries */
14340 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14341 #endif
14342
14343-struct pv_mmu_ops pv_mmu_ops = {
14344+struct pv_mmu_ops pv_mmu_ops __read_only = {
14345
14346 .read_cr2 = native_read_cr2,
14347 .write_cr2 = native_write_cr2,
14348@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14349 .make_pud = PTE_IDENT,
14350
14351 .set_pgd = native_set_pgd,
14352+ .set_pgd_batched = native_set_pgd_batched,
14353 #endif
14354 #endif /* PAGETABLE_LEVELS >= 3 */
14355
14356@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14357 },
14358
14359 .set_fixmap = native_set_fixmap,
14360+
14361+#ifdef CONFIG_PAX_KERNEXEC
14362+ .pax_open_kernel = native_pax_open_kernel,
14363+ .pax_close_kernel = native_pax_close_kernel,
14364+#endif
14365+
14366 };
14367
14368 EXPORT_SYMBOL_GPL(pv_time_ops);
14369diff -urNp linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c
14370--- linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14371+++ linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14372@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14373 arch_spin_lock(lock);
14374 }
14375
14376-struct pv_lock_ops pv_lock_ops = {
14377+struct pv_lock_ops pv_lock_ops __read_only = {
14378 #ifdef CONFIG_SMP
14379 .spin_is_locked = __ticket_spin_is_locked,
14380 .spin_is_contended = __ticket_spin_is_contended,
14381diff -urNp linux-3.0.7/arch/x86/kernel/pci-iommu_table.c linux-3.0.7/arch/x86/kernel/pci-iommu_table.c
14382--- linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14383+++ linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14384@@ -2,7 +2,7 @@
14385 #include <asm/iommu_table.h>
14386 #include <linux/string.h>
14387 #include <linux/kallsyms.h>
14388-
14389+#include <linux/sched.h>
14390
14391 #define DEBUG 1
14392
14393@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14394 {
14395 struct iommu_table_entry *p, *q, *x;
14396
14397+ pax_track_stack();
14398+
14399 /* Simple cyclic dependency checker. */
14400 for (p = start; p < finish; p++) {
14401 q = find_dependents_of(start, finish, p);
14402diff -urNp linux-3.0.7/arch/x86/kernel/process_32.c linux-3.0.7/arch/x86/kernel/process_32.c
14403--- linux-3.0.7/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14404+++ linux-3.0.7/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14405@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14406 unsigned long thread_saved_pc(struct task_struct *tsk)
14407 {
14408 return ((unsigned long *)tsk->thread.sp)[3];
14409+//XXX return tsk->thread.eip;
14410 }
14411
14412 #ifndef CONFIG_SMP
14413@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14414 unsigned long sp;
14415 unsigned short ss, gs;
14416
14417- if (user_mode_vm(regs)) {
14418+ if (user_mode(regs)) {
14419 sp = regs->sp;
14420 ss = regs->ss & 0xffff;
14421- gs = get_user_gs(regs);
14422 } else {
14423 sp = kernel_stack_pointer(regs);
14424 savesegment(ss, ss);
14425- savesegment(gs, gs);
14426 }
14427+ gs = get_user_gs(regs);
14428
14429 show_regs_common();
14430
14431@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14432 struct task_struct *tsk;
14433 int err;
14434
14435- childregs = task_pt_regs(p);
14436+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14437 *childregs = *regs;
14438 childregs->ax = 0;
14439 childregs->sp = sp;
14440
14441 p->thread.sp = (unsigned long) childregs;
14442 p->thread.sp0 = (unsigned long) (childregs+1);
14443+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14444
14445 p->thread.ip = (unsigned long) ret_from_fork;
14446
14447@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14448 struct thread_struct *prev = &prev_p->thread,
14449 *next = &next_p->thread;
14450 int cpu = smp_processor_id();
14451- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14452+ struct tss_struct *tss = init_tss + cpu;
14453 bool preload_fpu;
14454
14455 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14456@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14457 */
14458 lazy_save_gs(prev->gs);
14459
14460+#ifdef CONFIG_PAX_MEMORY_UDEREF
14461+ __set_fs(task_thread_info(next_p)->addr_limit);
14462+#endif
14463+
14464 /*
14465 * Load the per-thread Thread-Local Storage descriptor.
14466 */
14467@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14468 */
14469 arch_end_context_switch(next_p);
14470
14471+ percpu_write(current_task, next_p);
14472+ percpu_write(current_tinfo, &next_p->tinfo);
14473+
14474 if (preload_fpu)
14475 __math_state_restore();
14476
14477@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14478 if (prev->gs | next->gs)
14479 lazy_load_gs(next->gs);
14480
14481- percpu_write(current_task, next_p);
14482-
14483 return prev_p;
14484 }
14485
14486@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14487 } while (count++ < 16);
14488 return 0;
14489 }
14490-
14491diff -urNp linux-3.0.7/arch/x86/kernel/process_64.c linux-3.0.7/arch/x86/kernel/process_64.c
14492--- linux-3.0.7/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14493+++ linux-3.0.7/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14494@@ -87,7 +87,7 @@ static void __exit_idle(void)
14495 void exit_idle(void)
14496 {
14497 /* idle loop has pid 0 */
14498- if (current->pid)
14499+ if (task_pid_nr(current))
14500 return;
14501 __exit_idle();
14502 }
14503@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14504 struct pt_regs *childregs;
14505 struct task_struct *me = current;
14506
14507- childregs = ((struct pt_regs *)
14508- (THREAD_SIZE + task_stack_page(p))) - 1;
14509+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14510 *childregs = *regs;
14511
14512 childregs->ax = 0;
14513@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14514 p->thread.sp = (unsigned long) childregs;
14515 p->thread.sp0 = (unsigned long) (childregs+1);
14516 p->thread.usersp = me->thread.usersp;
14517+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14518
14519 set_tsk_thread_flag(p, TIF_FORK);
14520
14521@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14522 struct thread_struct *prev = &prev_p->thread;
14523 struct thread_struct *next = &next_p->thread;
14524 int cpu = smp_processor_id();
14525- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14526+ struct tss_struct *tss = init_tss + cpu;
14527 unsigned fsindex, gsindex;
14528 bool preload_fpu;
14529
14530@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14531 prev->usersp = percpu_read(old_rsp);
14532 percpu_write(old_rsp, next->usersp);
14533 percpu_write(current_task, next_p);
14534+ percpu_write(current_tinfo, &next_p->tinfo);
14535
14536- percpu_write(kernel_stack,
14537- (unsigned long)task_stack_page(next_p) +
14538- THREAD_SIZE - KERNEL_STACK_OFFSET);
14539+ percpu_write(kernel_stack, next->sp0);
14540
14541 /*
14542 * Now maybe reload the debug registers and handle I/O bitmaps
14543@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14544 if (!p || p == current || p->state == TASK_RUNNING)
14545 return 0;
14546 stack = (unsigned long)task_stack_page(p);
14547- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14548+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14549 return 0;
14550 fp = *(u64 *)(p->thread.sp);
14551 do {
14552- if (fp < (unsigned long)stack ||
14553- fp >= (unsigned long)stack+THREAD_SIZE)
14554+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14555 return 0;
14556 ip = *(u64 *)(fp+8);
14557 if (!in_sched_functions(ip))
14558diff -urNp linux-3.0.7/arch/x86/kernel/process.c linux-3.0.7/arch/x86/kernel/process.c
14559--- linux-3.0.7/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14560+++ linux-3.0.7/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14561@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14562
14563 void free_thread_info(struct thread_info *ti)
14564 {
14565- free_thread_xstate(ti->task);
14566 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14567 }
14568
14569+static struct kmem_cache *task_struct_cachep;
14570+
14571 void arch_task_cache_init(void)
14572 {
14573- task_xstate_cachep =
14574- kmem_cache_create("task_xstate", xstate_size,
14575+ /* create a slab on which task_structs can be allocated */
14576+ task_struct_cachep =
14577+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14578+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14579+
14580+ task_xstate_cachep =
14581+ kmem_cache_create("task_xstate", xstate_size,
14582 __alignof__(union thread_xstate),
14583- SLAB_PANIC | SLAB_NOTRACK, NULL);
14584+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14585+}
14586+
14587+struct task_struct *alloc_task_struct_node(int node)
14588+{
14589+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14590+}
14591+
14592+void free_task_struct(struct task_struct *task)
14593+{
14594+ free_thread_xstate(task);
14595+ kmem_cache_free(task_struct_cachep, task);
14596 }
14597
14598 /*
14599@@ -70,7 +87,7 @@ void exit_thread(void)
14600 unsigned long *bp = t->io_bitmap_ptr;
14601
14602 if (bp) {
14603- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14604+ struct tss_struct *tss = init_tss + get_cpu();
14605
14606 t->io_bitmap_ptr = NULL;
14607 clear_thread_flag(TIF_IO_BITMAP);
14608@@ -106,7 +123,7 @@ void show_regs_common(void)
14609
14610 printk(KERN_CONT "\n");
14611 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14612- current->pid, current->comm, print_tainted(),
14613+ task_pid_nr(current), current->comm, print_tainted(),
14614 init_utsname()->release,
14615 (int)strcspn(init_utsname()->version, " "),
14616 init_utsname()->version);
14617@@ -120,6 +137,9 @@ void flush_thread(void)
14618 {
14619 struct task_struct *tsk = current;
14620
14621+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14622+ loadsegment(gs, 0);
14623+#endif
14624 flush_ptrace_hw_breakpoint(tsk);
14625 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14626 /*
14627@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14628 regs.di = (unsigned long) arg;
14629
14630 #ifdef CONFIG_X86_32
14631- regs.ds = __USER_DS;
14632- regs.es = __USER_DS;
14633+ regs.ds = __KERNEL_DS;
14634+ regs.es = __KERNEL_DS;
14635 regs.fs = __KERNEL_PERCPU;
14636- regs.gs = __KERNEL_STACK_CANARY;
14637+ savesegment(gs, regs.gs);
14638 #else
14639 regs.ss = __KERNEL_DS;
14640 #endif
14641@@ -403,7 +423,7 @@ void default_idle(void)
14642 EXPORT_SYMBOL(default_idle);
14643 #endif
14644
14645-void stop_this_cpu(void *dummy)
14646+__noreturn void stop_this_cpu(void *dummy)
14647 {
14648 local_irq_disable();
14649 /*
14650@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14651 }
14652 early_param("idle", idle_setup);
14653
14654-unsigned long arch_align_stack(unsigned long sp)
14655+#ifdef CONFIG_PAX_RANDKSTACK
14656+void pax_randomize_kstack(struct pt_regs *regs)
14657 {
14658- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14659- sp -= get_random_int() % 8192;
14660- return sp & ~0xf;
14661-}
14662+ struct thread_struct *thread = &current->thread;
14663+ unsigned long time;
14664
14665-unsigned long arch_randomize_brk(struct mm_struct *mm)
14666-{
14667- unsigned long range_end = mm->brk + 0x02000000;
14668- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14669-}
14670+ if (!randomize_va_space)
14671+ return;
14672+
14673+ if (v8086_mode(regs))
14674+ return;
14675
14676+ rdtscl(time);
14677+
14678+ /* P4 seems to return a 0 LSB, ignore it */
14679+#ifdef CONFIG_MPENTIUM4
14680+ time &= 0x3EUL;
14681+ time <<= 2;
14682+#elif defined(CONFIG_X86_64)
14683+ time &= 0xFUL;
14684+ time <<= 4;
14685+#else
14686+ time &= 0x1FUL;
14687+ time <<= 3;
14688+#endif
14689+
14690+ thread->sp0 ^= time;
14691+ load_sp0(init_tss + smp_processor_id(), thread);
14692+
14693+#ifdef CONFIG_X86_64
14694+ percpu_write(kernel_stack, thread->sp0);
14695+#endif
14696+}
14697+#endif
14698diff -urNp linux-3.0.7/arch/x86/kernel/ptrace.c linux-3.0.7/arch/x86/kernel/ptrace.c
14699--- linux-3.0.7/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14700+++ linux-3.0.7/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14701@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14702 unsigned long addr, unsigned long data)
14703 {
14704 int ret;
14705- unsigned long __user *datap = (unsigned long __user *)data;
14706+ unsigned long __user *datap = (__force unsigned long __user *)data;
14707
14708 switch (request) {
14709 /* read the word at location addr in the USER area. */
14710@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14711 if ((int) addr < 0)
14712 return -EIO;
14713 ret = do_get_thread_area(child, addr,
14714- (struct user_desc __user *)data);
14715+ (__force struct user_desc __user *) data);
14716 break;
14717
14718 case PTRACE_SET_THREAD_AREA:
14719 if ((int) addr < 0)
14720 return -EIO;
14721 ret = do_set_thread_area(child, addr,
14722- (struct user_desc __user *)data, 0);
14723+ (__force struct user_desc __user *) data, 0);
14724 break;
14725 #endif
14726
14727@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14728 memset(info, 0, sizeof(*info));
14729 info->si_signo = SIGTRAP;
14730 info->si_code = si_code;
14731- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14732+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14733 }
14734
14735 void user_single_step_siginfo(struct task_struct *tsk,
14736diff -urNp linux-3.0.7/arch/x86/kernel/pvclock.c linux-3.0.7/arch/x86/kernel/pvclock.c
14737--- linux-3.0.7/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14738+++ linux-3.0.7/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14739@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14740 return pv_tsc_khz;
14741 }
14742
14743-static atomic64_t last_value = ATOMIC64_INIT(0);
14744+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14745
14746 void pvclock_resume(void)
14747 {
14748- atomic64_set(&last_value, 0);
14749+ atomic64_set_unchecked(&last_value, 0);
14750 }
14751
14752 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14753@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14754 * updating at the same time, and one of them could be slightly behind,
14755 * making the assumption that last_value always go forward fail to hold.
14756 */
14757- last = atomic64_read(&last_value);
14758+ last = atomic64_read_unchecked(&last_value);
14759 do {
14760 if (ret < last)
14761 return last;
14762- last = atomic64_cmpxchg(&last_value, last, ret);
14763+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14764 } while (unlikely(last != ret));
14765
14766 return ret;
14767diff -urNp linux-3.0.7/arch/x86/kernel/reboot.c linux-3.0.7/arch/x86/kernel/reboot.c
14768--- linux-3.0.7/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14769+++ linux-3.0.7/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14770@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14771 EXPORT_SYMBOL(pm_power_off);
14772
14773 static const struct desc_ptr no_idt = {};
14774-static int reboot_mode;
14775+static unsigned short reboot_mode;
14776 enum reboot_type reboot_type = BOOT_ACPI;
14777 int reboot_force;
14778
14779@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14780 extern const unsigned char machine_real_restart_asm[];
14781 extern const u64 machine_real_restart_gdt[3];
14782
14783-void machine_real_restart(unsigned int type)
14784+__noreturn void machine_real_restart(unsigned int type)
14785 {
14786 void *restart_va;
14787 unsigned long restart_pa;
14788- void (*restart_lowmem)(unsigned int);
14789+ void (* __noreturn restart_lowmem)(unsigned int);
14790 u64 *lowmem_gdt;
14791
14792+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14793+ struct desc_struct *gdt;
14794+#endif
14795+
14796 local_irq_disable();
14797
14798 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14799@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14800 boot)". This seems like a fairly standard thing that gets set by
14801 REBOOT.COM programs, and the previous reset routine did this
14802 too. */
14803- *((unsigned short *)0x472) = reboot_mode;
14804+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14805
14806 /* Patch the GDT in the low memory trampoline */
14807 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14808
14809 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14810 restart_pa = virt_to_phys(restart_va);
14811- restart_lowmem = (void (*)(unsigned int))restart_pa;
14812+ restart_lowmem = (void *)restart_pa;
14813
14814 /* GDT[0]: GDT self-pointer */
14815 lowmem_gdt[0] =
14816@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14817 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14818
14819 /* Jump to the identity-mapped low memory code */
14820+
14821+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14822+ gdt = get_cpu_gdt_table(smp_processor_id());
14823+ pax_open_kernel();
14824+#ifdef CONFIG_PAX_MEMORY_UDEREF
14825+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14826+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14827+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14828+#endif
14829+#ifdef CONFIG_PAX_KERNEXEC
14830+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14831+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14832+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14833+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14834+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14835+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14836+#endif
14837+ pax_close_kernel();
14838+#endif
14839+
14840+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14841+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14842+ unreachable();
14843+#else
14844 restart_lowmem(type);
14845+#endif
14846+
14847 }
14848 #ifdef CONFIG_APM_MODULE
14849 EXPORT_SYMBOL(machine_real_restart);
14850@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14851 * try to force a triple fault and then cycle between hitting the keyboard
14852 * controller and doing that
14853 */
14854-static void native_machine_emergency_restart(void)
14855+__noreturn static void native_machine_emergency_restart(void)
14856 {
14857 int i;
14858 int attempt = 0;
14859@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14860 #endif
14861 }
14862
14863-static void __machine_emergency_restart(int emergency)
14864+static __noreturn void __machine_emergency_restart(int emergency)
14865 {
14866 reboot_emergency = emergency;
14867 machine_ops.emergency_restart();
14868 }
14869
14870-static void native_machine_restart(char *__unused)
14871+static __noreturn void native_machine_restart(char *__unused)
14872 {
14873 printk("machine restart\n");
14874
14875@@ -662,7 +692,7 @@ static void native_machine_restart(char
14876 __machine_emergency_restart(0);
14877 }
14878
14879-static void native_machine_halt(void)
14880+static __noreturn void native_machine_halt(void)
14881 {
14882 /* stop other cpus and apics */
14883 machine_shutdown();
14884@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14885 stop_this_cpu(NULL);
14886 }
14887
14888-static void native_machine_power_off(void)
14889+__noreturn static void native_machine_power_off(void)
14890 {
14891 if (pm_power_off) {
14892 if (!reboot_force)
14893@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14894 }
14895 /* a fallback in case there is no PM info available */
14896 tboot_shutdown(TB_SHUTDOWN_HALT);
14897+ unreachable();
14898 }
14899
14900 struct machine_ops machine_ops = {
14901diff -urNp linux-3.0.7/arch/x86/kernel/setup.c linux-3.0.7/arch/x86/kernel/setup.c
14902--- linux-3.0.7/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14903+++ linux-3.0.7/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
14904@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
14905
14906 switch (data->type) {
14907 case SETUP_E820_EXT:
14908- parse_e820_ext(data);
14909+ parse_e820_ext((struct setup_data __force_kernel *)data);
14910 break;
14911 case SETUP_DTB:
14912 add_dtb(pa_data);
14913@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14914 * area (640->1Mb) as ram even though it is not.
14915 * take them out.
14916 */
14917- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14918+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14919 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14920 }
14921
14922@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14923
14924 if (!boot_params.hdr.root_flags)
14925 root_mountflags &= ~MS_RDONLY;
14926- init_mm.start_code = (unsigned long) _text;
14927- init_mm.end_code = (unsigned long) _etext;
14928+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14929+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14930 init_mm.end_data = (unsigned long) _edata;
14931 init_mm.brk = _brk_end;
14932
14933- code_resource.start = virt_to_phys(_text);
14934- code_resource.end = virt_to_phys(_etext)-1;
14935- data_resource.start = virt_to_phys(_etext);
14936+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14937+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14938+ data_resource.start = virt_to_phys(_sdata);
14939 data_resource.end = virt_to_phys(_edata)-1;
14940 bss_resource.start = virt_to_phys(&__bss_start);
14941 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14942diff -urNp linux-3.0.7/arch/x86/kernel/setup_percpu.c linux-3.0.7/arch/x86/kernel/setup_percpu.c
14943--- linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14944+++ linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14945@@ -21,19 +21,17 @@
14946 #include <asm/cpu.h>
14947 #include <asm/stackprotector.h>
14948
14949-DEFINE_PER_CPU(int, cpu_number);
14950+#ifdef CONFIG_SMP
14951+DEFINE_PER_CPU(unsigned int, cpu_number);
14952 EXPORT_PER_CPU_SYMBOL(cpu_number);
14953+#endif
14954
14955-#ifdef CONFIG_X86_64
14956 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14957-#else
14958-#define BOOT_PERCPU_OFFSET 0
14959-#endif
14960
14961 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14962 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14963
14964-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14965+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14966 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14967 };
14968 EXPORT_SYMBOL(__per_cpu_offset);
14969@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14970 {
14971 #ifdef CONFIG_X86_32
14972 struct desc_struct gdt;
14973+ unsigned long base = per_cpu_offset(cpu);
14974
14975- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14976- 0x2 | DESCTYPE_S, 0x8);
14977- gdt.s = 1;
14978+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14979+ 0x83 | DESCTYPE_S, 0xC);
14980 write_gdt_entry(get_cpu_gdt_table(cpu),
14981 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14982 #endif
14983@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14984 /* alrighty, percpu areas up and running */
14985 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14986 for_each_possible_cpu(cpu) {
14987+#ifdef CONFIG_CC_STACKPROTECTOR
14988+#ifdef CONFIG_X86_32
14989+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14990+#endif
14991+#endif
14992 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14993 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14994 per_cpu(cpu_number, cpu) = cpu;
14995@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14996 */
14997 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14998 #endif
14999+#ifdef CONFIG_CC_STACKPROTECTOR
15000+#ifdef CONFIG_X86_32
15001+ if (!cpu)
15002+ per_cpu(stack_canary.canary, cpu) = canary;
15003+#endif
15004+#endif
15005 /*
15006 * Up to this point, the boot CPU has been using .init.data
15007 * area. Reload any changed state for the boot CPU.
15008diff -urNp linux-3.0.7/arch/x86/kernel/signal.c linux-3.0.7/arch/x86/kernel/signal.c
15009--- linux-3.0.7/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
15010+++ linux-3.0.7/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
15011@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15012 * Align the stack pointer according to the i386 ABI,
15013 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15014 */
15015- sp = ((sp + 4) & -16ul) - 4;
15016+ sp = ((sp - 12) & -16ul) - 4;
15017 #else /* !CONFIG_X86_32 */
15018 sp = round_down(sp, 16) - 8;
15019 #endif
15020@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15021 * Return an always-bogus address instead so we will die with SIGSEGV.
15022 */
15023 if (onsigstack && !likely(on_sig_stack(sp)))
15024- return (void __user *)-1L;
15025+ return (__force void __user *)-1L;
15026
15027 /* save i387 state */
15028 if (used_math() && save_i387_xstate(*fpstate) < 0)
15029- return (void __user *)-1L;
15030+ return (__force void __user *)-1L;
15031
15032 return (void __user *)sp;
15033 }
15034@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15035 }
15036
15037 if (current->mm->context.vdso)
15038- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15039+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15040 else
15041- restorer = &frame->retcode;
15042+ restorer = (void __user *)&frame->retcode;
15043 if (ka->sa.sa_flags & SA_RESTORER)
15044 restorer = ka->sa.sa_restorer;
15045
15046@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15047 * reasons and because gdb uses it as a signature to notice
15048 * signal handler stack frames.
15049 */
15050- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15051+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15052
15053 if (err)
15054 return -EFAULT;
15055@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15056 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15057
15058 /* Set up to return from userspace. */
15059- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15060+ if (current->mm->context.vdso)
15061+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15062+ else
15063+ restorer = (void __user *)&frame->retcode;
15064 if (ka->sa.sa_flags & SA_RESTORER)
15065 restorer = ka->sa.sa_restorer;
15066 put_user_ex(restorer, &frame->pretcode);
15067@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15068 * reasons and because gdb uses it as a signature to notice
15069 * signal handler stack frames.
15070 */
15071- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15072+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15073 } put_user_catch(err);
15074
15075 if (err)
15076@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15077 int signr;
15078 sigset_t *oldset;
15079
15080+ pax_track_stack();
15081+
15082 /*
15083 * We want the common case to go fast, which is why we may in certain
15084 * cases get here from kernel mode. Just return without doing anything
15085@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15086 * X86_32: vm86 regs switched out by assembly code before reaching
15087 * here, so testing against kernel CS suffices.
15088 */
15089- if (!user_mode(regs))
15090+ if (!user_mode_novm(regs))
15091 return;
15092
15093 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
15094diff -urNp linux-3.0.7/arch/x86/kernel/smpboot.c linux-3.0.7/arch/x86/kernel/smpboot.c
15095--- linux-3.0.7/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15096+++ linux-3.0.7/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15097@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15098 set_idle_for_cpu(cpu, c_idle.idle);
15099 do_rest:
15100 per_cpu(current_task, cpu) = c_idle.idle;
15101+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15102 #ifdef CONFIG_X86_32
15103 /* Stack for startup_32 can be just as for start_secondary onwards */
15104 irq_ctx_init(cpu);
15105 #else
15106 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15107 initial_gs = per_cpu_offset(cpu);
15108- per_cpu(kernel_stack, cpu) =
15109- (unsigned long)task_stack_page(c_idle.idle) -
15110- KERNEL_STACK_OFFSET + THREAD_SIZE;
15111+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15112 #endif
15113+
15114+ pax_open_kernel();
15115 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15116+ pax_close_kernel();
15117+
15118 initial_code = (unsigned long)start_secondary;
15119 stack_start = c_idle.idle->thread.sp;
15120
15121@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15122
15123 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15124
15125+#ifdef CONFIG_PAX_PER_CPU_PGD
15126+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15127+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15128+ KERNEL_PGD_PTRS);
15129+#endif
15130+
15131 err = do_boot_cpu(apicid, cpu);
15132 if (err) {
15133 pr_debug("do_boot_cpu failed %d\n", err);
15134diff -urNp linux-3.0.7/arch/x86/kernel/step.c linux-3.0.7/arch/x86/kernel/step.c
15135--- linux-3.0.7/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15136+++ linux-3.0.7/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15137@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15138 struct desc_struct *desc;
15139 unsigned long base;
15140
15141- seg &= ~7UL;
15142+ seg >>= 3;
15143
15144 mutex_lock(&child->mm->context.lock);
15145- if (unlikely((seg >> 3) >= child->mm->context.size))
15146+ if (unlikely(seg >= child->mm->context.size))
15147 addr = -1L; /* bogus selector, access would fault */
15148 else {
15149 desc = child->mm->context.ldt + seg;
15150@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15151 addr += base;
15152 }
15153 mutex_unlock(&child->mm->context.lock);
15154- }
15155+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15156+ addr = ktla_ktva(addr);
15157
15158 return addr;
15159 }
15160@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15161 unsigned char opcode[15];
15162 unsigned long addr = convert_ip_to_linear(child, regs);
15163
15164+ if (addr == -EINVAL)
15165+ return 0;
15166+
15167 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15168 for (i = 0; i < copied; i++) {
15169 switch (opcode[i]) {
15170@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15171
15172 #ifdef CONFIG_X86_64
15173 case 0x40 ... 0x4f:
15174- if (regs->cs != __USER_CS)
15175+ if ((regs->cs & 0xffff) != __USER_CS)
15176 /* 32-bit mode: register increment */
15177 return 0;
15178 /* 64-bit mode: REX prefix */
15179diff -urNp linux-3.0.7/arch/x86/kernel/syscall_table_32.S linux-3.0.7/arch/x86/kernel/syscall_table_32.S
15180--- linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15181+++ linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15182@@ -1,3 +1,4 @@
15183+.section .rodata,"a",@progbits
15184 ENTRY(sys_call_table)
15185 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15186 .long sys_exit
15187diff -urNp linux-3.0.7/arch/x86/kernel/sys_i386_32.c linux-3.0.7/arch/x86/kernel/sys_i386_32.c
15188--- linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15189+++ linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15190@@ -24,17 +24,224 @@
15191
15192 #include <asm/syscalls.h>
15193
15194-/*
15195- * Do a system call from kernel instead of calling sys_execve so we
15196- * end up with proper pt_regs.
15197- */
15198-int kernel_execve(const char *filename,
15199- const char *const argv[],
15200- const char *const envp[])
15201+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15202 {
15203- long __res;
15204- asm volatile ("int $0x80"
15205- : "=a" (__res)
15206- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15207- return __res;
15208+ unsigned long pax_task_size = TASK_SIZE;
15209+
15210+#ifdef CONFIG_PAX_SEGMEXEC
15211+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15212+ pax_task_size = SEGMEXEC_TASK_SIZE;
15213+#endif
15214+
15215+ if (len > pax_task_size || addr > pax_task_size - len)
15216+ return -EINVAL;
15217+
15218+ return 0;
15219+}
15220+
15221+unsigned long
15222+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15223+ unsigned long len, unsigned long pgoff, unsigned long flags)
15224+{
15225+ struct mm_struct *mm = current->mm;
15226+ struct vm_area_struct *vma;
15227+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15228+
15229+#ifdef CONFIG_PAX_SEGMEXEC
15230+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15231+ pax_task_size = SEGMEXEC_TASK_SIZE;
15232+#endif
15233+
15234+ pax_task_size -= PAGE_SIZE;
15235+
15236+ if (len > pax_task_size)
15237+ return -ENOMEM;
15238+
15239+ if (flags & MAP_FIXED)
15240+ return addr;
15241+
15242+#ifdef CONFIG_PAX_RANDMMAP
15243+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15244+#endif
15245+
15246+ if (addr) {
15247+ addr = PAGE_ALIGN(addr);
15248+ if (pax_task_size - len >= addr) {
15249+ vma = find_vma(mm, addr);
15250+ if (check_heap_stack_gap(vma, addr, len))
15251+ return addr;
15252+ }
15253+ }
15254+ if (len > mm->cached_hole_size) {
15255+ start_addr = addr = mm->free_area_cache;
15256+ } else {
15257+ start_addr = addr = mm->mmap_base;
15258+ mm->cached_hole_size = 0;
15259+ }
15260+
15261+#ifdef CONFIG_PAX_PAGEEXEC
15262+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15263+ start_addr = 0x00110000UL;
15264+
15265+#ifdef CONFIG_PAX_RANDMMAP
15266+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15267+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15268+#endif
15269+
15270+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15271+ start_addr = addr = mm->mmap_base;
15272+ else
15273+ addr = start_addr;
15274+ }
15275+#endif
15276+
15277+full_search:
15278+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15279+ /* At this point: (!vma || addr < vma->vm_end). */
15280+ if (pax_task_size - len < addr) {
15281+ /*
15282+ * Start a new search - just in case we missed
15283+ * some holes.
15284+ */
15285+ if (start_addr != mm->mmap_base) {
15286+ start_addr = addr = mm->mmap_base;
15287+ mm->cached_hole_size = 0;
15288+ goto full_search;
15289+ }
15290+ return -ENOMEM;
15291+ }
15292+ if (check_heap_stack_gap(vma, addr, len))
15293+ break;
15294+ if (addr + mm->cached_hole_size < vma->vm_start)
15295+ mm->cached_hole_size = vma->vm_start - addr;
15296+ addr = vma->vm_end;
15297+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15298+ start_addr = addr = mm->mmap_base;
15299+ mm->cached_hole_size = 0;
15300+ goto full_search;
15301+ }
15302+ }
15303+
15304+ /*
15305+ * Remember the place where we stopped the search:
15306+ */
15307+ mm->free_area_cache = addr + len;
15308+ return addr;
15309+}
15310+
15311+unsigned long
15312+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15313+ const unsigned long len, const unsigned long pgoff,
15314+ const unsigned long flags)
15315+{
15316+ struct vm_area_struct *vma;
15317+ struct mm_struct *mm = current->mm;
15318+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15319+
15320+#ifdef CONFIG_PAX_SEGMEXEC
15321+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15322+ pax_task_size = SEGMEXEC_TASK_SIZE;
15323+#endif
15324+
15325+ pax_task_size -= PAGE_SIZE;
15326+
15327+ /* requested length too big for entire address space */
15328+ if (len > pax_task_size)
15329+ return -ENOMEM;
15330+
15331+ if (flags & MAP_FIXED)
15332+ return addr;
15333+
15334+#ifdef CONFIG_PAX_PAGEEXEC
15335+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15336+ goto bottomup;
15337+#endif
15338+
15339+#ifdef CONFIG_PAX_RANDMMAP
15340+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15341+#endif
15342+
15343+ /* requesting a specific address */
15344+ if (addr) {
15345+ addr = PAGE_ALIGN(addr);
15346+ if (pax_task_size - len >= addr) {
15347+ vma = find_vma(mm, addr);
15348+ if (check_heap_stack_gap(vma, addr, len))
15349+ return addr;
15350+ }
15351+ }
15352+
15353+ /* check if free_area_cache is useful for us */
15354+ if (len <= mm->cached_hole_size) {
15355+ mm->cached_hole_size = 0;
15356+ mm->free_area_cache = mm->mmap_base;
15357+ }
15358+
15359+ /* either no address requested or can't fit in requested address hole */
15360+ addr = mm->free_area_cache;
15361+
15362+ /* make sure it can fit in the remaining address space */
15363+ if (addr > len) {
15364+ vma = find_vma(mm, addr-len);
15365+ if (check_heap_stack_gap(vma, addr - len, len))
15366+ /* remember the address as a hint for next time */
15367+ return (mm->free_area_cache = addr-len);
15368+ }
15369+
15370+ if (mm->mmap_base < len)
15371+ goto bottomup;
15372+
15373+ addr = mm->mmap_base-len;
15374+
15375+ do {
15376+ /*
15377+ * Lookup failure means no vma is above this address,
15378+ * else if new region fits below vma->vm_start,
15379+ * return with success:
15380+ */
15381+ vma = find_vma(mm, addr);
15382+ if (check_heap_stack_gap(vma, addr, len))
15383+ /* remember the address as a hint for next time */
15384+ return (mm->free_area_cache = addr);
15385+
15386+ /* remember the largest hole we saw so far */
15387+ if (addr + mm->cached_hole_size < vma->vm_start)
15388+ mm->cached_hole_size = vma->vm_start - addr;
15389+
15390+ /* try just below the current vma->vm_start */
15391+ addr = skip_heap_stack_gap(vma, len);
15392+ } while (!IS_ERR_VALUE(addr));
15393+
15394+bottomup:
15395+ /*
15396+ * A failed mmap() very likely causes application failure,
15397+ * so fall back to the bottom-up function here. This scenario
15398+ * can happen with large stack limits and large mmap()
15399+ * allocations.
15400+ */
15401+
15402+#ifdef CONFIG_PAX_SEGMEXEC
15403+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15404+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15405+ else
15406+#endif
15407+
15408+ mm->mmap_base = TASK_UNMAPPED_BASE;
15409+
15410+#ifdef CONFIG_PAX_RANDMMAP
15411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15412+ mm->mmap_base += mm->delta_mmap;
15413+#endif
15414+
15415+ mm->free_area_cache = mm->mmap_base;
15416+ mm->cached_hole_size = ~0UL;
15417+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15418+ /*
15419+ * Restore the topdown base:
15420+ */
15421+ mm->mmap_base = base;
15422+ mm->free_area_cache = base;
15423+ mm->cached_hole_size = ~0UL;
15424+
15425+ return addr;
15426 }
15427diff -urNp linux-3.0.7/arch/x86/kernel/sys_x86_64.c linux-3.0.7/arch/x86/kernel/sys_x86_64.c
15428--- linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15429+++ linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15430@@ -32,8 +32,8 @@ out:
15431 return error;
15432 }
15433
15434-static void find_start_end(unsigned long flags, unsigned long *begin,
15435- unsigned long *end)
15436+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15437+ unsigned long *begin, unsigned long *end)
15438 {
15439 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15440 unsigned long new_begin;
15441@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15442 *begin = new_begin;
15443 }
15444 } else {
15445- *begin = TASK_UNMAPPED_BASE;
15446+ *begin = mm->mmap_base;
15447 *end = TASK_SIZE;
15448 }
15449 }
15450@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15451 if (flags & MAP_FIXED)
15452 return addr;
15453
15454- find_start_end(flags, &begin, &end);
15455+ find_start_end(mm, flags, &begin, &end);
15456
15457 if (len > end)
15458 return -ENOMEM;
15459
15460+#ifdef CONFIG_PAX_RANDMMAP
15461+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15462+#endif
15463+
15464 if (addr) {
15465 addr = PAGE_ALIGN(addr);
15466 vma = find_vma(mm, addr);
15467- if (end - len >= addr &&
15468- (!vma || addr + len <= vma->vm_start))
15469+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15470 return addr;
15471 }
15472 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15473@@ -106,7 +109,7 @@ full_search:
15474 }
15475 return -ENOMEM;
15476 }
15477- if (!vma || addr + len <= vma->vm_start) {
15478+ if (check_heap_stack_gap(vma, addr, len)) {
15479 /*
15480 * Remember the place where we stopped the search:
15481 */
15482@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15483 {
15484 struct vm_area_struct *vma;
15485 struct mm_struct *mm = current->mm;
15486- unsigned long addr = addr0;
15487+ unsigned long base = mm->mmap_base, addr = addr0;
15488
15489 /* requested length too big for entire address space */
15490 if (len > TASK_SIZE)
15491@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15492 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15493 goto bottomup;
15494
15495+#ifdef CONFIG_PAX_RANDMMAP
15496+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15497+#endif
15498+
15499 /* requesting a specific address */
15500 if (addr) {
15501 addr = PAGE_ALIGN(addr);
15502- vma = find_vma(mm, addr);
15503- if (TASK_SIZE - len >= addr &&
15504- (!vma || addr + len <= vma->vm_start))
15505- return addr;
15506+ if (TASK_SIZE - len >= addr) {
15507+ vma = find_vma(mm, addr);
15508+ if (check_heap_stack_gap(vma, addr, len))
15509+ return addr;
15510+ }
15511 }
15512
15513 /* check if free_area_cache is useful for us */
15514@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15515 /* make sure it can fit in the remaining address space */
15516 if (addr > len) {
15517 vma = find_vma(mm, addr-len);
15518- if (!vma || addr <= vma->vm_start)
15519+ if (check_heap_stack_gap(vma, addr - len, len))
15520 /* remember the address as a hint for next time */
15521 return mm->free_area_cache = addr-len;
15522 }
15523@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15524 * return with success:
15525 */
15526 vma = find_vma(mm, addr);
15527- if (!vma || addr+len <= vma->vm_start)
15528+ if (check_heap_stack_gap(vma, addr, len))
15529 /* remember the address as a hint for next time */
15530 return mm->free_area_cache = addr;
15531
15532@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15533 mm->cached_hole_size = vma->vm_start - addr;
15534
15535 /* try just below the current vma->vm_start */
15536- addr = vma->vm_start-len;
15537- } while (len < vma->vm_start);
15538+ addr = skip_heap_stack_gap(vma, len);
15539+ } while (!IS_ERR_VALUE(addr));
15540
15541 bottomup:
15542 /*
15543@@ -198,13 +206,21 @@ bottomup:
15544 * can happen with large stack limits and large mmap()
15545 * allocations.
15546 */
15547+ mm->mmap_base = TASK_UNMAPPED_BASE;
15548+
15549+#ifdef CONFIG_PAX_RANDMMAP
15550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15551+ mm->mmap_base += mm->delta_mmap;
15552+#endif
15553+
15554+ mm->free_area_cache = mm->mmap_base;
15555 mm->cached_hole_size = ~0UL;
15556- mm->free_area_cache = TASK_UNMAPPED_BASE;
15557 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15558 /*
15559 * Restore the topdown base:
15560 */
15561- mm->free_area_cache = mm->mmap_base;
15562+ mm->mmap_base = base;
15563+ mm->free_area_cache = base;
15564 mm->cached_hole_size = ~0UL;
15565
15566 return addr;
15567diff -urNp linux-3.0.7/arch/x86/kernel/tboot.c linux-3.0.7/arch/x86/kernel/tboot.c
15568--- linux-3.0.7/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15569+++ linux-3.0.7/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15570@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15571
15572 void tboot_shutdown(u32 shutdown_type)
15573 {
15574- void (*shutdown)(void);
15575+ void (* __noreturn shutdown)(void);
15576
15577 if (!tboot_enabled())
15578 return;
15579@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15580
15581 switch_to_tboot_pt();
15582
15583- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15584+ shutdown = (void *)tboot->shutdown_entry;
15585 shutdown();
15586
15587 /* should not reach here */
15588@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15589 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15590 }
15591
15592-static atomic_t ap_wfs_count;
15593+static atomic_unchecked_t ap_wfs_count;
15594
15595 static int tboot_wait_for_aps(int num_aps)
15596 {
15597@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15598 {
15599 switch (action) {
15600 case CPU_DYING:
15601- atomic_inc(&ap_wfs_count);
15602+ atomic_inc_unchecked(&ap_wfs_count);
15603 if (num_online_cpus() == 1)
15604- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15605+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15606 return NOTIFY_BAD;
15607 break;
15608 }
15609@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15610
15611 tboot_create_trampoline();
15612
15613- atomic_set(&ap_wfs_count, 0);
15614+ atomic_set_unchecked(&ap_wfs_count, 0);
15615 register_hotcpu_notifier(&tboot_cpu_notifier);
15616 return 0;
15617 }
15618diff -urNp linux-3.0.7/arch/x86/kernel/time.c linux-3.0.7/arch/x86/kernel/time.c
15619--- linux-3.0.7/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15620+++ linux-3.0.7/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15621@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15622 {
15623 unsigned long pc = instruction_pointer(regs);
15624
15625- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15626+ if (!user_mode(regs) && in_lock_functions(pc)) {
15627 #ifdef CONFIG_FRAME_POINTER
15628- return *(unsigned long *)(regs->bp + sizeof(long));
15629+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15630 #else
15631 unsigned long *sp =
15632 (unsigned long *)kernel_stack_pointer(regs);
15633@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15634 * or above a saved flags. Eflags has bits 22-31 zero,
15635 * kernel addresses don't.
15636 */
15637+
15638+#ifdef CONFIG_PAX_KERNEXEC
15639+ return ktla_ktva(sp[0]);
15640+#else
15641 if (sp[0] >> 22)
15642 return sp[0];
15643 if (sp[1] >> 22)
15644 return sp[1];
15645 #endif
15646+
15647+#endif
15648 }
15649 return pc;
15650 }
15651diff -urNp linux-3.0.7/arch/x86/kernel/tls.c linux-3.0.7/arch/x86/kernel/tls.c
15652--- linux-3.0.7/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15653+++ linux-3.0.7/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15654@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15655 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15656 return -EINVAL;
15657
15658+#ifdef CONFIG_PAX_SEGMEXEC
15659+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15660+ return -EINVAL;
15661+#endif
15662+
15663 set_tls_desc(p, idx, &info, 1);
15664
15665 return 0;
15666diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_32.S linux-3.0.7/arch/x86/kernel/trampoline_32.S
15667--- linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15668+++ linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15669@@ -32,6 +32,12 @@
15670 #include <asm/segment.h>
15671 #include <asm/page_types.h>
15672
15673+#ifdef CONFIG_PAX_KERNEXEC
15674+#define ta(X) (X)
15675+#else
15676+#define ta(X) ((X) - __PAGE_OFFSET)
15677+#endif
15678+
15679 #ifdef CONFIG_SMP
15680
15681 .section ".x86_trampoline","a"
15682@@ -62,7 +68,7 @@ r_base = .
15683 inc %ax # protected mode (PE) bit
15684 lmsw %ax # into protected mode
15685 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15686- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15687+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15688
15689 # These need to be in the same 64K segment as the above;
15690 # hence we don't use the boot_gdt_descr defined in head.S
15691diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_64.S linux-3.0.7/arch/x86/kernel/trampoline_64.S
15692--- linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15693+++ linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15694@@ -90,7 +90,7 @@ startup_32:
15695 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15696 movl %eax, %ds
15697
15698- movl $X86_CR4_PAE, %eax
15699+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15700 movl %eax, %cr4 # Enable PAE mode
15701
15702 # Setup trampoline 4 level pagetables
15703@@ -138,7 +138,7 @@ tidt:
15704 # so the kernel can live anywhere
15705 .balign 4
15706 tgdt:
15707- .short tgdt_end - tgdt # gdt limit
15708+ .short tgdt_end - tgdt - 1 # gdt limit
15709 .long tgdt - r_base
15710 .short 0
15711 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15712diff -urNp linux-3.0.7/arch/x86/kernel/traps.c linux-3.0.7/arch/x86/kernel/traps.c
15713--- linux-3.0.7/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15714+++ linux-3.0.7/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15715@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15716
15717 /* Do we ignore FPU interrupts ? */
15718 char ignore_fpu_irq;
15719-
15720-/*
15721- * The IDT has to be page-aligned to simplify the Pentium
15722- * F0 0F bug workaround.
15723- */
15724-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15725 #endif
15726
15727 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15728@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15729 }
15730
15731 static void __kprobes
15732-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15733+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15734 long error_code, siginfo_t *info)
15735 {
15736 struct task_struct *tsk = current;
15737
15738 #ifdef CONFIG_X86_32
15739- if (regs->flags & X86_VM_MASK) {
15740+ if (v8086_mode(regs)) {
15741 /*
15742 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15743 * On nmi (interrupt 2), do_trap should not be called.
15744@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15745 }
15746 #endif
15747
15748- if (!user_mode(regs))
15749+ if (!user_mode_novm(regs))
15750 goto kernel_trap;
15751
15752 #ifdef CONFIG_X86_32
15753@@ -157,7 +151,7 @@ trap_signal:
15754 printk_ratelimit()) {
15755 printk(KERN_INFO
15756 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15757- tsk->comm, tsk->pid, str,
15758+ tsk->comm, task_pid_nr(tsk), str,
15759 regs->ip, regs->sp, error_code);
15760 print_vma_addr(" in ", regs->ip);
15761 printk("\n");
15762@@ -174,8 +168,20 @@ kernel_trap:
15763 if (!fixup_exception(regs)) {
15764 tsk->thread.error_code = error_code;
15765 tsk->thread.trap_no = trapnr;
15766+
15767+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15768+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15769+ str = "PAX: suspicious stack segment fault";
15770+#endif
15771+
15772 die(str, regs, error_code);
15773 }
15774+
15775+#ifdef CONFIG_PAX_REFCOUNT
15776+ if (trapnr == 4)
15777+ pax_report_refcount_overflow(regs);
15778+#endif
15779+
15780 return;
15781
15782 #ifdef CONFIG_X86_32
15783@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15784 conditional_sti(regs);
15785
15786 #ifdef CONFIG_X86_32
15787- if (regs->flags & X86_VM_MASK)
15788+ if (v8086_mode(regs))
15789 goto gp_in_vm86;
15790 #endif
15791
15792 tsk = current;
15793- if (!user_mode(regs))
15794+ if (!user_mode_novm(regs))
15795 goto gp_in_kernel;
15796
15797+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15798+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15799+ struct mm_struct *mm = tsk->mm;
15800+ unsigned long limit;
15801+
15802+ down_write(&mm->mmap_sem);
15803+ limit = mm->context.user_cs_limit;
15804+ if (limit < TASK_SIZE) {
15805+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15806+ up_write(&mm->mmap_sem);
15807+ return;
15808+ }
15809+ up_write(&mm->mmap_sem);
15810+ }
15811+#endif
15812+
15813 tsk->thread.error_code = error_code;
15814 tsk->thread.trap_no = 13;
15815
15816@@ -304,6 +326,13 @@ gp_in_kernel:
15817 if (notify_die(DIE_GPF, "general protection fault", regs,
15818 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15819 return;
15820+
15821+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15822+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15823+ die("PAX: suspicious general protection fault", regs, error_code);
15824+ else
15825+#endif
15826+
15827 die("general protection fault", regs, error_code);
15828 }
15829
15830@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15831 dotraplinkage notrace __kprobes void
15832 do_nmi(struct pt_regs *regs, long error_code)
15833 {
15834+
15835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15836+ if (!user_mode(regs)) {
15837+ unsigned long cs = regs->cs & 0xFFFF;
15838+ unsigned long ip = ktva_ktla(regs->ip);
15839+
15840+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15841+ regs->ip = ip;
15842+ }
15843+#endif
15844+
15845 nmi_enter();
15846
15847 inc_irq_stat(__nmi_count);
15848@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15849 /* It's safe to allow irq's after DR6 has been saved */
15850 preempt_conditional_sti(regs);
15851
15852- if (regs->flags & X86_VM_MASK) {
15853+ if (v8086_mode(regs)) {
15854 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15855 error_code, 1);
15856 preempt_conditional_cli(regs);
15857@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15858 * We already checked v86 mode above, so we can check for kernel mode
15859 * by just checking the CPL of CS.
15860 */
15861- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15862+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15863 tsk->thread.debugreg6 &= ~DR_STEP;
15864 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15865 regs->flags &= ~X86_EFLAGS_TF;
15866@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15867 return;
15868 conditional_sti(regs);
15869
15870- if (!user_mode_vm(regs))
15871+ if (!user_mode(regs))
15872 {
15873 if (!fixup_exception(regs)) {
15874 task->thread.error_code = error_code;
15875@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15876 void __math_state_restore(void)
15877 {
15878 struct thread_info *thread = current_thread_info();
15879- struct task_struct *tsk = thread->task;
15880+ struct task_struct *tsk = current;
15881
15882 /*
15883 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15884@@ -750,8 +790,7 @@ void __math_state_restore(void)
15885 */
15886 asmlinkage void math_state_restore(void)
15887 {
15888- struct thread_info *thread = current_thread_info();
15889- struct task_struct *tsk = thread->task;
15890+ struct task_struct *tsk = current;
15891
15892 if (!tsk_used_math(tsk)) {
15893 local_irq_enable();
15894diff -urNp linux-3.0.7/arch/x86/kernel/verify_cpu.S linux-3.0.7/arch/x86/kernel/verify_cpu.S
15895--- linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15896+++ linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15897@@ -20,6 +20,7 @@
15898 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15899 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15900 * arch/x86/kernel/head_32.S: processor startup
15901+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15902 *
15903 * verify_cpu, returns the status of longmode and SSE in register %eax.
15904 * 0: Success 1: Failure
15905diff -urNp linux-3.0.7/arch/x86/kernel/vm86_32.c linux-3.0.7/arch/x86/kernel/vm86_32.c
15906--- linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15907+++ linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15908@@ -41,6 +41,7 @@
15909 #include <linux/ptrace.h>
15910 #include <linux/audit.h>
15911 #include <linux/stddef.h>
15912+#include <linux/grsecurity.h>
15913
15914 #include <asm/uaccess.h>
15915 #include <asm/io.h>
15916@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15917 do_exit(SIGSEGV);
15918 }
15919
15920- tss = &per_cpu(init_tss, get_cpu());
15921+ tss = init_tss + get_cpu();
15922 current->thread.sp0 = current->thread.saved_sp0;
15923 current->thread.sysenter_cs = __KERNEL_CS;
15924 load_sp0(tss, &current->thread);
15925@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15926 struct task_struct *tsk;
15927 int tmp, ret = -EPERM;
15928
15929+#ifdef CONFIG_GRKERNSEC_VM86
15930+ if (!capable(CAP_SYS_RAWIO)) {
15931+ gr_handle_vm86();
15932+ goto out;
15933+ }
15934+#endif
15935+
15936 tsk = current;
15937 if (tsk->thread.saved_sp0)
15938 goto out;
15939@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15940 int tmp, ret;
15941 struct vm86plus_struct __user *v86;
15942
15943+#ifdef CONFIG_GRKERNSEC_VM86
15944+ if (!capable(CAP_SYS_RAWIO)) {
15945+ gr_handle_vm86();
15946+ ret = -EPERM;
15947+ goto out;
15948+ }
15949+#endif
15950+
15951 tsk = current;
15952 switch (cmd) {
15953 case VM86_REQUEST_IRQ:
15954@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15955 tsk->thread.saved_fs = info->regs32->fs;
15956 tsk->thread.saved_gs = get_user_gs(info->regs32);
15957
15958- tss = &per_cpu(init_tss, get_cpu());
15959+ tss = init_tss + get_cpu();
15960 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15961 if (cpu_has_sep)
15962 tsk->thread.sysenter_cs = 0;
15963@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15964 goto cannot_handle;
15965 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15966 goto cannot_handle;
15967- intr_ptr = (unsigned long __user *) (i << 2);
15968+ intr_ptr = (__force unsigned long __user *) (i << 2);
15969 if (get_user(segoffs, intr_ptr))
15970 goto cannot_handle;
15971 if ((segoffs >> 16) == BIOSSEG)
15972diff -urNp linux-3.0.7/arch/x86/kernel/vmlinux.lds.S linux-3.0.7/arch/x86/kernel/vmlinux.lds.S
15973--- linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15974+++ linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15975@@ -26,6 +26,13 @@
15976 #include <asm/page_types.h>
15977 #include <asm/cache.h>
15978 #include <asm/boot.h>
15979+#include <asm/segment.h>
15980+
15981+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15982+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15983+#else
15984+#define __KERNEL_TEXT_OFFSET 0
15985+#endif
15986
15987 #undef i386 /* in case the preprocessor is a 32bit one */
15988
15989@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15990
15991 PHDRS {
15992 text PT_LOAD FLAGS(5); /* R_E */
15993+#ifdef CONFIG_X86_32
15994+ module PT_LOAD FLAGS(5); /* R_E */
15995+#endif
15996+#ifdef CONFIG_XEN
15997+ rodata PT_LOAD FLAGS(5); /* R_E */
15998+#else
15999+ rodata PT_LOAD FLAGS(4); /* R__ */
16000+#endif
16001 data PT_LOAD FLAGS(6); /* RW_ */
16002 #ifdef CONFIG_X86_64
16003 user PT_LOAD FLAGS(5); /* R_E */
16004+#endif
16005+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16006 #ifdef CONFIG_SMP
16007 percpu PT_LOAD FLAGS(6); /* RW_ */
16008 #endif
16009+ text.init PT_LOAD FLAGS(5); /* R_E */
16010+ text.exit PT_LOAD FLAGS(5); /* R_E */
16011 init PT_LOAD FLAGS(7); /* RWE */
16012-#endif
16013 note PT_NOTE FLAGS(0); /* ___ */
16014 }
16015
16016 SECTIONS
16017 {
16018 #ifdef CONFIG_X86_32
16019- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16020- phys_startup_32 = startup_32 - LOAD_OFFSET;
16021+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16022 #else
16023- . = __START_KERNEL;
16024- phys_startup_64 = startup_64 - LOAD_OFFSET;
16025+ . = __START_KERNEL;
16026 #endif
16027
16028 /* Text and read-only data */
16029- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16030- _text = .;
16031+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16032 /* bootstrapping code */
16033+#ifdef CONFIG_X86_32
16034+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16035+#else
16036+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16037+#endif
16038+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16039+ _text = .;
16040 HEAD_TEXT
16041 #ifdef CONFIG_X86_32
16042 . = ALIGN(PAGE_SIZE);
16043@@ -109,13 +131,47 @@ SECTIONS
16044 IRQENTRY_TEXT
16045 *(.fixup)
16046 *(.gnu.warning)
16047- /* End of text section */
16048- _etext = .;
16049 } :text = 0x9090
16050
16051- NOTES :text :note
16052+ . += __KERNEL_TEXT_OFFSET;
16053+
16054+#ifdef CONFIG_X86_32
16055+ . = ALIGN(PAGE_SIZE);
16056+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16057+
16058+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16059+ MODULES_EXEC_VADDR = .;
16060+ BYTE(0)
16061+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16062+ . = ALIGN(HPAGE_SIZE);
16063+ MODULES_EXEC_END = . - 1;
16064+#endif
16065+
16066+ } :module
16067+#endif
16068+
16069+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16070+ /* End of text section */
16071+ _etext = . - __KERNEL_TEXT_OFFSET;
16072+ }
16073+
16074+#ifdef CONFIG_X86_32
16075+ . = ALIGN(PAGE_SIZE);
16076+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16077+ *(.idt)
16078+ . = ALIGN(PAGE_SIZE);
16079+ *(.empty_zero_page)
16080+ *(.initial_pg_fixmap)
16081+ *(.initial_pg_pmd)
16082+ *(.initial_page_table)
16083+ *(.swapper_pg_dir)
16084+ } :rodata
16085+#endif
16086+
16087+ . = ALIGN(PAGE_SIZE);
16088+ NOTES :rodata :note
16089
16090- EXCEPTION_TABLE(16) :text = 0x9090
16091+ EXCEPTION_TABLE(16) :rodata
16092
16093 #if defined(CONFIG_DEBUG_RODATA)
16094 /* .text should occupy whole number of pages */
16095@@ -127,16 +183,20 @@ SECTIONS
16096
16097 /* Data */
16098 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16099+
16100+#ifdef CONFIG_PAX_KERNEXEC
16101+ . = ALIGN(HPAGE_SIZE);
16102+#else
16103+ . = ALIGN(PAGE_SIZE);
16104+#endif
16105+
16106 /* Start of data section */
16107 _sdata = .;
16108
16109 /* init_task */
16110 INIT_TASK_DATA(THREAD_SIZE)
16111
16112-#ifdef CONFIG_X86_32
16113- /* 32 bit has nosave before _edata */
16114 NOSAVE_DATA
16115-#endif
16116
16117 PAGE_ALIGNED_DATA(PAGE_SIZE)
16118
16119@@ -208,12 +268,19 @@ SECTIONS
16120 #endif /* CONFIG_X86_64 */
16121
16122 /* Init code and data - will be freed after init */
16123- . = ALIGN(PAGE_SIZE);
16124 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16125+ BYTE(0)
16126+
16127+#ifdef CONFIG_PAX_KERNEXEC
16128+ . = ALIGN(HPAGE_SIZE);
16129+#else
16130+ . = ALIGN(PAGE_SIZE);
16131+#endif
16132+
16133 __init_begin = .; /* paired with __init_end */
16134- }
16135+ } :init.begin
16136
16137-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16138+#ifdef CONFIG_SMP
16139 /*
16140 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16141 * output PHDR, so the next output section - .init.text - should
16142@@ -222,12 +289,27 @@ SECTIONS
16143 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16144 #endif
16145
16146- INIT_TEXT_SECTION(PAGE_SIZE)
16147-#ifdef CONFIG_X86_64
16148- :init
16149-#endif
16150+ . = ALIGN(PAGE_SIZE);
16151+ init_begin = .;
16152+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16153+ VMLINUX_SYMBOL(_sinittext) = .;
16154+ INIT_TEXT
16155+ VMLINUX_SYMBOL(_einittext) = .;
16156+ . = ALIGN(PAGE_SIZE);
16157+ } :text.init
16158
16159- INIT_DATA_SECTION(16)
16160+ /*
16161+ * .exit.text is discard at runtime, not link time, to deal with
16162+ * references from .altinstructions and .eh_frame
16163+ */
16164+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16165+ EXIT_TEXT
16166+ . = ALIGN(16);
16167+ } :text.exit
16168+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16169+
16170+ . = ALIGN(PAGE_SIZE);
16171+ INIT_DATA_SECTION(16) :init
16172
16173 /*
16174 * Code and data for a variety of lowlevel trampolines, to be
16175@@ -301,19 +383,12 @@ SECTIONS
16176 }
16177
16178 . = ALIGN(8);
16179- /*
16180- * .exit.text is discard at runtime, not link time, to deal with
16181- * references from .altinstructions and .eh_frame
16182- */
16183- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16184- EXIT_TEXT
16185- }
16186
16187 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16188 EXIT_DATA
16189 }
16190
16191-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16192+#ifndef CONFIG_SMP
16193 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16194 #endif
16195
16196@@ -332,16 +407,10 @@ SECTIONS
16197 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16198 __smp_locks = .;
16199 *(.smp_locks)
16200- . = ALIGN(PAGE_SIZE);
16201 __smp_locks_end = .;
16202+ . = ALIGN(PAGE_SIZE);
16203 }
16204
16205-#ifdef CONFIG_X86_64
16206- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16207- NOSAVE_DATA
16208- }
16209-#endif
16210-
16211 /* BSS */
16212 . = ALIGN(PAGE_SIZE);
16213 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16214@@ -357,6 +426,7 @@ SECTIONS
16215 __brk_base = .;
16216 . += 64 * 1024; /* 64k alignment slop space */
16217 *(.brk_reservation) /* areas brk users have reserved */
16218+ . = ALIGN(HPAGE_SIZE);
16219 __brk_limit = .;
16220 }
16221
16222@@ -383,13 +453,12 @@ SECTIONS
16223 * for the boot processor.
16224 */
16225 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16226-INIT_PER_CPU(gdt_page);
16227 INIT_PER_CPU(irq_stack_union);
16228
16229 /*
16230 * Build-time check on the image size:
16231 */
16232-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16233+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16234 "kernel image bigger than KERNEL_IMAGE_SIZE");
16235
16236 #ifdef CONFIG_SMP
16237diff -urNp linux-3.0.7/arch/x86/kernel/vsyscall_64.c linux-3.0.7/arch/x86/kernel/vsyscall_64.c
16238--- linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16239+++ linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16240@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16241 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16242 {
16243 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16244- .sysctl_enabled = 1,
16245+ .sysctl_enabled = 0,
16246 };
16247
16248 void update_vsyscall_tz(void)
16249@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16250 static ctl_table kernel_table2[] = {
16251 { .procname = "vsyscall64",
16252 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16253- .mode = 0644,
16254+ .mode = 0444,
16255 .proc_handler = proc_dointvec },
16256 {}
16257 };
16258diff -urNp linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c
16259--- linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16260+++ linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16261@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16262 EXPORT_SYMBOL(copy_user_generic_string);
16263 EXPORT_SYMBOL(copy_user_generic_unrolled);
16264 EXPORT_SYMBOL(__copy_user_nocache);
16265-EXPORT_SYMBOL(_copy_from_user);
16266-EXPORT_SYMBOL(_copy_to_user);
16267
16268 EXPORT_SYMBOL(copy_page);
16269 EXPORT_SYMBOL(clear_page);
16270diff -urNp linux-3.0.7/arch/x86/kernel/xsave.c linux-3.0.7/arch/x86/kernel/xsave.c
16271--- linux-3.0.7/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16272+++ linux-3.0.7/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16273@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16274 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16275 return -EINVAL;
16276
16277- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16278+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16279 fx_sw_user->extended_size -
16280 FP_XSTATE_MAGIC2_SIZE));
16281 if (err)
16282@@ -267,7 +267,7 @@ fx_only:
16283 * the other extended state.
16284 */
16285 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16286- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16287+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16288 }
16289
16290 /*
16291@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16292 if (use_xsave())
16293 err = restore_user_xstate(buf);
16294 else
16295- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16296+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16297 buf);
16298 if (unlikely(err)) {
16299 /*
16300diff -urNp linux-3.0.7/arch/x86/kvm/emulate.c linux-3.0.7/arch/x86/kvm/emulate.c
16301--- linux-3.0.7/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16302+++ linux-3.0.7/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16303@@ -96,7 +96,7 @@
16304 #define Src2ImmByte (2<<29)
16305 #define Src2One (3<<29)
16306 #define Src2Imm (4<<29)
16307-#define Src2Mask (7<<29)
16308+#define Src2Mask (7U<<29)
16309
16310 #define X2(x...) x, x
16311 #define X3(x...) X2(x), x
16312@@ -207,6 +207,7 @@ struct gprefix {
16313
16314 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16315 do { \
16316+ unsigned long _tmp; \
16317 __asm__ __volatile__ ( \
16318 _PRE_EFLAGS("0", "4", "2") \
16319 _op _suffix " %"_x"3,%1; " \
16320@@ -220,8 +221,6 @@ struct gprefix {
16321 /* Raw emulation: instruction has two explicit operands. */
16322 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16323 do { \
16324- unsigned long _tmp; \
16325- \
16326 switch ((_dst).bytes) { \
16327 case 2: \
16328 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16329@@ -237,7 +236,6 @@ struct gprefix {
16330
16331 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16332 do { \
16333- unsigned long _tmp; \
16334 switch ((_dst).bytes) { \
16335 case 1: \
16336 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16337diff -urNp linux-3.0.7/arch/x86/kvm/lapic.c linux-3.0.7/arch/x86/kvm/lapic.c
16338--- linux-3.0.7/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16339+++ linux-3.0.7/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16340@@ -53,7 +53,7 @@
16341 #define APIC_BUS_CYCLE_NS 1
16342
16343 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16344-#define apic_debug(fmt, arg...)
16345+#define apic_debug(fmt, arg...) do {} while (0)
16346
16347 #define APIC_LVT_NUM 6
16348 /* 14 is the version for Xeon and Pentium 8.4.8*/
16349diff -urNp linux-3.0.7/arch/x86/kvm/mmu.c linux-3.0.7/arch/x86/kvm/mmu.c
16350--- linux-3.0.7/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16351+++ linux-3.0.7/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16352@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16353
16354 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16355
16356- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16357+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16358
16359 /*
16360 * Assume that the pte write on a page table of the same type
16361@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16362 }
16363
16364 spin_lock(&vcpu->kvm->mmu_lock);
16365- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16366+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16367 gentry = 0;
16368 kvm_mmu_free_some_pages(vcpu);
16369 ++vcpu->kvm->stat.mmu_pte_write;
16370diff -urNp linux-3.0.7/arch/x86/kvm/paging_tmpl.h linux-3.0.7/arch/x86/kvm/paging_tmpl.h
16371--- linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16372+++ linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16373@@ -182,7 +182,7 @@ walk:
16374 break;
16375 }
16376
16377- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16378+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16379 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16380 present = false;
16381 break;
16382@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16383 unsigned long mmu_seq;
16384 bool map_writable;
16385
16386+ pax_track_stack();
16387+
16388 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16389
16390 r = mmu_topup_memory_caches(vcpu);
16391@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16392 if (need_flush)
16393 kvm_flush_remote_tlbs(vcpu->kvm);
16394
16395- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16396+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16397
16398 spin_unlock(&vcpu->kvm->mmu_lock);
16399
16400diff -urNp linux-3.0.7/arch/x86/kvm/svm.c linux-3.0.7/arch/x86/kvm/svm.c
16401--- linux-3.0.7/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16402+++ linux-3.0.7/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16403@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16404 int cpu = raw_smp_processor_id();
16405
16406 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16407+
16408+ pax_open_kernel();
16409 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16410+ pax_close_kernel();
16411+
16412 load_TR_desc();
16413 }
16414
16415@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16416 #endif
16417 #endif
16418
16419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16420+ __set_fs(current_thread_info()->addr_limit);
16421+#endif
16422+
16423 reload_tss(vcpu);
16424
16425 local_irq_disable();
16426diff -urNp linux-3.0.7/arch/x86/kvm/vmx.c linux-3.0.7/arch/x86/kvm/vmx.c
16427--- linux-3.0.7/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16428+++ linux-3.0.7/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16429@@ -797,7 +797,11 @@ static void reload_tss(void)
16430 struct desc_struct *descs;
16431
16432 descs = (void *)gdt->address;
16433+
16434+ pax_open_kernel();
16435 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16436+ pax_close_kernel();
16437+
16438 load_TR_desc();
16439 }
16440
16441@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16442 if (!cpu_has_vmx_flexpriority())
16443 flexpriority_enabled = 0;
16444
16445- if (!cpu_has_vmx_tpr_shadow())
16446- kvm_x86_ops->update_cr8_intercept = NULL;
16447+ if (!cpu_has_vmx_tpr_shadow()) {
16448+ pax_open_kernel();
16449+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16450+ pax_close_kernel();
16451+ }
16452
16453 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16454 kvm_disable_largepages();
16455@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16456 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16457
16458 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16459- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16460+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16461 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16462 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16463 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16464@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16465 "jmp .Lkvm_vmx_return \n\t"
16466 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16467 ".Lkvm_vmx_return: "
16468+
16469+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16470+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16471+ ".Lkvm_vmx_return2: "
16472+#endif
16473+
16474 /* Save guest registers, load host registers, keep flags */
16475 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16476 "pop %0 \n\t"
16477@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16478 #endif
16479 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16480 [wordsize]"i"(sizeof(ulong))
16481+
16482+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16483+ ,[cs]"i"(__KERNEL_CS)
16484+#endif
16485+
16486 : "cc", "memory"
16487 , R"ax", R"bx", R"di", R"si"
16488 #ifdef CONFIG_X86_64
16489@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16490
16491 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16492
16493- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16494+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16495+
16496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16497+ loadsegment(fs, __KERNEL_PERCPU);
16498+#endif
16499+
16500+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16501+ __set_fs(current_thread_info()->addr_limit);
16502+#endif
16503+
16504 vmx->launched = 1;
16505
16506 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16507diff -urNp linux-3.0.7/arch/x86/kvm/x86.c linux-3.0.7/arch/x86/kvm/x86.c
16508--- linux-3.0.7/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16509+++ linux-3.0.7/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16510@@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16511 {
16512 struct kvm *kvm = vcpu->kvm;
16513 int lm = is_long_mode(vcpu);
16514- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16515- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16516+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16517+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16518 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16519 : kvm->arch.xen_hvm_config.blob_size_32;
16520 u32 page_num = data & ~PAGE_MASK;
16521@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16522 if (n < msr_list.nmsrs)
16523 goto out;
16524 r = -EFAULT;
16525+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16526+ goto out;
16527 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16528 num_msrs_to_save * sizeof(u32)))
16529 goto out;
16530@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16531 struct kvm_cpuid2 *cpuid,
16532 struct kvm_cpuid_entry2 __user *entries)
16533 {
16534- int r;
16535+ int r, i;
16536
16537 r = -E2BIG;
16538 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16539 goto out;
16540 r = -EFAULT;
16541- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16542- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16543+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16544 goto out;
16545+ for (i = 0; i < cpuid->nent; ++i) {
16546+ struct kvm_cpuid_entry2 cpuid_entry;
16547+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16548+ goto out;
16549+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16550+ }
16551 vcpu->arch.cpuid_nent = cpuid->nent;
16552 kvm_apic_set_version(vcpu);
16553 kvm_x86_ops->cpuid_update(vcpu);
16554@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16555 struct kvm_cpuid2 *cpuid,
16556 struct kvm_cpuid_entry2 __user *entries)
16557 {
16558- int r;
16559+ int r, i;
16560
16561 r = -E2BIG;
16562 if (cpuid->nent < vcpu->arch.cpuid_nent)
16563 goto out;
16564 r = -EFAULT;
16565- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16566- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16567+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16568 goto out;
16569+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16570+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16571+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16572+ goto out;
16573+ }
16574 return 0;
16575
16576 out:
16577@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16578 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16579 struct kvm_interrupt *irq)
16580 {
16581- if (irq->irq < 0 || irq->irq >= 256)
16582+ if (irq->irq >= 256)
16583 return -EINVAL;
16584 if (irqchip_in_kernel(vcpu->kvm))
16585 return -ENXIO;
16586@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16587 }
16588 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16589
16590-int kvm_arch_init(void *opaque)
16591+int kvm_arch_init(const void *opaque)
16592 {
16593 int r;
16594 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16595diff -urNp linux-3.0.7/arch/x86/lguest/boot.c linux-3.0.7/arch/x86/lguest/boot.c
16596--- linux-3.0.7/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16597+++ linux-3.0.7/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16598@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16599 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16600 * Launcher to reboot us.
16601 */
16602-static void lguest_restart(char *reason)
16603+static __noreturn void lguest_restart(char *reason)
16604 {
16605 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16606+ BUG();
16607 }
16608
16609 /*G:050
16610diff -urNp linux-3.0.7/arch/x86/lib/atomic64_32.c linux-3.0.7/arch/x86/lib/atomic64_32.c
16611--- linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16612+++ linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16613@@ -8,18 +8,30 @@
16614
16615 long long atomic64_read_cx8(long long, const atomic64_t *v);
16616 EXPORT_SYMBOL(atomic64_read_cx8);
16617+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16618+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16619 long long atomic64_set_cx8(long long, const atomic64_t *v);
16620 EXPORT_SYMBOL(atomic64_set_cx8);
16621+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16622+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16623 long long atomic64_xchg_cx8(long long, unsigned high);
16624 EXPORT_SYMBOL(atomic64_xchg_cx8);
16625 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16626 EXPORT_SYMBOL(atomic64_add_return_cx8);
16627+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16628+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16629 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16630 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16631+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16632+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16633 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16634 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16635+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16636+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16637 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16638 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16639+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16640+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16641 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16642 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16643 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16644@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16645 #ifndef CONFIG_X86_CMPXCHG64
16646 long long atomic64_read_386(long long, const atomic64_t *v);
16647 EXPORT_SYMBOL(atomic64_read_386);
16648+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16649+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16650 long long atomic64_set_386(long long, const atomic64_t *v);
16651 EXPORT_SYMBOL(atomic64_set_386);
16652+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16653+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16654 long long atomic64_xchg_386(long long, unsigned high);
16655 EXPORT_SYMBOL(atomic64_xchg_386);
16656 long long atomic64_add_return_386(long long a, atomic64_t *v);
16657 EXPORT_SYMBOL(atomic64_add_return_386);
16658+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16659+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16660 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16661 EXPORT_SYMBOL(atomic64_sub_return_386);
16662+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16663+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16664 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16665 EXPORT_SYMBOL(atomic64_inc_return_386);
16666+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16667+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16668 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16669 EXPORT_SYMBOL(atomic64_dec_return_386);
16670+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16671+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16672 long long atomic64_add_386(long long a, atomic64_t *v);
16673 EXPORT_SYMBOL(atomic64_add_386);
16674+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16675+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16676 long long atomic64_sub_386(long long a, atomic64_t *v);
16677 EXPORT_SYMBOL(atomic64_sub_386);
16678+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16679+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16680 long long atomic64_inc_386(long long a, atomic64_t *v);
16681 EXPORT_SYMBOL(atomic64_inc_386);
16682+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16683+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16684 long long atomic64_dec_386(long long a, atomic64_t *v);
16685 EXPORT_SYMBOL(atomic64_dec_386);
16686+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16687+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16688 long long atomic64_dec_if_positive_386(atomic64_t *v);
16689 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16690 int atomic64_inc_not_zero_386(atomic64_t *v);
16691diff -urNp linux-3.0.7/arch/x86/lib/atomic64_386_32.S linux-3.0.7/arch/x86/lib/atomic64_386_32.S
16692--- linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16693+++ linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16694@@ -48,6 +48,10 @@ BEGIN(read)
16695 movl (v), %eax
16696 movl 4(v), %edx
16697 RET_ENDP
16698+BEGIN(read_unchecked)
16699+ movl (v), %eax
16700+ movl 4(v), %edx
16701+RET_ENDP
16702 #undef v
16703
16704 #define v %esi
16705@@ -55,6 +59,10 @@ BEGIN(set)
16706 movl %ebx, (v)
16707 movl %ecx, 4(v)
16708 RET_ENDP
16709+BEGIN(set_unchecked)
16710+ movl %ebx, (v)
16711+ movl %ecx, 4(v)
16712+RET_ENDP
16713 #undef v
16714
16715 #define v %esi
16716@@ -70,6 +78,20 @@ RET_ENDP
16717 BEGIN(add)
16718 addl %eax, (v)
16719 adcl %edx, 4(v)
16720+
16721+#ifdef CONFIG_PAX_REFCOUNT
16722+ jno 0f
16723+ subl %eax, (v)
16724+ sbbl %edx, 4(v)
16725+ int $4
16726+0:
16727+ _ASM_EXTABLE(0b, 0b)
16728+#endif
16729+
16730+RET_ENDP
16731+BEGIN(add_unchecked)
16732+ addl %eax, (v)
16733+ adcl %edx, 4(v)
16734 RET_ENDP
16735 #undef v
16736
16737@@ -77,6 +99,24 @@ RET_ENDP
16738 BEGIN(add_return)
16739 addl (v), %eax
16740 adcl 4(v), %edx
16741+
16742+#ifdef CONFIG_PAX_REFCOUNT
16743+ into
16744+1234:
16745+ _ASM_EXTABLE(1234b, 2f)
16746+#endif
16747+
16748+ movl %eax, (v)
16749+ movl %edx, 4(v)
16750+
16751+#ifdef CONFIG_PAX_REFCOUNT
16752+2:
16753+#endif
16754+
16755+RET_ENDP
16756+BEGIN(add_return_unchecked)
16757+ addl (v), %eax
16758+ adcl 4(v), %edx
16759 movl %eax, (v)
16760 movl %edx, 4(v)
16761 RET_ENDP
16762@@ -86,6 +126,20 @@ RET_ENDP
16763 BEGIN(sub)
16764 subl %eax, (v)
16765 sbbl %edx, 4(v)
16766+
16767+#ifdef CONFIG_PAX_REFCOUNT
16768+ jno 0f
16769+ addl %eax, (v)
16770+ adcl %edx, 4(v)
16771+ int $4
16772+0:
16773+ _ASM_EXTABLE(0b, 0b)
16774+#endif
16775+
16776+RET_ENDP
16777+BEGIN(sub_unchecked)
16778+ subl %eax, (v)
16779+ sbbl %edx, 4(v)
16780 RET_ENDP
16781 #undef v
16782
16783@@ -96,6 +150,27 @@ BEGIN(sub_return)
16784 sbbl $0, %edx
16785 addl (v), %eax
16786 adcl 4(v), %edx
16787+
16788+#ifdef CONFIG_PAX_REFCOUNT
16789+ into
16790+1234:
16791+ _ASM_EXTABLE(1234b, 2f)
16792+#endif
16793+
16794+ movl %eax, (v)
16795+ movl %edx, 4(v)
16796+
16797+#ifdef CONFIG_PAX_REFCOUNT
16798+2:
16799+#endif
16800+
16801+RET_ENDP
16802+BEGIN(sub_return_unchecked)
16803+ negl %edx
16804+ negl %eax
16805+ sbbl $0, %edx
16806+ addl (v), %eax
16807+ adcl 4(v), %edx
16808 movl %eax, (v)
16809 movl %edx, 4(v)
16810 RET_ENDP
16811@@ -105,6 +180,20 @@ RET_ENDP
16812 BEGIN(inc)
16813 addl $1, (v)
16814 adcl $0, 4(v)
16815+
16816+#ifdef CONFIG_PAX_REFCOUNT
16817+ jno 0f
16818+ subl $1, (v)
16819+ sbbl $0, 4(v)
16820+ int $4
16821+0:
16822+ _ASM_EXTABLE(0b, 0b)
16823+#endif
16824+
16825+RET_ENDP
16826+BEGIN(inc_unchecked)
16827+ addl $1, (v)
16828+ adcl $0, 4(v)
16829 RET_ENDP
16830 #undef v
16831
16832@@ -114,6 +203,26 @@ BEGIN(inc_return)
16833 movl 4(v), %edx
16834 addl $1, %eax
16835 adcl $0, %edx
16836+
16837+#ifdef CONFIG_PAX_REFCOUNT
16838+ into
16839+1234:
16840+ _ASM_EXTABLE(1234b, 2f)
16841+#endif
16842+
16843+ movl %eax, (v)
16844+ movl %edx, 4(v)
16845+
16846+#ifdef CONFIG_PAX_REFCOUNT
16847+2:
16848+#endif
16849+
16850+RET_ENDP
16851+BEGIN(inc_return_unchecked)
16852+ movl (v), %eax
16853+ movl 4(v), %edx
16854+ addl $1, %eax
16855+ adcl $0, %edx
16856 movl %eax, (v)
16857 movl %edx, 4(v)
16858 RET_ENDP
16859@@ -123,6 +232,20 @@ RET_ENDP
16860 BEGIN(dec)
16861 subl $1, (v)
16862 sbbl $0, 4(v)
16863+
16864+#ifdef CONFIG_PAX_REFCOUNT
16865+ jno 0f
16866+ addl $1, (v)
16867+ adcl $0, 4(v)
16868+ int $4
16869+0:
16870+ _ASM_EXTABLE(0b, 0b)
16871+#endif
16872+
16873+RET_ENDP
16874+BEGIN(dec_unchecked)
16875+ subl $1, (v)
16876+ sbbl $0, 4(v)
16877 RET_ENDP
16878 #undef v
16879
16880@@ -132,6 +255,26 @@ BEGIN(dec_return)
16881 movl 4(v), %edx
16882 subl $1, %eax
16883 sbbl $0, %edx
16884+
16885+#ifdef CONFIG_PAX_REFCOUNT
16886+ into
16887+1234:
16888+ _ASM_EXTABLE(1234b, 2f)
16889+#endif
16890+
16891+ movl %eax, (v)
16892+ movl %edx, 4(v)
16893+
16894+#ifdef CONFIG_PAX_REFCOUNT
16895+2:
16896+#endif
16897+
16898+RET_ENDP
16899+BEGIN(dec_return_unchecked)
16900+ movl (v), %eax
16901+ movl 4(v), %edx
16902+ subl $1, %eax
16903+ sbbl $0, %edx
16904 movl %eax, (v)
16905 movl %edx, 4(v)
16906 RET_ENDP
16907@@ -143,6 +286,13 @@ BEGIN(add_unless)
16908 adcl %edx, %edi
16909 addl (v), %eax
16910 adcl 4(v), %edx
16911+
16912+#ifdef CONFIG_PAX_REFCOUNT
16913+ into
16914+1234:
16915+ _ASM_EXTABLE(1234b, 2f)
16916+#endif
16917+
16918 cmpl %eax, %esi
16919 je 3f
16920 1:
16921@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16922 1:
16923 addl $1, %eax
16924 adcl $0, %edx
16925+
16926+#ifdef CONFIG_PAX_REFCOUNT
16927+ into
16928+1234:
16929+ _ASM_EXTABLE(1234b, 2f)
16930+#endif
16931+
16932 movl %eax, (v)
16933 movl %edx, 4(v)
16934 movl $1, %eax
16935@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16936 movl 4(v), %edx
16937 subl $1, %eax
16938 sbbl $0, %edx
16939+
16940+#ifdef CONFIG_PAX_REFCOUNT
16941+ into
16942+1234:
16943+ _ASM_EXTABLE(1234b, 1f)
16944+#endif
16945+
16946 js 1f
16947 movl %eax, (v)
16948 movl %edx, 4(v)
16949diff -urNp linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S
16950--- linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16951+++ linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
16952@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
16953 CFI_STARTPROC
16954
16955 read64 %ecx
16956+ pax_force_retaddr
16957 ret
16958 CFI_ENDPROC
16959 ENDPROC(atomic64_read_cx8)
16960
16961+ENTRY(atomic64_read_unchecked_cx8)
16962+ CFI_STARTPROC
16963+
16964+ read64 %ecx
16965+ pax_force_retaddr
16966+ ret
16967+ CFI_ENDPROC
16968+ENDPROC(atomic64_read_unchecked_cx8)
16969+
16970 ENTRY(atomic64_set_cx8)
16971 CFI_STARTPROC
16972
16973@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
16974 cmpxchg8b (%esi)
16975 jne 1b
16976
16977+ pax_force_retaddr
16978 ret
16979 CFI_ENDPROC
16980 ENDPROC(atomic64_set_cx8)
16981
16982+ENTRY(atomic64_set_unchecked_cx8)
16983+ CFI_STARTPROC
16984+
16985+1:
16986+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16987+ * are atomic on 586 and newer */
16988+ cmpxchg8b (%esi)
16989+ jne 1b
16990+
16991+ pax_force_retaddr
16992+ ret
16993+ CFI_ENDPROC
16994+ENDPROC(atomic64_set_unchecked_cx8)
16995+
16996 ENTRY(atomic64_xchg_cx8)
16997 CFI_STARTPROC
16998
16999@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17000 cmpxchg8b (%esi)
17001 jne 1b
17002
17003+ pax_force_retaddr
17004 ret
17005 CFI_ENDPROC
17006 ENDPROC(atomic64_xchg_cx8)
17007
17008-.macro addsub_return func ins insc
17009-ENTRY(atomic64_\func\()_return_cx8)
17010+.macro addsub_return func ins insc unchecked=""
17011+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17012 CFI_STARTPROC
17013 SAVE ebp
17014 SAVE ebx
17015@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17016 movl %edx, %ecx
17017 \ins\()l %esi, %ebx
17018 \insc\()l %edi, %ecx
17019+
17020+.ifb \unchecked
17021+#ifdef CONFIG_PAX_REFCOUNT
17022+ into
17023+2:
17024+ _ASM_EXTABLE(2b, 3f)
17025+#endif
17026+.endif
17027+
17028 LOCK_PREFIX
17029 cmpxchg8b (%ebp)
17030 jne 1b
17031-
17032-10:
17033 movl %ebx, %eax
17034 movl %ecx, %edx
17035+
17036+.ifb \unchecked
17037+#ifdef CONFIG_PAX_REFCOUNT
17038+3:
17039+#endif
17040+.endif
17041+
17042 RESTORE edi
17043 RESTORE esi
17044 RESTORE ebx
17045 RESTORE ebp
17046+ pax_force_retaddr
17047 ret
17048 CFI_ENDPROC
17049-ENDPROC(atomic64_\func\()_return_cx8)
17050+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17051 .endm
17052
17053 addsub_return add add adc
17054 addsub_return sub sub sbb
17055+addsub_return add add adc _unchecked
17056+addsub_return sub sub sbb _unchecked
17057
17058-.macro incdec_return func ins insc
17059-ENTRY(atomic64_\func\()_return_cx8)
17060+.macro incdec_return func ins insc unchecked
17061+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17062 CFI_STARTPROC
17063 SAVE ebx
17064
17065@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17066 movl %edx, %ecx
17067 \ins\()l $1, %ebx
17068 \insc\()l $0, %ecx
17069+
17070+.ifb \unchecked
17071+#ifdef CONFIG_PAX_REFCOUNT
17072+ into
17073+2:
17074+ _ASM_EXTABLE(2b, 3f)
17075+#endif
17076+.endif
17077+
17078 LOCK_PREFIX
17079 cmpxchg8b (%esi)
17080 jne 1b
17081
17082-10:
17083 movl %ebx, %eax
17084 movl %ecx, %edx
17085+
17086+.ifb \unchecked
17087+#ifdef CONFIG_PAX_REFCOUNT
17088+3:
17089+#endif
17090+.endif
17091+
17092 RESTORE ebx
17093+ pax_force_retaddr
17094 ret
17095 CFI_ENDPROC
17096-ENDPROC(atomic64_\func\()_return_cx8)
17097+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17098 .endm
17099
17100 incdec_return inc add adc
17101 incdec_return dec sub sbb
17102+incdec_return inc add adc _unchecked
17103+incdec_return dec sub sbb _unchecked
17104
17105 ENTRY(atomic64_dec_if_positive_cx8)
17106 CFI_STARTPROC
17107@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17108 movl %edx, %ecx
17109 subl $1, %ebx
17110 sbb $0, %ecx
17111+
17112+#ifdef CONFIG_PAX_REFCOUNT
17113+ into
17114+1234:
17115+ _ASM_EXTABLE(1234b, 2f)
17116+#endif
17117+
17118 js 2f
17119 LOCK_PREFIX
17120 cmpxchg8b (%esi)
17121@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17122 movl %ebx, %eax
17123 movl %ecx, %edx
17124 RESTORE ebx
17125+ pax_force_retaddr
17126 ret
17127 CFI_ENDPROC
17128 ENDPROC(atomic64_dec_if_positive_cx8)
17129@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17130 movl %edx, %ecx
17131 addl %esi, %ebx
17132 adcl %edi, %ecx
17133+
17134+#ifdef CONFIG_PAX_REFCOUNT
17135+ into
17136+1234:
17137+ _ASM_EXTABLE(1234b, 3f)
17138+#endif
17139+
17140 LOCK_PREFIX
17141 cmpxchg8b (%ebp)
17142 jne 1b
17143@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17144 CFI_ADJUST_CFA_OFFSET -8
17145 RESTORE ebx
17146 RESTORE ebp
17147+ pax_force_retaddr
17148 ret
17149 4:
17150 cmpl %edx, 4(%esp)
17151@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17152 movl %edx, %ecx
17153 addl $1, %ebx
17154 adcl $0, %ecx
17155+
17156+#ifdef CONFIG_PAX_REFCOUNT
17157+ into
17158+1234:
17159+ _ASM_EXTABLE(1234b, 3f)
17160+#endif
17161+
17162 LOCK_PREFIX
17163 cmpxchg8b (%esi)
17164 jne 1b
17165@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17166 movl $1, %eax
17167 3:
17168 RESTORE ebx
17169+ pax_force_retaddr
17170 ret
17171 4:
17172 testl %edx, %edx
17173diff -urNp linux-3.0.7/arch/x86/lib/checksum_32.S linux-3.0.7/arch/x86/lib/checksum_32.S
17174--- linux-3.0.7/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17175+++ linux-3.0.7/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17176@@ -28,7 +28,8 @@
17177 #include <linux/linkage.h>
17178 #include <asm/dwarf2.h>
17179 #include <asm/errno.h>
17180-
17181+#include <asm/segment.h>
17182+
17183 /*
17184 * computes a partial checksum, e.g. for TCP/UDP fragments
17185 */
17186@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17187
17188 #define ARGBASE 16
17189 #define FP 12
17190-
17191-ENTRY(csum_partial_copy_generic)
17192+
17193+ENTRY(csum_partial_copy_generic_to_user)
17194 CFI_STARTPROC
17195+
17196+#ifdef CONFIG_PAX_MEMORY_UDEREF
17197+ pushl_cfi %gs
17198+ popl_cfi %es
17199+ jmp csum_partial_copy_generic
17200+#endif
17201+
17202+ENTRY(csum_partial_copy_generic_from_user)
17203+
17204+#ifdef CONFIG_PAX_MEMORY_UDEREF
17205+ pushl_cfi %gs
17206+ popl_cfi %ds
17207+#endif
17208+
17209+ENTRY(csum_partial_copy_generic)
17210 subl $4,%esp
17211 CFI_ADJUST_CFA_OFFSET 4
17212 pushl_cfi %edi
17213@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17214 jmp 4f
17215 SRC(1: movw (%esi), %bx )
17216 addl $2, %esi
17217-DST( movw %bx, (%edi) )
17218+DST( movw %bx, %es:(%edi) )
17219 addl $2, %edi
17220 addw %bx, %ax
17221 adcl $0, %eax
17222@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17223 SRC(1: movl (%esi), %ebx )
17224 SRC( movl 4(%esi), %edx )
17225 adcl %ebx, %eax
17226-DST( movl %ebx, (%edi) )
17227+DST( movl %ebx, %es:(%edi) )
17228 adcl %edx, %eax
17229-DST( movl %edx, 4(%edi) )
17230+DST( movl %edx, %es:4(%edi) )
17231
17232 SRC( movl 8(%esi), %ebx )
17233 SRC( movl 12(%esi), %edx )
17234 adcl %ebx, %eax
17235-DST( movl %ebx, 8(%edi) )
17236+DST( movl %ebx, %es:8(%edi) )
17237 adcl %edx, %eax
17238-DST( movl %edx, 12(%edi) )
17239+DST( movl %edx, %es:12(%edi) )
17240
17241 SRC( movl 16(%esi), %ebx )
17242 SRC( movl 20(%esi), %edx )
17243 adcl %ebx, %eax
17244-DST( movl %ebx, 16(%edi) )
17245+DST( movl %ebx, %es:16(%edi) )
17246 adcl %edx, %eax
17247-DST( movl %edx, 20(%edi) )
17248+DST( movl %edx, %es:20(%edi) )
17249
17250 SRC( movl 24(%esi), %ebx )
17251 SRC( movl 28(%esi), %edx )
17252 adcl %ebx, %eax
17253-DST( movl %ebx, 24(%edi) )
17254+DST( movl %ebx, %es:24(%edi) )
17255 adcl %edx, %eax
17256-DST( movl %edx, 28(%edi) )
17257+DST( movl %edx, %es:28(%edi) )
17258
17259 lea 32(%esi), %esi
17260 lea 32(%edi), %edi
17261@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17262 shrl $2, %edx # This clears CF
17263 SRC(3: movl (%esi), %ebx )
17264 adcl %ebx, %eax
17265-DST( movl %ebx, (%edi) )
17266+DST( movl %ebx, %es:(%edi) )
17267 lea 4(%esi), %esi
17268 lea 4(%edi), %edi
17269 dec %edx
17270@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17271 jb 5f
17272 SRC( movw (%esi), %cx )
17273 leal 2(%esi), %esi
17274-DST( movw %cx, (%edi) )
17275+DST( movw %cx, %es:(%edi) )
17276 leal 2(%edi), %edi
17277 je 6f
17278 shll $16,%ecx
17279 SRC(5: movb (%esi), %cl )
17280-DST( movb %cl, (%edi) )
17281+DST( movb %cl, %es:(%edi) )
17282 6: addl %ecx, %eax
17283 adcl $0, %eax
17284 7:
17285@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17286
17287 6001:
17288 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17289- movl $-EFAULT, (%ebx)
17290+ movl $-EFAULT, %ss:(%ebx)
17291
17292 # zero the complete destination - computing the rest
17293 # is too much work
17294@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17295
17296 6002:
17297 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17298- movl $-EFAULT,(%ebx)
17299+ movl $-EFAULT,%ss:(%ebx)
17300 jmp 5000b
17301
17302 .previous
17303
17304+ pushl_cfi %ss
17305+ popl_cfi %ds
17306+ pushl_cfi %ss
17307+ popl_cfi %es
17308 popl_cfi %ebx
17309 CFI_RESTORE ebx
17310 popl_cfi %esi
17311@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17312 popl_cfi %ecx # equivalent to addl $4,%esp
17313 ret
17314 CFI_ENDPROC
17315-ENDPROC(csum_partial_copy_generic)
17316+ENDPROC(csum_partial_copy_generic_to_user)
17317
17318 #else
17319
17320 /* Version for PentiumII/PPro */
17321
17322 #define ROUND1(x) \
17323+ nop; nop; nop; \
17324 SRC(movl x(%esi), %ebx ) ; \
17325 addl %ebx, %eax ; \
17326- DST(movl %ebx, x(%edi) ) ;
17327+ DST(movl %ebx, %es:x(%edi)) ;
17328
17329 #define ROUND(x) \
17330+ nop; nop; nop; \
17331 SRC(movl x(%esi), %ebx ) ; \
17332 adcl %ebx, %eax ; \
17333- DST(movl %ebx, x(%edi) ) ;
17334+ DST(movl %ebx, %es:x(%edi)) ;
17335
17336 #define ARGBASE 12
17337-
17338-ENTRY(csum_partial_copy_generic)
17339+
17340+ENTRY(csum_partial_copy_generic_to_user)
17341 CFI_STARTPROC
17342+
17343+#ifdef CONFIG_PAX_MEMORY_UDEREF
17344+ pushl_cfi %gs
17345+ popl_cfi %es
17346+ jmp csum_partial_copy_generic
17347+#endif
17348+
17349+ENTRY(csum_partial_copy_generic_from_user)
17350+
17351+#ifdef CONFIG_PAX_MEMORY_UDEREF
17352+ pushl_cfi %gs
17353+ popl_cfi %ds
17354+#endif
17355+
17356+ENTRY(csum_partial_copy_generic)
17357 pushl_cfi %ebx
17358 CFI_REL_OFFSET ebx, 0
17359 pushl_cfi %edi
17360@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17361 subl %ebx, %edi
17362 lea -1(%esi),%edx
17363 andl $-32,%edx
17364- lea 3f(%ebx,%ebx), %ebx
17365+ lea 3f(%ebx,%ebx,2), %ebx
17366 testl %esi, %esi
17367 jmp *%ebx
17368 1: addl $64,%esi
17369@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17370 jb 5f
17371 SRC( movw (%esi), %dx )
17372 leal 2(%esi), %esi
17373-DST( movw %dx, (%edi) )
17374+DST( movw %dx, %es:(%edi) )
17375 leal 2(%edi), %edi
17376 je 6f
17377 shll $16,%edx
17378 5:
17379 SRC( movb (%esi), %dl )
17380-DST( movb %dl, (%edi) )
17381+DST( movb %dl, %es:(%edi) )
17382 6: addl %edx, %eax
17383 adcl $0, %eax
17384 7:
17385 .section .fixup, "ax"
17386 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17387- movl $-EFAULT, (%ebx)
17388+ movl $-EFAULT, %ss:(%ebx)
17389 # zero the complete destination (computing the rest is too much work)
17390 movl ARGBASE+8(%esp),%edi # dst
17391 movl ARGBASE+12(%esp),%ecx # len
17392@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17393 rep; stosb
17394 jmp 7b
17395 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17396- movl $-EFAULT, (%ebx)
17397+ movl $-EFAULT, %ss:(%ebx)
17398 jmp 7b
17399 .previous
17400
17401+#ifdef CONFIG_PAX_MEMORY_UDEREF
17402+ pushl_cfi %ss
17403+ popl_cfi %ds
17404+ pushl_cfi %ss
17405+ popl_cfi %es
17406+#endif
17407+
17408 popl_cfi %esi
17409 CFI_RESTORE esi
17410 popl_cfi %edi
17411@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17412 CFI_RESTORE ebx
17413 ret
17414 CFI_ENDPROC
17415-ENDPROC(csum_partial_copy_generic)
17416+ENDPROC(csum_partial_copy_generic_to_user)
17417
17418 #undef ROUND
17419 #undef ROUND1
17420diff -urNp linux-3.0.7/arch/x86/lib/clear_page_64.S linux-3.0.7/arch/x86/lib/clear_page_64.S
17421--- linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17422+++ linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17423@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17424 movl $4096/8,%ecx
17425 xorl %eax,%eax
17426 rep stosq
17427+ pax_force_retaddr
17428 ret
17429 CFI_ENDPROC
17430 ENDPROC(clear_page_c)
17431@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17432 movl $4096,%ecx
17433 xorl %eax,%eax
17434 rep stosb
17435+ pax_force_retaddr
17436 ret
17437 CFI_ENDPROC
17438 ENDPROC(clear_page_c_e)
17439@@ -43,6 +45,7 @@ ENTRY(clear_page)
17440 leaq 64(%rdi),%rdi
17441 jnz .Lloop
17442 nop
17443+ pax_force_retaddr
17444 ret
17445 CFI_ENDPROC
17446 .Lclear_page_end:
17447@@ -58,7 +61,7 @@ ENDPROC(clear_page)
17448
17449 #include <asm/cpufeature.h>
17450
17451- .section .altinstr_replacement,"ax"
17452+ .section .altinstr_replacement,"a"
17453 1: .byte 0xeb /* jmp <disp8> */
17454 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17455 2: .byte 0xeb /* jmp <disp8> */
17456diff -urNp linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S
17457--- linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17458+++ linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17459@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17460
17461 popf
17462 mov $1, %al
17463+ pax_force_retaddr
17464 ret
17465
17466 not_same:
17467 popf
17468 xor %al,%al
17469+ pax_force_retaddr
17470 ret
17471
17472 CFI_ENDPROC
17473diff -urNp linux-3.0.7/arch/x86/lib/copy_page_64.S linux-3.0.7/arch/x86/lib/copy_page_64.S
17474--- linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17475+++ linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17476@@ -2,12 +2,14 @@
17477
17478 #include <linux/linkage.h>
17479 #include <asm/dwarf2.h>
17480+#include <asm/alternative-asm.h>
17481
17482 ALIGN
17483 copy_page_c:
17484 CFI_STARTPROC
17485 movl $4096/8,%ecx
17486 rep movsq
17487+ pax_force_retaddr
17488 ret
17489 CFI_ENDPROC
17490 ENDPROC(copy_page_c)
17491@@ -94,6 +96,7 @@ ENTRY(copy_page)
17492 CFI_RESTORE r13
17493 addq $3*8,%rsp
17494 CFI_ADJUST_CFA_OFFSET -3*8
17495+ pax_force_retaddr
17496 ret
17497 .Lcopy_page_end:
17498 CFI_ENDPROC
17499@@ -104,7 +107,7 @@ ENDPROC(copy_page)
17500
17501 #include <asm/cpufeature.h>
17502
17503- .section .altinstr_replacement,"ax"
17504+ .section .altinstr_replacement,"a"
17505 1: .byte 0xeb /* jmp <disp8> */
17506 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17507 2:
17508diff -urNp linux-3.0.7/arch/x86/lib/copy_user_64.S linux-3.0.7/arch/x86/lib/copy_user_64.S
17509--- linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17510+++ linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17511@@ -16,6 +16,7 @@
17512 #include <asm/thread_info.h>
17513 #include <asm/cpufeature.h>
17514 #include <asm/alternative-asm.h>
17515+#include <asm/pgtable.h>
17516
17517 /*
17518 * By placing feature2 after feature1 in altinstructions section, we logically
17519@@ -29,7 +30,7 @@
17520 .byte 0xe9 /* 32bit jump */
17521 .long \orig-1f /* by default jump to orig */
17522 1:
17523- .section .altinstr_replacement,"ax"
17524+ .section .altinstr_replacement,"a"
17525 2: .byte 0xe9 /* near jump with 32bit immediate */
17526 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17527 3: .byte 0xe9 /* near jump with 32bit immediate */
17528@@ -71,47 +72,20 @@
17529 #endif
17530 .endm
17531
17532-/* Standard copy_to_user with segment limit checking */
17533-ENTRY(_copy_to_user)
17534- CFI_STARTPROC
17535- GET_THREAD_INFO(%rax)
17536- movq %rdi,%rcx
17537- addq %rdx,%rcx
17538- jc bad_to_user
17539- cmpq TI_addr_limit(%rax),%rcx
17540- ja bad_to_user
17541- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17542- copy_user_generic_unrolled,copy_user_generic_string, \
17543- copy_user_enhanced_fast_string
17544- CFI_ENDPROC
17545-ENDPROC(_copy_to_user)
17546-
17547-/* Standard copy_from_user with segment limit checking */
17548-ENTRY(_copy_from_user)
17549- CFI_STARTPROC
17550- GET_THREAD_INFO(%rax)
17551- movq %rsi,%rcx
17552- addq %rdx,%rcx
17553- jc bad_from_user
17554- cmpq TI_addr_limit(%rax),%rcx
17555- ja bad_from_user
17556- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17557- copy_user_generic_unrolled,copy_user_generic_string, \
17558- copy_user_enhanced_fast_string
17559- CFI_ENDPROC
17560-ENDPROC(_copy_from_user)
17561-
17562 .section .fixup,"ax"
17563 /* must zero dest */
17564 ENTRY(bad_from_user)
17565 bad_from_user:
17566 CFI_STARTPROC
17567+ testl %edx,%edx
17568+ js bad_to_user
17569 movl %edx,%ecx
17570 xorl %eax,%eax
17571 rep
17572 stosb
17573 bad_to_user:
17574 movl %edx,%eax
17575+ pax_force_retaddr
17576 ret
17577 CFI_ENDPROC
17578 ENDPROC(bad_from_user)
17579@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17580 decl %ecx
17581 jnz 21b
17582 23: xor %eax,%eax
17583+ pax_force_retaddr
17584 ret
17585
17586 .section .fixup,"ax"
17587@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17588 3: rep
17589 movsb
17590 4: xorl %eax,%eax
17591+ pax_force_retaddr
17592 ret
17593
17594 .section .fixup,"ax"
17595@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17596 1: rep
17597 movsb
17598 2: xorl %eax,%eax
17599+ pax_force_retaddr
17600 ret
17601
17602 .section .fixup,"ax"
17603diff -urNp linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S
17604--- linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17605+++ linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17606@@ -8,12 +8,14 @@
17607
17608 #include <linux/linkage.h>
17609 #include <asm/dwarf2.h>
17610+#include <asm/alternative-asm.h>
17611
17612 #define FIX_ALIGNMENT 1
17613
17614 #include <asm/current.h>
17615 #include <asm/asm-offsets.h>
17616 #include <asm/thread_info.h>
17617+#include <asm/pgtable.h>
17618
17619 .macro ALIGN_DESTINATION
17620 #ifdef FIX_ALIGNMENT
17621@@ -50,6 +52,15 @@
17622 */
17623 ENTRY(__copy_user_nocache)
17624 CFI_STARTPROC
17625+
17626+#ifdef CONFIG_PAX_MEMORY_UDEREF
17627+ mov $PAX_USER_SHADOW_BASE,%rcx
17628+ cmp %rcx,%rsi
17629+ jae 1f
17630+ add %rcx,%rsi
17631+1:
17632+#endif
17633+
17634 cmpl $8,%edx
17635 jb 20f /* less then 8 bytes, go to byte copy loop */
17636 ALIGN_DESTINATION
17637@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17638 jnz 21b
17639 23: xorl %eax,%eax
17640 sfence
17641+ pax_force_retaddr
17642 ret
17643
17644 .section .fixup,"ax"
17645diff -urNp linux-3.0.7/arch/x86/lib/csum-copy_64.S linux-3.0.7/arch/x86/lib/csum-copy_64.S
17646--- linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17647+++ linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17648@@ -8,6 +8,7 @@
17649 #include <linux/linkage.h>
17650 #include <asm/dwarf2.h>
17651 #include <asm/errno.h>
17652+#include <asm/alternative-asm.h>
17653
17654 /*
17655 * Checksum copy with exception handling.
17656@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
17657 CFI_RESTORE rbp
17658 addq $7*8, %rsp
17659 CFI_ADJUST_CFA_OFFSET -7*8
17660+ pax_force_retaddr
17661 ret
17662 CFI_RESTORE_STATE
17663
17664diff -urNp linux-3.0.7/arch/x86/lib/csum-wrappers_64.c linux-3.0.7/arch/x86/lib/csum-wrappers_64.c
17665--- linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17666+++ linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
17667@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
17668 len -= 2;
17669 }
17670 }
17671- isum = csum_partial_copy_generic((__force const void *)src,
17672+
17673+#ifdef CONFIG_PAX_MEMORY_UDEREF
17674+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17675+ src += PAX_USER_SHADOW_BASE;
17676+#endif
17677+
17678+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
17679 dst, len, isum, errp, NULL);
17680 if (unlikely(*errp))
17681 goto out_err;
17682@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
17683 }
17684
17685 *errp = 0;
17686- return csum_partial_copy_generic(src, (void __force *)dst,
17687+
17688+#ifdef CONFIG_PAX_MEMORY_UDEREF
17689+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17690+ dst += PAX_USER_SHADOW_BASE;
17691+#endif
17692+
17693+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
17694 len, isum, NULL, errp);
17695 }
17696 EXPORT_SYMBOL(csum_partial_copy_to_user);
17697diff -urNp linux-3.0.7/arch/x86/lib/getuser.S linux-3.0.7/arch/x86/lib/getuser.S
17698--- linux-3.0.7/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17699+++ linux-3.0.7/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
17700@@ -33,15 +33,38 @@
17701 #include <asm/asm-offsets.h>
17702 #include <asm/thread_info.h>
17703 #include <asm/asm.h>
17704+#include <asm/segment.h>
17705+#include <asm/pgtable.h>
17706+#include <asm/alternative-asm.h>
17707+
17708+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17709+#define __copyuser_seg gs;
17710+#else
17711+#define __copyuser_seg
17712+#endif
17713
17714 .text
17715 ENTRY(__get_user_1)
17716 CFI_STARTPROC
17717+
17718+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17719 GET_THREAD_INFO(%_ASM_DX)
17720 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17721 jae bad_get_user
17722-1: movzb (%_ASM_AX),%edx
17723+
17724+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17725+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17726+ cmp %_ASM_DX,%_ASM_AX
17727+ jae 1234f
17728+ add %_ASM_DX,%_ASM_AX
17729+1234:
17730+#endif
17731+
17732+#endif
17733+
17734+1: __copyuser_seg movzb (%_ASM_AX),%edx
17735 xor %eax,%eax
17736+ pax_force_retaddr
17737 ret
17738 CFI_ENDPROC
17739 ENDPROC(__get_user_1)
17740@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
17741 ENTRY(__get_user_2)
17742 CFI_STARTPROC
17743 add $1,%_ASM_AX
17744+
17745+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17746 jc bad_get_user
17747 GET_THREAD_INFO(%_ASM_DX)
17748 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17749 jae bad_get_user
17750-2: movzwl -1(%_ASM_AX),%edx
17751+
17752+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17753+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17754+ cmp %_ASM_DX,%_ASM_AX
17755+ jae 1234f
17756+ add %_ASM_DX,%_ASM_AX
17757+1234:
17758+#endif
17759+
17760+#endif
17761+
17762+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17763 xor %eax,%eax
17764+ pax_force_retaddr
17765 ret
17766 CFI_ENDPROC
17767 ENDPROC(__get_user_2)
17768@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
17769 ENTRY(__get_user_4)
17770 CFI_STARTPROC
17771 add $3,%_ASM_AX
17772+
17773+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17774 jc bad_get_user
17775 GET_THREAD_INFO(%_ASM_DX)
17776 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17777 jae bad_get_user
17778-3: mov -3(%_ASM_AX),%edx
17779+
17780+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17781+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17782+ cmp %_ASM_DX,%_ASM_AX
17783+ jae 1234f
17784+ add %_ASM_DX,%_ASM_AX
17785+1234:
17786+#endif
17787+
17788+#endif
17789+
17790+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17791 xor %eax,%eax
17792+ pax_force_retaddr
17793 ret
17794 CFI_ENDPROC
17795 ENDPROC(__get_user_4)
17796@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
17797 GET_THREAD_INFO(%_ASM_DX)
17798 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17799 jae bad_get_user
17800+
17801+#ifdef CONFIG_PAX_MEMORY_UDEREF
17802+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17803+ cmp %_ASM_DX,%_ASM_AX
17804+ jae 1234f
17805+ add %_ASM_DX,%_ASM_AX
17806+1234:
17807+#endif
17808+
17809 4: movq -7(%_ASM_AX),%_ASM_DX
17810 xor %eax,%eax
17811+ pax_force_retaddr
17812 ret
17813 CFI_ENDPROC
17814 ENDPROC(__get_user_8)
17815@@ -91,6 +152,7 @@ bad_get_user:
17816 CFI_STARTPROC
17817 xor %edx,%edx
17818 mov $(-EFAULT),%_ASM_AX
17819+ pax_force_retaddr
17820 ret
17821 CFI_ENDPROC
17822 END(bad_get_user)
17823diff -urNp linux-3.0.7/arch/x86/lib/insn.c linux-3.0.7/arch/x86/lib/insn.c
17824--- linux-3.0.7/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17825+++ linux-3.0.7/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17826@@ -21,6 +21,11 @@
17827 #include <linux/string.h>
17828 #include <asm/inat.h>
17829 #include <asm/insn.h>
17830+#ifdef __KERNEL__
17831+#include <asm/pgtable_types.h>
17832+#else
17833+#define ktla_ktva(addr) addr
17834+#endif
17835
17836 #define get_next(t, insn) \
17837 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17838@@ -40,8 +45,8 @@
17839 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17840 {
17841 memset(insn, 0, sizeof(*insn));
17842- insn->kaddr = kaddr;
17843- insn->next_byte = kaddr;
17844+ insn->kaddr = ktla_ktva(kaddr);
17845+ insn->next_byte = ktla_ktva(kaddr);
17846 insn->x86_64 = x86_64 ? 1 : 0;
17847 insn->opnd_bytes = 4;
17848 if (x86_64)
17849diff -urNp linux-3.0.7/arch/x86/lib/iomap_copy_64.S linux-3.0.7/arch/x86/lib/iomap_copy_64.S
17850--- linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17851+++ linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
17852@@ -17,6 +17,7 @@
17853
17854 #include <linux/linkage.h>
17855 #include <asm/dwarf2.h>
17856+#include <asm/alternative-asm.h>
17857
17858 /*
17859 * override generic version in lib/iomap_copy.c
17860@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
17861 CFI_STARTPROC
17862 movl %edx,%ecx
17863 rep movsd
17864+ pax_force_retaddr
17865 ret
17866 CFI_ENDPROC
17867 ENDPROC(__iowrite32_copy)
17868diff -urNp linux-3.0.7/arch/x86/lib/memcpy_64.S linux-3.0.7/arch/x86/lib/memcpy_64.S
17869--- linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17870+++ linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
17871@@ -34,6 +34,7 @@
17872 rep movsq
17873 movl %edx, %ecx
17874 rep movsb
17875+ pax_force_retaddr
17876 ret
17877 .Lmemcpy_e:
17878 .previous
17879@@ -51,6 +52,7 @@
17880
17881 movl %edx, %ecx
17882 rep movsb
17883+ pax_force_retaddr
17884 ret
17885 .Lmemcpy_e_e:
17886 .previous
17887@@ -141,6 +143,7 @@ ENTRY(memcpy)
17888 movq %r9, 1*8(%rdi)
17889 movq %r10, -2*8(%rdi, %rdx)
17890 movq %r11, -1*8(%rdi, %rdx)
17891+ pax_force_retaddr
17892 retq
17893 .p2align 4
17894 .Lless_16bytes:
17895@@ -153,6 +156,7 @@ ENTRY(memcpy)
17896 movq -1*8(%rsi, %rdx), %r9
17897 movq %r8, 0*8(%rdi)
17898 movq %r9, -1*8(%rdi, %rdx)
17899+ pax_force_retaddr
17900 retq
17901 .p2align 4
17902 .Lless_8bytes:
17903@@ -166,6 +170,7 @@ ENTRY(memcpy)
17904 movl -4(%rsi, %rdx), %r8d
17905 movl %ecx, (%rdi)
17906 movl %r8d, -4(%rdi, %rdx)
17907+ pax_force_retaddr
17908 retq
17909 .p2align 4
17910 .Lless_3bytes:
17911@@ -183,6 +188,7 @@ ENTRY(memcpy)
17912 jnz .Lloop_1
17913
17914 .Lend:
17915+ pax_force_retaddr
17916 retq
17917 CFI_ENDPROC
17918 ENDPROC(memcpy)
17919diff -urNp linux-3.0.7/arch/x86/lib/memmove_64.S linux-3.0.7/arch/x86/lib/memmove_64.S
17920--- linux-3.0.7/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17921+++ linux-3.0.7/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
17922@@ -9,6 +9,7 @@
17923 #include <linux/linkage.h>
17924 #include <asm/dwarf2.h>
17925 #include <asm/cpufeature.h>
17926+#include <asm/alternative-asm.h>
17927
17928 #undef memmove
17929
17930@@ -201,6 +202,7 @@ ENTRY(memmove)
17931 movb (%rsi), %r11b
17932 movb %r11b, (%rdi)
17933 13:
17934+ pax_force_retaddr
17935 retq
17936 CFI_ENDPROC
17937
17938@@ -209,6 +211,7 @@ ENTRY(memmove)
17939 /* Forward moving data. */
17940 movq %rdx, %rcx
17941 rep movsb
17942+ pax_force_retaddr
17943 retq
17944 .Lmemmove_end_forward_efs:
17945 .previous
17946diff -urNp linux-3.0.7/arch/x86/lib/memset_64.S linux-3.0.7/arch/x86/lib/memset_64.S
17947--- linux-3.0.7/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17948+++ linux-3.0.7/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
17949@@ -31,6 +31,7 @@
17950 movl %r8d,%ecx
17951 rep stosb
17952 movq %r9,%rax
17953+ pax_force_retaddr
17954 ret
17955 .Lmemset_e:
17956 .previous
17957@@ -53,6 +54,7 @@
17958 movl %edx,%ecx
17959 rep stosb
17960 movq %r9,%rax
17961+ pax_force_retaddr
17962 ret
17963 .Lmemset_e_e:
17964 .previous
17965@@ -121,6 +123,7 @@ ENTRY(__memset)
17966
17967 .Lende:
17968 movq %r10,%rax
17969+ pax_force_retaddr
17970 ret
17971
17972 CFI_RESTORE_STATE
17973diff -urNp linux-3.0.7/arch/x86/lib/mmx_32.c linux-3.0.7/arch/x86/lib/mmx_32.c
17974--- linux-3.0.7/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17975+++ linux-3.0.7/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17976@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17977 {
17978 void *p;
17979 int i;
17980+ unsigned long cr0;
17981
17982 if (unlikely(in_interrupt()))
17983 return __memcpy(to, from, len);
17984@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17985 kernel_fpu_begin();
17986
17987 __asm__ __volatile__ (
17988- "1: prefetch (%0)\n" /* This set is 28 bytes */
17989- " prefetch 64(%0)\n"
17990- " prefetch 128(%0)\n"
17991- " prefetch 192(%0)\n"
17992- " prefetch 256(%0)\n"
17993+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17994+ " prefetch 64(%1)\n"
17995+ " prefetch 128(%1)\n"
17996+ " prefetch 192(%1)\n"
17997+ " prefetch 256(%1)\n"
17998 "2: \n"
17999 ".section .fixup, \"ax\"\n"
18000- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18001+ "3: \n"
18002+
18003+#ifdef CONFIG_PAX_KERNEXEC
18004+ " movl %%cr0, %0\n"
18005+ " movl %0, %%eax\n"
18006+ " andl $0xFFFEFFFF, %%eax\n"
18007+ " movl %%eax, %%cr0\n"
18008+#endif
18009+
18010+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18011+
18012+#ifdef CONFIG_PAX_KERNEXEC
18013+ " movl %0, %%cr0\n"
18014+#endif
18015+
18016 " jmp 2b\n"
18017 ".previous\n"
18018 _ASM_EXTABLE(1b, 3b)
18019- : : "r" (from));
18020+ : "=&r" (cr0) : "r" (from) : "ax");
18021
18022 for ( ; i > 5; i--) {
18023 __asm__ __volatile__ (
18024- "1: prefetch 320(%0)\n"
18025- "2: movq (%0), %%mm0\n"
18026- " movq 8(%0), %%mm1\n"
18027- " movq 16(%0), %%mm2\n"
18028- " movq 24(%0), %%mm3\n"
18029- " movq %%mm0, (%1)\n"
18030- " movq %%mm1, 8(%1)\n"
18031- " movq %%mm2, 16(%1)\n"
18032- " movq %%mm3, 24(%1)\n"
18033- " movq 32(%0), %%mm0\n"
18034- " movq 40(%0), %%mm1\n"
18035- " movq 48(%0), %%mm2\n"
18036- " movq 56(%0), %%mm3\n"
18037- " movq %%mm0, 32(%1)\n"
18038- " movq %%mm1, 40(%1)\n"
18039- " movq %%mm2, 48(%1)\n"
18040- " movq %%mm3, 56(%1)\n"
18041+ "1: prefetch 320(%1)\n"
18042+ "2: movq (%1), %%mm0\n"
18043+ " movq 8(%1), %%mm1\n"
18044+ " movq 16(%1), %%mm2\n"
18045+ " movq 24(%1), %%mm3\n"
18046+ " movq %%mm0, (%2)\n"
18047+ " movq %%mm1, 8(%2)\n"
18048+ " movq %%mm2, 16(%2)\n"
18049+ " movq %%mm3, 24(%2)\n"
18050+ " movq 32(%1), %%mm0\n"
18051+ " movq 40(%1), %%mm1\n"
18052+ " movq 48(%1), %%mm2\n"
18053+ " movq 56(%1), %%mm3\n"
18054+ " movq %%mm0, 32(%2)\n"
18055+ " movq %%mm1, 40(%2)\n"
18056+ " movq %%mm2, 48(%2)\n"
18057+ " movq %%mm3, 56(%2)\n"
18058 ".section .fixup, \"ax\"\n"
18059- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18060+ "3:\n"
18061+
18062+#ifdef CONFIG_PAX_KERNEXEC
18063+ " movl %%cr0, %0\n"
18064+ " movl %0, %%eax\n"
18065+ " andl $0xFFFEFFFF, %%eax\n"
18066+ " movl %%eax, %%cr0\n"
18067+#endif
18068+
18069+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18070+
18071+#ifdef CONFIG_PAX_KERNEXEC
18072+ " movl %0, %%cr0\n"
18073+#endif
18074+
18075 " jmp 2b\n"
18076 ".previous\n"
18077 _ASM_EXTABLE(1b, 3b)
18078- : : "r" (from), "r" (to) : "memory");
18079+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18080
18081 from += 64;
18082 to += 64;
18083@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18084 static void fast_copy_page(void *to, void *from)
18085 {
18086 int i;
18087+ unsigned long cr0;
18088
18089 kernel_fpu_begin();
18090
18091@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18092 * but that is for later. -AV
18093 */
18094 __asm__ __volatile__(
18095- "1: prefetch (%0)\n"
18096- " prefetch 64(%0)\n"
18097- " prefetch 128(%0)\n"
18098- " prefetch 192(%0)\n"
18099- " prefetch 256(%0)\n"
18100+ "1: prefetch (%1)\n"
18101+ " prefetch 64(%1)\n"
18102+ " prefetch 128(%1)\n"
18103+ " prefetch 192(%1)\n"
18104+ " prefetch 256(%1)\n"
18105 "2: \n"
18106 ".section .fixup, \"ax\"\n"
18107- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18108+ "3: \n"
18109+
18110+#ifdef CONFIG_PAX_KERNEXEC
18111+ " movl %%cr0, %0\n"
18112+ " movl %0, %%eax\n"
18113+ " andl $0xFFFEFFFF, %%eax\n"
18114+ " movl %%eax, %%cr0\n"
18115+#endif
18116+
18117+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18118+
18119+#ifdef CONFIG_PAX_KERNEXEC
18120+ " movl %0, %%cr0\n"
18121+#endif
18122+
18123 " jmp 2b\n"
18124 ".previous\n"
18125- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18126+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18127
18128 for (i = 0; i < (4096-320)/64; i++) {
18129 __asm__ __volatile__ (
18130- "1: prefetch 320(%0)\n"
18131- "2: movq (%0), %%mm0\n"
18132- " movntq %%mm0, (%1)\n"
18133- " movq 8(%0), %%mm1\n"
18134- " movntq %%mm1, 8(%1)\n"
18135- " movq 16(%0), %%mm2\n"
18136- " movntq %%mm2, 16(%1)\n"
18137- " movq 24(%0), %%mm3\n"
18138- " movntq %%mm3, 24(%1)\n"
18139- " movq 32(%0), %%mm4\n"
18140- " movntq %%mm4, 32(%1)\n"
18141- " movq 40(%0), %%mm5\n"
18142- " movntq %%mm5, 40(%1)\n"
18143- " movq 48(%0), %%mm6\n"
18144- " movntq %%mm6, 48(%1)\n"
18145- " movq 56(%0), %%mm7\n"
18146- " movntq %%mm7, 56(%1)\n"
18147+ "1: prefetch 320(%1)\n"
18148+ "2: movq (%1), %%mm0\n"
18149+ " movntq %%mm0, (%2)\n"
18150+ " movq 8(%1), %%mm1\n"
18151+ " movntq %%mm1, 8(%2)\n"
18152+ " movq 16(%1), %%mm2\n"
18153+ " movntq %%mm2, 16(%2)\n"
18154+ " movq 24(%1), %%mm3\n"
18155+ " movntq %%mm3, 24(%2)\n"
18156+ " movq 32(%1), %%mm4\n"
18157+ " movntq %%mm4, 32(%2)\n"
18158+ " movq 40(%1), %%mm5\n"
18159+ " movntq %%mm5, 40(%2)\n"
18160+ " movq 48(%1), %%mm6\n"
18161+ " movntq %%mm6, 48(%2)\n"
18162+ " movq 56(%1), %%mm7\n"
18163+ " movntq %%mm7, 56(%2)\n"
18164 ".section .fixup, \"ax\"\n"
18165- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18166+ "3:\n"
18167+
18168+#ifdef CONFIG_PAX_KERNEXEC
18169+ " movl %%cr0, %0\n"
18170+ " movl %0, %%eax\n"
18171+ " andl $0xFFFEFFFF, %%eax\n"
18172+ " movl %%eax, %%cr0\n"
18173+#endif
18174+
18175+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18176+
18177+#ifdef CONFIG_PAX_KERNEXEC
18178+ " movl %0, %%cr0\n"
18179+#endif
18180+
18181 " jmp 2b\n"
18182 ".previous\n"
18183- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18184+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18185
18186 from += 64;
18187 to += 64;
18188@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18189 static void fast_copy_page(void *to, void *from)
18190 {
18191 int i;
18192+ unsigned long cr0;
18193
18194 kernel_fpu_begin();
18195
18196 __asm__ __volatile__ (
18197- "1: prefetch (%0)\n"
18198- " prefetch 64(%0)\n"
18199- " prefetch 128(%0)\n"
18200- " prefetch 192(%0)\n"
18201- " prefetch 256(%0)\n"
18202+ "1: prefetch (%1)\n"
18203+ " prefetch 64(%1)\n"
18204+ " prefetch 128(%1)\n"
18205+ " prefetch 192(%1)\n"
18206+ " prefetch 256(%1)\n"
18207 "2: \n"
18208 ".section .fixup, \"ax\"\n"
18209- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18210+ "3: \n"
18211+
18212+#ifdef CONFIG_PAX_KERNEXEC
18213+ " movl %%cr0, %0\n"
18214+ " movl %0, %%eax\n"
18215+ " andl $0xFFFEFFFF, %%eax\n"
18216+ " movl %%eax, %%cr0\n"
18217+#endif
18218+
18219+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18220+
18221+#ifdef CONFIG_PAX_KERNEXEC
18222+ " movl %0, %%cr0\n"
18223+#endif
18224+
18225 " jmp 2b\n"
18226 ".previous\n"
18227- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18228+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18229
18230 for (i = 0; i < 4096/64; i++) {
18231 __asm__ __volatile__ (
18232- "1: prefetch 320(%0)\n"
18233- "2: movq (%0), %%mm0\n"
18234- " movq 8(%0), %%mm1\n"
18235- " movq 16(%0), %%mm2\n"
18236- " movq 24(%0), %%mm3\n"
18237- " movq %%mm0, (%1)\n"
18238- " movq %%mm1, 8(%1)\n"
18239- " movq %%mm2, 16(%1)\n"
18240- " movq %%mm3, 24(%1)\n"
18241- " movq 32(%0), %%mm0\n"
18242- " movq 40(%0), %%mm1\n"
18243- " movq 48(%0), %%mm2\n"
18244- " movq 56(%0), %%mm3\n"
18245- " movq %%mm0, 32(%1)\n"
18246- " movq %%mm1, 40(%1)\n"
18247- " movq %%mm2, 48(%1)\n"
18248- " movq %%mm3, 56(%1)\n"
18249+ "1: prefetch 320(%1)\n"
18250+ "2: movq (%1), %%mm0\n"
18251+ " movq 8(%1), %%mm1\n"
18252+ " movq 16(%1), %%mm2\n"
18253+ " movq 24(%1), %%mm3\n"
18254+ " movq %%mm0, (%2)\n"
18255+ " movq %%mm1, 8(%2)\n"
18256+ " movq %%mm2, 16(%2)\n"
18257+ " movq %%mm3, 24(%2)\n"
18258+ " movq 32(%1), %%mm0\n"
18259+ " movq 40(%1), %%mm1\n"
18260+ " movq 48(%1), %%mm2\n"
18261+ " movq 56(%1), %%mm3\n"
18262+ " movq %%mm0, 32(%2)\n"
18263+ " movq %%mm1, 40(%2)\n"
18264+ " movq %%mm2, 48(%2)\n"
18265+ " movq %%mm3, 56(%2)\n"
18266 ".section .fixup, \"ax\"\n"
18267- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18268+ "3:\n"
18269+
18270+#ifdef CONFIG_PAX_KERNEXEC
18271+ " movl %%cr0, %0\n"
18272+ " movl %0, %%eax\n"
18273+ " andl $0xFFFEFFFF, %%eax\n"
18274+ " movl %%eax, %%cr0\n"
18275+#endif
18276+
18277+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18278+
18279+#ifdef CONFIG_PAX_KERNEXEC
18280+ " movl %0, %%cr0\n"
18281+#endif
18282+
18283 " jmp 2b\n"
18284 ".previous\n"
18285 _ASM_EXTABLE(1b, 3b)
18286- : : "r" (from), "r" (to) : "memory");
18287+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18288
18289 from += 64;
18290 to += 64;
18291diff -urNp linux-3.0.7/arch/x86/lib/msr-reg.S linux-3.0.7/arch/x86/lib/msr-reg.S
18292--- linux-3.0.7/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18293+++ linux-3.0.7/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18294@@ -3,6 +3,7 @@
18295 #include <asm/dwarf2.h>
18296 #include <asm/asm.h>
18297 #include <asm/msr.h>
18298+#include <asm/alternative-asm.h>
18299
18300 #ifdef CONFIG_X86_64
18301 /*
18302@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18303 movl %edi, 28(%r10)
18304 popq_cfi %rbp
18305 popq_cfi %rbx
18306+ pax_force_retaddr
18307 ret
18308 3:
18309 CFI_RESTORE_STATE
18310diff -urNp linux-3.0.7/arch/x86/lib/putuser.S linux-3.0.7/arch/x86/lib/putuser.S
18311--- linux-3.0.7/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18312+++ linux-3.0.7/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18313@@ -15,7 +15,9 @@
18314 #include <asm/thread_info.h>
18315 #include <asm/errno.h>
18316 #include <asm/asm.h>
18317-
18318+#include <asm/segment.h>
18319+#include <asm/pgtable.h>
18320+#include <asm/alternative-asm.h>
18321
18322 /*
18323 * __put_user_X
18324@@ -29,52 +31,119 @@
18325 * as they get called from within inline assembly.
18326 */
18327
18328-#define ENTER CFI_STARTPROC ; \
18329- GET_THREAD_INFO(%_ASM_BX)
18330-#define EXIT ret ; \
18331+#define ENTER CFI_STARTPROC
18332+#define EXIT pax_force_retaddr; ret ; \
18333 CFI_ENDPROC
18334
18335+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18336+#define _DEST %_ASM_CX,%_ASM_BX
18337+#else
18338+#define _DEST %_ASM_CX
18339+#endif
18340+
18341+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18342+#define __copyuser_seg gs;
18343+#else
18344+#define __copyuser_seg
18345+#endif
18346+
18347 .text
18348 ENTRY(__put_user_1)
18349 ENTER
18350+
18351+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18352+ GET_THREAD_INFO(%_ASM_BX)
18353 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18354 jae bad_put_user
18355-1: movb %al,(%_ASM_CX)
18356+
18357+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18358+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18359+ cmp %_ASM_BX,%_ASM_CX
18360+ jb 1234f
18361+ xor %ebx,%ebx
18362+1234:
18363+#endif
18364+
18365+#endif
18366+
18367+1: __copyuser_seg movb %al,(_DEST)
18368 xor %eax,%eax
18369 EXIT
18370 ENDPROC(__put_user_1)
18371
18372 ENTRY(__put_user_2)
18373 ENTER
18374+
18375+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18376+ GET_THREAD_INFO(%_ASM_BX)
18377 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18378 sub $1,%_ASM_BX
18379 cmp %_ASM_BX,%_ASM_CX
18380 jae bad_put_user
18381-2: movw %ax,(%_ASM_CX)
18382+
18383+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18384+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18385+ cmp %_ASM_BX,%_ASM_CX
18386+ jb 1234f
18387+ xor %ebx,%ebx
18388+1234:
18389+#endif
18390+
18391+#endif
18392+
18393+2: __copyuser_seg movw %ax,(_DEST)
18394 xor %eax,%eax
18395 EXIT
18396 ENDPROC(__put_user_2)
18397
18398 ENTRY(__put_user_4)
18399 ENTER
18400+
18401+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18402+ GET_THREAD_INFO(%_ASM_BX)
18403 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18404 sub $3,%_ASM_BX
18405 cmp %_ASM_BX,%_ASM_CX
18406 jae bad_put_user
18407-3: movl %eax,(%_ASM_CX)
18408+
18409+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18410+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18411+ cmp %_ASM_BX,%_ASM_CX
18412+ jb 1234f
18413+ xor %ebx,%ebx
18414+1234:
18415+#endif
18416+
18417+#endif
18418+
18419+3: __copyuser_seg movl %eax,(_DEST)
18420 xor %eax,%eax
18421 EXIT
18422 ENDPROC(__put_user_4)
18423
18424 ENTRY(__put_user_8)
18425 ENTER
18426+
18427+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18428+ GET_THREAD_INFO(%_ASM_BX)
18429 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18430 sub $7,%_ASM_BX
18431 cmp %_ASM_BX,%_ASM_CX
18432 jae bad_put_user
18433-4: mov %_ASM_AX,(%_ASM_CX)
18434+
18435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18436+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18437+ cmp %_ASM_BX,%_ASM_CX
18438+ jb 1234f
18439+ xor %ebx,%ebx
18440+1234:
18441+#endif
18442+
18443+#endif
18444+
18445+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18446 #ifdef CONFIG_X86_32
18447-5: movl %edx,4(%_ASM_CX)
18448+5: __copyuser_seg movl %edx,4(_DEST)
18449 #endif
18450 xor %eax,%eax
18451 EXIT
18452diff -urNp linux-3.0.7/arch/x86/lib/rwlock_64.S linux-3.0.7/arch/x86/lib/rwlock_64.S
18453--- linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18454+++ linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18455@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18456 LOCK_PREFIX
18457 subl $RW_LOCK_BIAS,(%rdi)
18458 jnz __write_lock_failed
18459+ pax_force_retaddr
18460 ret
18461 CFI_ENDPROC
18462 END(__write_lock_failed)
18463@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18464 LOCK_PREFIX
18465 decl (%rdi)
18466 js __read_lock_failed
18467+ pax_force_retaddr
18468 ret
18469 CFI_ENDPROC
18470 END(__read_lock_failed)
18471diff -urNp linux-3.0.7/arch/x86/lib/rwsem_64.S linux-3.0.7/arch/x86/lib/rwsem_64.S
18472--- linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18473+++ linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
18474@@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18475 popq_cfi %rdx
18476 CFI_RESTORE rdx
18477 restore_common_regs
18478+ pax_force_retaddr
18479 ret
18480 CFI_ENDPROC
18481 ENDPROC(call_rwsem_down_read_failed)
18482@@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18483 movq %rax,%rdi
18484 call rwsem_down_write_failed
18485 restore_common_regs
18486+ pax_force_retaddr
18487 ret
18488 CFI_ENDPROC
18489 ENDPROC(call_rwsem_down_write_failed)
18490@@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
18491 movq %rax,%rdi
18492 call rwsem_wake
18493 restore_common_regs
18494-1: ret
18495+1: pax_force_retaddr
18496+ ret
18497 CFI_ENDPROC
18498 ENDPROC(call_rwsem_wake)
18499
18500@@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18501 popq_cfi %rdx
18502 CFI_RESTORE rdx
18503 restore_common_regs
18504+ pax_force_retaddr
18505 ret
18506 CFI_ENDPROC
18507 ENDPROC(call_rwsem_downgrade_wake)
18508diff -urNp linux-3.0.7/arch/x86/lib/thunk_64.S linux-3.0.7/arch/x86/lib/thunk_64.S
18509--- linux-3.0.7/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18510+++ linux-3.0.7/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18511@@ -10,7 +10,8 @@
18512 #include <asm/dwarf2.h>
18513 #include <asm/calling.h>
18514 #include <asm/rwlock.h>
18515-
18516+ #include <asm/alternative-asm.h>
18517+
18518 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18519 .macro thunk name,func
18520 .globl \name
18521@@ -50,5 +51,6 @@
18522 SAVE_ARGS
18523 restore:
18524 RESTORE_ARGS
18525- ret
18526+ pax_force_retaddr
18527+ ret
18528 CFI_ENDPROC
18529diff -urNp linux-3.0.7/arch/x86/lib/usercopy_32.c linux-3.0.7/arch/x86/lib/usercopy_32.c
18530--- linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18531+++ linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18532@@ -43,7 +43,7 @@ do { \
18533 __asm__ __volatile__( \
18534 " testl %1,%1\n" \
18535 " jz 2f\n" \
18536- "0: lodsb\n" \
18537+ "0: "__copyuser_seg"lodsb\n" \
18538 " stosb\n" \
18539 " testb %%al,%%al\n" \
18540 " jz 1f\n" \
18541@@ -128,10 +128,12 @@ do { \
18542 int __d0; \
18543 might_fault(); \
18544 __asm__ __volatile__( \
18545+ __COPYUSER_SET_ES \
18546 "0: rep; stosl\n" \
18547 " movl %2,%0\n" \
18548 "1: rep; stosb\n" \
18549 "2:\n" \
18550+ __COPYUSER_RESTORE_ES \
18551 ".section .fixup,\"ax\"\n" \
18552 "3: lea 0(%2,%0,4),%0\n" \
18553 " jmp 2b\n" \
18554@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18555 might_fault();
18556
18557 __asm__ __volatile__(
18558+ __COPYUSER_SET_ES
18559 " testl %0, %0\n"
18560 " jz 3f\n"
18561 " andl %0,%%ecx\n"
18562@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18563 " subl %%ecx,%0\n"
18564 " addl %0,%%eax\n"
18565 "1:\n"
18566+ __COPYUSER_RESTORE_ES
18567 ".section .fixup,\"ax\"\n"
18568 "2: xorl %%eax,%%eax\n"
18569 " jmp 1b\n"
18570@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18571
18572 #ifdef CONFIG_X86_INTEL_USERCOPY
18573 static unsigned long
18574-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18575+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18576 {
18577 int d0, d1;
18578 __asm__ __volatile__(
18579@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18580 " .align 2,0x90\n"
18581 "3: movl 0(%4), %%eax\n"
18582 "4: movl 4(%4), %%edx\n"
18583- "5: movl %%eax, 0(%3)\n"
18584- "6: movl %%edx, 4(%3)\n"
18585+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18586+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18587 "7: movl 8(%4), %%eax\n"
18588 "8: movl 12(%4),%%edx\n"
18589- "9: movl %%eax, 8(%3)\n"
18590- "10: movl %%edx, 12(%3)\n"
18591+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18592+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18593 "11: movl 16(%4), %%eax\n"
18594 "12: movl 20(%4), %%edx\n"
18595- "13: movl %%eax, 16(%3)\n"
18596- "14: movl %%edx, 20(%3)\n"
18597+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18598+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18599 "15: movl 24(%4), %%eax\n"
18600 "16: movl 28(%4), %%edx\n"
18601- "17: movl %%eax, 24(%3)\n"
18602- "18: movl %%edx, 28(%3)\n"
18603+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18604+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18605 "19: movl 32(%4), %%eax\n"
18606 "20: movl 36(%4), %%edx\n"
18607- "21: movl %%eax, 32(%3)\n"
18608- "22: movl %%edx, 36(%3)\n"
18609+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18610+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18611 "23: movl 40(%4), %%eax\n"
18612 "24: movl 44(%4), %%edx\n"
18613- "25: movl %%eax, 40(%3)\n"
18614- "26: movl %%edx, 44(%3)\n"
18615+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18616+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18617 "27: movl 48(%4), %%eax\n"
18618 "28: movl 52(%4), %%edx\n"
18619- "29: movl %%eax, 48(%3)\n"
18620- "30: movl %%edx, 52(%3)\n"
18621+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18622+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18623 "31: movl 56(%4), %%eax\n"
18624 "32: movl 60(%4), %%edx\n"
18625- "33: movl %%eax, 56(%3)\n"
18626- "34: movl %%edx, 60(%3)\n"
18627+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18628+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18629 " addl $-64, %0\n"
18630 " addl $64, %4\n"
18631 " addl $64, %3\n"
18632@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18633 " shrl $2, %0\n"
18634 " andl $3, %%eax\n"
18635 " cld\n"
18636+ __COPYUSER_SET_ES
18637 "99: rep; movsl\n"
18638 "36: movl %%eax, %0\n"
18639 "37: rep; movsb\n"
18640 "100:\n"
18641+ __COPYUSER_RESTORE_ES
18642+ ".section .fixup,\"ax\"\n"
18643+ "101: lea 0(%%eax,%0,4),%0\n"
18644+ " jmp 100b\n"
18645+ ".previous\n"
18646+ ".section __ex_table,\"a\"\n"
18647+ " .align 4\n"
18648+ " .long 1b,100b\n"
18649+ " .long 2b,100b\n"
18650+ " .long 3b,100b\n"
18651+ " .long 4b,100b\n"
18652+ " .long 5b,100b\n"
18653+ " .long 6b,100b\n"
18654+ " .long 7b,100b\n"
18655+ " .long 8b,100b\n"
18656+ " .long 9b,100b\n"
18657+ " .long 10b,100b\n"
18658+ " .long 11b,100b\n"
18659+ " .long 12b,100b\n"
18660+ " .long 13b,100b\n"
18661+ " .long 14b,100b\n"
18662+ " .long 15b,100b\n"
18663+ " .long 16b,100b\n"
18664+ " .long 17b,100b\n"
18665+ " .long 18b,100b\n"
18666+ " .long 19b,100b\n"
18667+ " .long 20b,100b\n"
18668+ " .long 21b,100b\n"
18669+ " .long 22b,100b\n"
18670+ " .long 23b,100b\n"
18671+ " .long 24b,100b\n"
18672+ " .long 25b,100b\n"
18673+ " .long 26b,100b\n"
18674+ " .long 27b,100b\n"
18675+ " .long 28b,100b\n"
18676+ " .long 29b,100b\n"
18677+ " .long 30b,100b\n"
18678+ " .long 31b,100b\n"
18679+ " .long 32b,100b\n"
18680+ " .long 33b,100b\n"
18681+ " .long 34b,100b\n"
18682+ " .long 35b,100b\n"
18683+ " .long 36b,100b\n"
18684+ " .long 37b,100b\n"
18685+ " .long 99b,101b\n"
18686+ ".previous"
18687+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
18688+ : "1"(to), "2"(from), "0"(size)
18689+ : "eax", "edx", "memory");
18690+ return size;
18691+}
18692+
18693+static unsigned long
18694+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18695+{
18696+ int d0, d1;
18697+ __asm__ __volatile__(
18698+ " .align 2,0x90\n"
18699+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18700+ " cmpl $67, %0\n"
18701+ " jbe 3f\n"
18702+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18703+ " .align 2,0x90\n"
18704+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18705+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18706+ "5: movl %%eax, 0(%3)\n"
18707+ "6: movl %%edx, 4(%3)\n"
18708+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18709+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18710+ "9: movl %%eax, 8(%3)\n"
18711+ "10: movl %%edx, 12(%3)\n"
18712+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18713+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18714+ "13: movl %%eax, 16(%3)\n"
18715+ "14: movl %%edx, 20(%3)\n"
18716+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18717+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18718+ "17: movl %%eax, 24(%3)\n"
18719+ "18: movl %%edx, 28(%3)\n"
18720+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18721+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18722+ "21: movl %%eax, 32(%3)\n"
18723+ "22: movl %%edx, 36(%3)\n"
18724+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18725+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18726+ "25: movl %%eax, 40(%3)\n"
18727+ "26: movl %%edx, 44(%3)\n"
18728+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18729+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18730+ "29: movl %%eax, 48(%3)\n"
18731+ "30: movl %%edx, 52(%3)\n"
18732+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18733+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18734+ "33: movl %%eax, 56(%3)\n"
18735+ "34: movl %%edx, 60(%3)\n"
18736+ " addl $-64, %0\n"
18737+ " addl $64, %4\n"
18738+ " addl $64, %3\n"
18739+ " cmpl $63, %0\n"
18740+ " ja 1b\n"
18741+ "35: movl %0, %%eax\n"
18742+ " shrl $2, %0\n"
18743+ " andl $3, %%eax\n"
18744+ " cld\n"
18745+ "99: rep; "__copyuser_seg" movsl\n"
18746+ "36: movl %%eax, %0\n"
18747+ "37: rep; "__copyuser_seg" movsb\n"
18748+ "100:\n"
18749 ".section .fixup,\"ax\"\n"
18750 "101: lea 0(%%eax,%0,4),%0\n"
18751 " jmp 100b\n"
18752@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18753 int d0, d1;
18754 __asm__ __volatile__(
18755 " .align 2,0x90\n"
18756- "0: movl 32(%4), %%eax\n"
18757+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18758 " cmpl $67, %0\n"
18759 " jbe 2f\n"
18760- "1: movl 64(%4), %%eax\n"
18761+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18762 " .align 2,0x90\n"
18763- "2: movl 0(%4), %%eax\n"
18764- "21: movl 4(%4), %%edx\n"
18765+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18766+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18767 " movl %%eax, 0(%3)\n"
18768 " movl %%edx, 4(%3)\n"
18769- "3: movl 8(%4), %%eax\n"
18770- "31: movl 12(%4),%%edx\n"
18771+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18772+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18773 " movl %%eax, 8(%3)\n"
18774 " movl %%edx, 12(%3)\n"
18775- "4: movl 16(%4), %%eax\n"
18776- "41: movl 20(%4), %%edx\n"
18777+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18778+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18779 " movl %%eax, 16(%3)\n"
18780 " movl %%edx, 20(%3)\n"
18781- "10: movl 24(%4), %%eax\n"
18782- "51: movl 28(%4), %%edx\n"
18783+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18784+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18785 " movl %%eax, 24(%3)\n"
18786 " movl %%edx, 28(%3)\n"
18787- "11: movl 32(%4), %%eax\n"
18788- "61: movl 36(%4), %%edx\n"
18789+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18790+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18791 " movl %%eax, 32(%3)\n"
18792 " movl %%edx, 36(%3)\n"
18793- "12: movl 40(%4), %%eax\n"
18794- "71: movl 44(%4), %%edx\n"
18795+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18796+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18797 " movl %%eax, 40(%3)\n"
18798 " movl %%edx, 44(%3)\n"
18799- "13: movl 48(%4), %%eax\n"
18800- "81: movl 52(%4), %%edx\n"
18801+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18802+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18803 " movl %%eax, 48(%3)\n"
18804 " movl %%edx, 52(%3)\n"
18805- "14: movl 56(%4), %%eax\n"
18806- "91: movl 60(%4), %%edx\n"
18807+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18808+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18809 " movl %%eax, 56(%3)\n"
18810 " movl %%edx, 60(%3)\n"
18811 " addl $-64, %0\n"
18812@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18813 " shrl $2, %0\n"
18814 " andl $3, %%eax\n"
18815 " cld\n"
18816- "6: rep; movsl\n"
18817+ "6: rep; "__copyuser_seg" movsl\n"
18818 " movl %%eax,%0\n"
18819- "7: rep; movsb\n"
18820+ "7: rep; "__copyuser_seg" movsb\n"
18821 "8:\n"
18822 ".section .fixup,\"ax\"\n"
18823 "9: lea 0(%%eax,%0,4),%0\n"
18824@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18825
18826 __asm__ __volatile__(
18827 " .align 2,0x90\n"
18828- "0: movl 32(%4), %%eax\n"
18829+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18830 " cmpl $67, %0\n"
18831 " jbe 2f\n"
18832- "1: movl 64(%4), %%eax\n"
18833+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18834 " .align 2,0x90\n"
18835- "2: movl 0(%4), %%eax\n"
18836- "21: movl 4(%4), %%edx\n"
18837+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18838+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18839 " movnti %%eax, 0(%3)\n"
18840 " movnti %%edx, 4(%3)\n"
18841- "3: movl 8(%4), %%eax\n"
18842- "31: movl 12(%4),%%edx\n"
18843+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18844+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18845 " movnti %%eax, 8(%3)\n"
18846 " movnti %%edx, 12(%3)\n"
18847- "4: movl 16(%4), %%eax\n"
18848- "41: movl 20(%4), %%edx\n"
18849+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18850+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18851 " movnti %%eax, 16(%3)\n"
18852 " movnti %%edx, 20(%3)\n"
18853- "10: movl 24(%4), %%eax\n"
18854- "51: movl 28(%4), %%edx\n"
18855+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18856+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18857 " movnti %%eax, 24(%3)\n"
18858 " movnti %%edx, 28(%3)\n"
18859- "11: movl 32(%4), %%eax\n"
18860- "61: movl 36(%4), %%edx\n"
18861+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18862+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18863 " movnti %%eax, 32(%3)\n"
18864 " movnti %%edx, 36(%3)\n"
18865- "12: movl 40(%4), %%eax\n"
18866- "71: movl 44(%4), %%edx\n"
18867+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18868+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18869 " movnti %%eax, 40(%3)\n"
18870 " movnti %%edx, 44(%3)\n"
18871- "13: movl 48(%4), %%eax\n"
18872- "81: movl 52(%4), %%edx\n"
18873+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18874+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18875 " movnti %%eax, 48(%3)\n"
18876 " movnti %%edx, 52(%3)\n"
18877- "14: movl 56(%4), %%eax\n"
18878- "91: movl 60(%4), %%edx\n"
18879+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18880+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18881 " movnti %%eax, 56(%3)\n"
18882 " movnti %%edx, 60(%3)\n"
18883 " addl $-64, %0\n"
18884@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18885 " shrl $2, %0\n"
18886 " andl $3, %%eax\n"
18887 " cld\n"
18888- "6: rep; movsl\n"
18889+ "6: rep; "__copyuser_seg" movsl\n"
18890 " movl %%eax,%0\n"
18891- "7: rep; movsb\n"
18892+ "7: rep; "__copyuser_seg" movsb\n"
18893 "8:\n"
18894 ".section .fixup,\"ax\"\n"
18895 "9: lea 0(%%eax,%0,4),%0\n"
18896@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18897
18898 __asm__ __volatile__(
18899 " .align 2,0x90\n"
18900- "0: movl 32(%4), %%eax\n"
18901+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18902 " cmpl $67, %0\n"
18903 " jbe 2f\n"
18904- "1: movl 64(%4), %%eax\n"
18905+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18906 " .align 2,0x90\n"
18907- "2: movl 0(%4), %%eax\n"
18908- "21: movl 4(%4), %%edx\n"
18909+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18910+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18911 " movnti %%eax, 0(%3)\n"
18912 " movnti %%edx, 4(%3)\n"
18913- "3: movl 8(%4), %%eax\n"
18914- "31: movl 12(%4),%%edx\n"
18915+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18916+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18917 " movnti %%eax, 8(%3)\n"
18918 " movnti %%edx, 12(%3)\n"
18919- "4: movl 16(%4), %%eax\n"
18920- "41: movl 20(%4), %%edx\n"
18921+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18922+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18923 " movnti %%eax, 16(%3)\n"
18924 " movnti %%edx, 20(%3)\n"
18925- "10: movl 24(%4), %%eax\n"
18926- "51: movl 28(%4), %%edx\n"
18927+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18928+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18929 " movnti %%eax, 24(%3)\n"
18930 " movnti %%edx, 28(%3)\n"
18931- "11: movl 32(%4), %%eax\n"
18932- "61: movl 36(%4), %%edx\n"
18933+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18934+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18935 " movnti %%eax, 32(%3)\n"
18936 " movnti %%edx, 36(%3)\n"
18937- "12: movl 40(%4), %%eax\n"
18938- "71: movl 44(%4), %%edx\n"
18939+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18940+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18941 " movnti %%eax, 40(%3)\n"
18942 " movnti %%edx, 44(%3)\n"
18943- "13: movl 48(%4), %%eax\n"
18944- "81: movl 52(%4), %%edx\n"
18945+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18946+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18947 " movnti %%eax, 48(%3)\n"
18948 " movnti %%edx, 52(%3)\n"
18949- "14: movl 56(%4), %%eax\n"
18950- "91: movl 60(%4), %%edx\n"
18951+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18952+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18953 " movnti %%eax, 56(%3)\n"
18954 " movnti %%edx, 60(%3)\n"
18955 " addl $-64, %0\n"
18956@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18957 " shrl $2, %0\n"
18958 " andl $3, %%eax\n"
18959 " cld\n"
18960- "6: rep; movsl\n"
18961+ "6: rep; "__copyuser_seg" movsl\n"
18962 " movl %%eax,%0\n"
18963- "7: rep; movsb\n"
18964+ "7: rep; "__copyuser_seg" movsb\n"
18965 "8:\n"
18966 ".section .fixup,\"ax\"\n"
18967 "9: lea 0(%%eax,%0,4),%0\n"
18968@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18969 */
18970 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18971 unsigned long size);
18972-unsigned long __copy_user_intel(void __user *to, const void *from,
18973+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18974+ unsigned long size);
18975+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18976 unsigned long size);
18977 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18978 const void __user *from, unsigned long size);
18979 #endif /* CONFIG_X86_INTEL_USERCOPY */
18980
18981 /* Generic arbitrary sized copy. */
18982-#define __copy_user(to, from, size) \
18983+#define __copy_user(to, from, size, prefix, set, restore) \
18984 do { \
18985 int __d0, __d1, __d2; \
18986 __asm__ __volatile__( \
18987+ set \
18988 " cmp $7,%0\n" \
18989 " jbe 1f\n" \
18990 " movl %1,%0\n" \
18991 " negl %0\n" \
18992 " andl $7,%0\n" \
18993 " subl %0,%3\n" \
18994- "4: rep; movsb\n" \
18995+ "4: rep; "prefix"movsb\n" \
18996 " movl %3,%0\n" \
18997 " shrl $2,%0\n" \
18998 " andl $3,%3\n" \
18999 " .align 2,0x90\n" \
19000- "0: rep; movsl\n" \
19001+ "0: rep; "prefix"movsl\n" \
19002 " movl %3,%0\n" \
19003- "1: rep; movsb\n" \
19004+ "1: rep; "prefix"movsb\n" \
19005 "2:\n" \
19006+ restore \
19007 ".section .fixup,\"ax\"\n" \
19008 "5: addl %3,%0\n" \
19009 " jmp 2b\n" \
19010@@ -682,14 +799,14 @@ do { \
19011 " negl %0\n" \
19012 " andl $7,%0\n" \
19013 " subl %0,%3\n" \
19014- "4: rep; movsb\n" \
19015+ "4: rep; "__copyuser_seg"movsb\n" \
19016 " movl %3,%0\n" \
19017 " shrl $2,%0\n" \
19018 " andl $3,%3\n" \
19019 " .align 2,0x90\n" \
19020- "0: rep; movsl\n" \
19021+ "0: rep; "__copyuser_seg"movsl\n" \
19022 " movl %3,%0\n" \
19023- "1: rep; movsb\n" \
19024+ "1: rep; "__copyuser_seg"movsb\n" \
19025 "2:\n" \
19026 ".section .fixup,\"ax\"\n" \
19027 "5: addl %3,%0\n" \
19028@@ -775,9 +892,9 @@ survive:
19029 }
19030 #endif
19031 if (movsl_is_ok(to, from, n))
19032- __copy_user(to, from, n);
19033+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19034 else
19035- n = __copy_user_intel(to, from, n);
19036+ n = __generic_copy_to_user_intel(to, from, n);
19037 return n;
19038 }
19039 EXPORT_SYMBOL(__copy_to_user_ll);
19040@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19041 unsigned long n)
19042 {
19043 if (movsl_is_ok(to, from, n))
19044- __copy_user(to, from, n);
19045+ __copy_user(to, from, n, __copyuser_seg, "", "");
19046 else
19047- n = __copy_user_intel((void __user *)to,
19048- (const void *)from, n);
19049+ n = __generic_copy_from_user_intel(to, from, n);
19050 return n;
19051 }
19052 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19053@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19054 if (n > 64 && cpu_has_xmm2)
19055 n = __copy_user_intel_nocache(to, from, n);
19056 else
19057- __copy_user(to, from, n);
19058+ __copy_user(to, from, n, __copyuser_seg, "", "");
19059 #else
19060- __copy_user(to, from, n);
19061+ __copy_user(to, from, n, __copyuser_seg, "", "");
19062 #endif
19063 return n;
19064 }
19065 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19066
19067-/**
19068- * copy_to_user: - Copy a block of data into user space.
19069- * @to: Destination address, in user space.
19070- * @from: Source address, in kernel space.
19071- * @n: Number of bytes to copy.
19072- *
19073- * Context: User context only. This function may sleep.
19074- *
19075- * Copy data from kernel space to user space.
19076- *
19077- * Returns number of bytes that could not be copied.
19078- * On success, this will be zero.
19079- */
19080-unsigned long
19081-copy_to_user(void __user *to, const void *from, unsigned long n)
19082+void copy_from_user_overflow(void)
19083 {
19084- if (access_ok(VERIFY_WRITE, to, n))
19085- n = __copy_to_user(to, from, n);
19086- return n;
19087+ WARN(1, "Buffer overflow detected!\n");
19088 }
19089-EXPORT_SYMBOL(copy_to_user);
19090+EXPORT_SYMBOL(copy_from_user_overflow);
19091
19092-/**
19093- * copy_from_user: - Copy a block of data from user space.
19094- * @to: Destination address, in kernel space.
19095- * @from: Source address, in user space.
19096- * @n: Number of bytes to copy.
19097- *
19098- * Context: User context only. This function may sleep.
19099- *
19100- * Copy data from user space to kernel space.
19101- *
19102- * Returns number of bytes that could not be copied.
19103- * On success, this will be zero.
19104- *
19105- * If some data could not be copied, this function will pad the copied
19106- * data to the requested size using zero bytes.
19107- */
19108-unsigned long
19109-_copy_from_user(void *to, const void __user *from, unsigned long n)
19110+void copy_to_user_overflow(void)
19111 {
19112- if (access_ok(VERIFY_READ, from, n))
19113- n = __copy_from_user(to, from, n);
19114- else
19115- memset(to, 0, n);
19116- return n;
19117+ WARN(1, "Buffer overflow detected!\n");
19118 }
19119-EXPORT_SYMBOL(_copy_from_user);
19120+EXPORT_SYMBOL(copy_to_user_overflow);
19121
19122-void copy_from_user_overflow(void)
19123+#ifdef CONFIG_PAX_MEMORY_UDEREF
19124+void __set_fs(mm_segment_t x)
19125 {
19126- WARN(1, "Buffer overflow detected!\n");
19127+ switch (x.seg) {
19128+ case 0:
19129+ loadsegment(gs, 0);
19130+ break;
19131+ case TASK_SIZE_MAX:
19132+ loadsegment(gs, __USER_DS);
19133+ break;
19134+ case -1UL:
19135+ loadsegment(gs, __KERNEL_DS);
19136+ break;
19137+ default:
19138+ BUG();
19139+ }
19140+ return;
19141 }
19142-EXPORT_SYMBOL(copy_from_user_overflow);
19143+EXPORT_SYMBOL(__set_fs);
19144+
19145+void set_fs(mm_segment_t x)
19146+{
19147+ current_thread_info()->addr_limit = x;
19148+ __set_fs(x);
19149+}
19150+EXPORT_SYMBOL(set_fs);
19151+#endif
19152diff -urNp linux-3.0.7/arch/x86/lib/usercopy_64.c linux-3.0.7/arch/x86/lib/usercopy_64.c
19153--- linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19154+++ linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19155@@ -42,6 +42,12 @@ long
19156 __strncpy_from_user(char *dst, const char __user *src, long count)
19157 {
19158 long res;
19159+
19160+#ifdef CONFIG_PAX_MEMORY_UDEREF
19161+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19162+ src += PAX_USER_SHADOW_BASE;
19163+#endif
19164+
19165 __do_strncpy_from_user(dst, src, count, res);
19166 return res;
19167 }
19168@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19169 {
19170 long __d0;
19171 might_fault();
19172+
19173+#ifdef CONFIG_PAX_MEMORY_UDEREF
19174+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19175+ addr += PAX_USER_SHADOW_BASE;
19176+#endif
19177+
19178 /* no memory constraint because it doesn't change any memory gcc knows
19179 about */
19180 asm volatile(
19181@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19182
19183 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19184 {
19185- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19186- return copy_user_generic((__force void *)to, (__force void *)from, len);
19187- }
19188- return len;
19189+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19190+
19191+#ifdef CONFIG_PAX_MEMORY_UDEREF
19192+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19193+ to += PAX_USER_SHADOW_BASE;
19194+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19195+ from += PAX_USER_SHADOW_BASE;
19196+#endif
19197+
19198+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19199+ }
19200+ return len;
19201 }
19202 EXPORT_SYMBOL(copy_in_user);
19203
19204@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19205 * it is not necessary to optimize tail handling.
19206 */
19207 unsigned long
19208-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19209+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19210 {
19211 char c;
19212 unsigned zero_len;
19213diff -urNp linux-3.0.7/arch/x86/Makefile linux-3.0.7/arch/x86/Makefile
19214--- linux-3.0.7/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19215+++ linux-3.0.7/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19216@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19217 else
19218 BITS := 64
19219 UTS_MACHINE := x86_64
19220+ biarch := $(call cc-option,-m64)
19221 CHECKFLAGS += -D__x86_64__ -m64
19222
19223 KBUILD_AFLAGS += -m64
19224@@ -195,3 +196,12 @@ define archhelp
19225 echo ' FDARGS="..." arguments for the booted kernel'
19226 echo ' FDINITRD=file initrd for the booted kernel'
19227 endef
19228+
19229+define OLD_LD
19230+
19231+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19232+*** Please upgrade your binutils to 2.18 or newer
19233+endef
19234+
19235+archprepare:
19236+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19237diff -urNp linux-3.0.7/arch/x86/mm/extable.c linux-3.0.7/arch/x86/mm/extable.c
19238--- linux-3.0.7/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19239+++ linux-3.0.7/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19240@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19241 const struct exception_table_entry *fixup;
19242
19243 #ifdef CONFIG_PNPBIOS
19244- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19245+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19246 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19247 extern u32 pnp_bios_is_utter_crap;
19248 pnp_bios_is_utter_crap = 1;
19249diff -urNp linux-3.0.7/arch/x86/mm/fault.c linux-3.0.7/arch/x86/mm/fault.c
19250--- linux-3.0.7/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19251+++ linux-3.0.7/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19252@@ -13,10 +13,18 @@
19253 #include <linux/perf_event.h> /* perf_sw_event */
19254 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19255 #include <linux/prefetch.h> /* prefetchw */
19256+#include <linux/unistd.h>
19257+#include <linux/compiler.h>
19258
19259 #include <asm/traps.h> /* dotraplinkage, ... */
19260 #include <asm/pgalloc.h> /* pgd_*(), ... */
19261 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19262+#include <asm/vsyscall.h>
19263+#include <asm/tlbflush.h>
19264+
19265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19266+#include <asm/stacktrace.h>
19267+#endif
19268
19269 /*
19270 * Page fault error code bits:
19271@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19272 int ret = 0;
19273
19274 /* kprobe_running() needs smp_processor_id() */
19275- if (kprobes_built_in() && !user_mode_vm(regs)) {
19276+ if (kprobes_built_in() && !user_mode(regs)) {
19277 preempt_disable();
19278 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19279 ret = 1;
19280@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19281 return !instr_lo || (instr_lo>>1) == 1;
19282 case 0x00:
19283 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19284- if (probe_kernel_address(instr, opcode))
19285+ if (user_mode(regs)) {
19286+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19287+ return 0;
19288+ } else if (probe_kernel_address(instr, opcode))
19289 return 0;
19290
19291 *prefetch = (instr_lo == 0xF) &&
19292@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19293 while (instr < max_instr) {
19294 unsigned char opcode;
19295
19296- if (probe_kernel_address(instr, opcode))
19297+ if (user_mode(regs)) {
19298+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19299+ break;
19300+ } else if (probe_kernel_address(instr, opcode))
19301 break;
19302
19303 instr++;
19304@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19305 force_sig_info(si_signo, &info, tsk);
19306 }
19307
19308+#ifdef CONFIG_PAX_EMUTRAMP
19309+static int pax_handle_fetch_fault(struct pt_regs *regs);
19310+#endif
19311+
19312+#ifdef CONFIG_PAX_PAGEEXEC
19313+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19314+{
19315+ pgd_t *pgd;
19316+ pud_t *pud;
19317+ pmd_t *pmd;
19318+
19319+ pgd = pgd_offset(mm, address);
19320+ if (!pgd_present(*pgd))
19321+ return NULL;
19322+ pud = pud_offset(pgd, address);
19323+ if (!pud_present(*pud))
19324+ return NULL;
19325+ pmd = pmd_offset(pud, address);
19326+ if (!pmd_present(*pmd))
19327+ return NULL;
19328+ return pmd;
19329+}
19330+#endif
19331+
19332 DEFINE_SPINLOCK(pgd_lock);
19333 LIST_HEAD(pgd_list);
19334
19335@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19336 for (address = VMALLOC_START & PMD_MASK;
19337 address >= TASK_SIZE && address < FIXADDR_TOP;
19338 address += PMD_SIZE) {
19339+
19340+#ifdef CONFIG_PAX_PER_CPU_PGD
19341+ unsigned long cpu;
19342+#else
19343 struct page *page;
19344+#endif
19345
19346 spin_lock(&pgd_lock);
19347+
19348+#ifdef CONFIG_PAX_PER_CPU_PGD
19349+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19350+ pgd_t *pgd = get_cpu_pgd(cpu);
19351+ pmd_t *ret;
19352+#else
19353 list_for_each_entry(page, &pgd_list, lru) {
19354+ pgd_t *pgd = page_address(page);
19355 spinlock_t *pgt_lock;
19356 pmd_t *ret;
19357
19358@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19359 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19360
19361 spin_lock(pgt_lock);
19362- ret = vmalloc_sync_one(page_address(page), address);
19363+#endif
19364+
19365+ ret = vmalloc_sync_one(pgd, address);
19366+
19367+#ifndef CONFIG_PAX_PER_CPU_PGD
19368 spin_unlock(pgt_lock);
19369+#endif
19370
19371 if (!ret)
19372 break;
19373@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19374 * an interrupt in the middle of a task switch..
19375 */
19376 pgd_paddr = read_cr3();
19377+
19378+#ifdef CONFIG_PAX_PER_CPU_PGD
19379+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19380+#endif
19381+
19382 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19383 if (!pmd_k)
19384 return -1;
19385@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19386 * happen within a race in page table update. In the later
19387 * case just flush:
19388 */
19389+
19390+#ifdef CONFIG_PAX_PER_CPU_PGD
19391+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19392+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19393+#else
19394 pgd = pgd_offset(current->active_mm, address);
19395+#endif
19396+
19397 pgd_ref = pgd_offset_k(address);
19398 if (pgd_none(*pgd_ref))
19399 return -1;
19400@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19401 static int is_errata100(struct pt_regs *regs, unsigned long address)
19402 {
19403 #ifdef CONFIG_X86_64
19404- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19405+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19406 return 1;
19407 #endif
19408 return 0;
19409@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19410 }
19411
19412 static const char nx_warning[] = KERN_CRIT
19413-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19414+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19415
19416 static void
19417 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19418@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19419 if (!oops_may_print())
19420 return;
19421
19422- if (error_code & PF_INSTR) {
19423+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19424 unsigned int level;
19425
19426 pte_t *pte = lookup_address(address, &level);
19427
19428 if (pte && pte_present(*pte) && !pte_exec(*pte))
19429- printk(nx_warning, current_uid());
19430+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19431+ }
19432+
19433+#ifdef CONFIG_PAX_KERNEXEC
19434+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19435+ if (current->signal->curr_ip)
19436+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19437+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19438+ else
19439+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19440+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19441 }
19442+#endif
19443
19444 printk(KERN_ALERT "BUG: unable to handle kernel ");
19445 if (address < PAGE_SIZE)
19446@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19447 unsigned long address, int si_code)
19448 {
19449 struct task_struct *tsk = current;
19450+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19451+ struct mm_struct *mm = tsk->mm;
19452+#endif
19453+
19454+#ifdef CONFIG_X86_64
19455+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19456+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19457+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19458+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19459+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19460+ return;
19461+ }
19462+ }
19463+#endif
19464+
19465+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19466+ if (mm && (error_code & PF_USER)) {
19467+ unsigned long ip = regs->ip;
19468+
19469+ if (v8086_mode(regs))
19470+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19471+
19472+ /*
19473+ * It's possible to have interrupts off here:
19474+ */
19475+ local_irq_enable();
19476+
19477+#ifdef CONFIG_PAX_PAGEEXEC
19478+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19479+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19480+
19481+#ifdef CONFIG_PAX_EMUTRAMP
19482+ switch (pax_handle_fetch_fault(regs)) {
19483+ case 2:
19484+ return;
19485+ }
19486+#endif
19487+
19488+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19489+ do_group_exit(SIGKILL);
19490+ }
19491+#endif
19492+
19493+#ifdef CONFIG_PAX_SEGMEXEC
19494+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19495+
19496+#ifdef CONFIG_PAX_EMUTRAMP
19497+ switch (pax_handle_fetch_fault(regs)) {
19498+ case 2:
19499+ return;
19500+ }
19501+#endif
19502+
19503+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19504+ do_group_exit(SIGKILL);
19505+ }
19506+#endif
19507+
19508+ }
19509+#endif
19510
19511 /* User mode accesses just cause a SIGSEGV */
19512 if (error_code & PF_USER) {
19513@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19514 return 1;
19515 }
19516
19517+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19518+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19519+{
19520+ pte_t *pte;
19521+ pmd_t *pmd;
19522+ spinlock_t *ptl;
19523+ unsigned char pte_mask;
19524+
19525+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19526+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19527+ return 0;
19528+
19529+ /* PaX: it's our fault, let's handle it if we can */
19530+
19531+ /* PaX: take a look at read faults before acquiring any locks */
19532+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19533+ /* instruction fetch attempt from a protected page in user mode */
19534+ up_read(&mm->mmap_sem);
19535+
19536+#ifdef CONFIG_PAX_EMUTRAMP
19537+ switch (pax_handle_fetch_fault(regs)) {
19538+ case 2:
19539+ return 1;
19540+ }
19541+#endif
19542+
19543+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19544+ do_group_exit(SIGKILL);
19545+ }
19546+
19547+ pmd = pax_get_pmd(mm, address);
19548+ if (unlikely(!pmd))
19549+ return 0;
19550+
19551+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19552+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19553+ pte_unmap_unlock(pte, ptl);
19554+ return 0;
19555+ }
19556+
19557+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19558+ /* write attempt to a protected page in user mode */
19559+ pte_unmap_unlock(pte, ptl);
19560+ return 0;
19561+ }
19562+
19563+#ifdef CONFIG_SMP
19564+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19565+#else
19566+ if (likely(address > get_limit(regs->cs)))
19567+#endif
19568+ {
19569+ set_pte(pte, pte_mkread(*pte));
19570+ __flush_tlb_one(address);
19571+ pte_unmap_unlock(pte, ptl);
19572+ up_read(&mm->mmap_sem);
19573+ return 1;
19574+ }
19575+
19576+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19577+
19578+ /*
19579+ * PaX: fill DTLB with user rights and retry
19580+ */
19581+ __asm__ __volatile__ (
19582+ "orb %2,(%1)\n"
19583+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19584+/*
19585+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19586+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19587+ * page fault when examined during a TLB load attempt. this is true not only
19588+ * for PTEs holding a non-present entry but also present entries that will
19589+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19590+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19591+ * for our target pages since their PTEs are simply not in the TLBs at all.
19592+
19593+ * the best thing in omitting it is that we gain around 15-20% speed in the
19594+ * fast path of the page fault handler and can get rid of tracing since we
19595+ * can no longer flush unintended entries.
19596+ */
19597+ "invlpg (%0)\n"
19598+#endif
19599+ __copyuser_seg"testb $0,(%0)\n"
19600+ "xorb %3,(%1)\n"
19601+ :
19602+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19603+ : "memory", "cc");
19604+ pte_unmap_unlock(pte, ptl);
19605+ up_read(&mm->mmap_sem);
19606+ return 1;
19607+}
19608+#endif
19609+
19610 /*
19611 * Handle a spurious fault caused by a stale TLB entry.
19612 *
19613@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19614 static inline int
19615 access_error(unsigned long error_code, struct vm_area_struct *vma)
19616 {
19617+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19618+ return 1;
19619+
19620 if (error_code & PF_WRITE) {
19621 /* write, present and write, not present: */
19622 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19623@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19624 {
19625 struct vm_area_struct *vma;
19626 struct task_struct *tsk;
19627- unsigned long address;
19628 struct mm_struct *mm;
19629 int fault;
19630 int write = error_code & PF_WRITE;
19631 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19632 (write ? FAULT_FLAG_WRITE : 0);
19633
19634+ /* Get the faulting address: */
19635+ unsigned long address = read_cr2();
19636+
19637+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19638+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19639+ if (!search_exception_tables(regs->ip)) {
19640+ bad_area_nosemaphore(regs, error_code, address);
19641+ return;
19642+ }
19643+ if (address < PAX_USER_SHADOW_BASE) {
19644+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19645+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19646+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19647+ } else
19648+ address -= PAX_USER_SHADOW_BASE;
19649+ }
19650+#endif
19651+
19652 tsk = current;
19653 mm = tsk->mm;
19654
19655- /* Get the faulting address: */
19656- address = read_cr2();
19657-
19658 /*
19659 * Detect and handle instructions that would cause a page fault for
19660 * both a tracked kernel page and a userspace page.
19661@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19662 * User-mode registers count as a user access even for any
19663 * potential system fault or CPU buglet:
19664 */
19665- if (user_mode_vm(regs)) {
19666+ if (user_mode(regs)) {
19667 local_irq_enable();
19668 error_code |= PF_USER;
19669 } else {
19670@@ -1103,6 +1351,11 @@ retry:
19671 might_sleep();
19672 }
19673
19674+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19675+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19676+ return;
19677+#endif
19678+
19679 vma = find_vma(mm, address);
19680 if (unlikely(!vma)) {
19681 bad_area(regs, error_code, address);
19682@@ -1114,18 +1367,24 @@ retry:
19683 bad_area(regs, error_code, address);
19684 return;
19685 }
19686- if (error_code & PF_USER) {
19687- /*
19688- * Accessing the stack below %sp is always a bug.
19689- * The large cushion allows instructions like enter
19690- * and pusha to work. ("enter $65535, $31" pushes
19691- * 32 pointers and then decrements %sp by 65535.)
19692- */
19693- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19694- bad_area(regs, error_code, address);
19695- return;
19696- }
19697+ /*
19698+ * Accessing the stack below %sp is always a bug.
19699+ * The large cushion allows instructions like enter
19700+ * and pusha to work. ("enter $65535, $31" pushes
19701+ * 32 pointers and then decrements %sp by 65535.)
19702+ */
19703+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19704+ bad_area(regs, error_code, address);
19705+ return;
19706 }
19707+
19708+#ifdef CONFIG_PAX_SEGMEXEC
19709+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19710+ bad_area(regs, error_code, address);
19711+ return;
19712+ }
19713+#endif
19714+
19715 if (unlikely(expand_stack(vma, address))) {
19716 bad_area(regs, error_code, address);
19717 return;
19718@@ -1180,3 +1439,199 @@ good_area:
19719
19720 up_read(&mm->mmap_sem);
19721 }
19722+
19723+#ifdef CONFIG_PAX_EMUTRAMP
19724+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19725+{
19726+ int err;
19727+
19728+ do { /* PaX: gcc trampoline emulation #1 */
19729+ unsigned char mov1, mov2;
19730+ unsigned short jmp;
19731+ unsigned int addr1, addr2;
19732+
19733+#ifdef CONFIG_X86_64
19734+ if ((regs->ip + 11) >> 32)
19735+ break;
19736+#endif
19737+
19738+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19739+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19740+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19741+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19742+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19743+
19744+ if (err)
19745+ break;
19746+
19747+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19748+ regs->cx = addr1;
19749+ regs->ax = addr2;
19750+ regs->ip = addr2;
19751+ return 2;
19752+ }
19753+ } while (0);
19754+
19755+ do { /* PaX: gcc trampoline emulation #2 */
19756+ unsigned char mov, jmp;
19757+ unsigned int addr1, addr2;
19758+
19759+#ifdef CONFIG_X86_64
19760+ if ((regs->ip + 9) >> 32)
19761+ break;
19762+#endif
19763+
19764+ err = get_user(mov, (unsigned char __user *)regs->ip);
19765+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19766+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19767+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19768+
19769+ if (err)
19770+ break;
19771+
19772+ if (mov == 0xB9 && jmp == 0xE9) {
19773+ regs->cx = addr1;
19774+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19775+ return 2;
19776+ }
19777+ } while (0);
19778+
19779+ return 1; /* PaX in action */
19780+}
19781+
19782+#ifdef CONFIG_X86_64
19783+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19784+{
19785+ int err;
19786+
19787+ do { /* PaX: gcc trampoline emulation #1 */
19788+ unsigned short mov1, mov2, jmp1;
19789+ unsigned char jmp2;
19790+ unsigned int addr1;
19791+ unsigned long addr2;
19792+
19793+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19794+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19795+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19796+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19797+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19798+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19799+
19800+ if (err)
19801+ break;
19802+
19803+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19804+ regs->r11 = addr1;
19805+ regs->r10 = addr2;
19806+ regs->ip = addr1;
19807+ return 2;
19808+ }
19809+ } while (0);
19810+
19811+ do { /* PaX: gcc trampoline emulation #2 */
19812+ unsigned short mov1, mov2, jmp1;
19813+ unsigned char jmp2;
19814+ unsigned long addr1, addr2;
19815+
19816+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19817+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19818+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19819+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19820+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19821+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19822+
19823+ if (err)
19824+ break;
19825+
19826+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19827+ regs->r11 = addr1;
19828+ regs->r10 = addr2;
19829+ regs->ip = addr1;
19830+ return 2;
19831+ }
19832+ } while (0);
19833+
19834+ return 1; /* PaX in action */
19835+}
19836+#endif
19837+
19838+/*
19839+ * PaX: decide what to do with offenders (regs->ip = fault address)
19840+ *
19841+ * returns 1 when task should be killed
19842+ * 2 when gcc trampoline was detected
19843+ */
19844+static int pax_handle_fetch_fault(struct pt_regs *regs)
19845+{
19846+ if (v8086_mode(regs))
19847+ return 1;
19848+
19849+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19850+ return 1;
19851+
19852+#ifdef CONFIG_X86_32
19853+ return pax_handle_fetch_fault_32(regs);
19854+#else
19855+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19856+ return pax_handle_fetch_fault_32(regs);
19857+ else
19858+ return pax_handle_fetch_fault_64(regs);
19859+#endif
19860+}
19861+#endif
19862+
19863+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19864+void pax_report_insns(void *pc, void *sp)
19865+{
19866+ long i;
19867+
19868+ printk(KERN_ERR "PAX: bytes at PC: ");
19869+ for (i = 0; i < 20; i++) {
19870+ unsigned char c;
19871+ if (get_user(c, (unsigned char __force_user *)pc+i))
19872+ printk(KERN_CONT "?? ");
19873+ else
19874+ printk(KERN_CONT "%02x ", c);
19875+ }
19876+ printk("\n");
19877+
19878+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19879+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19880+ unsigned long c;
19881+ if (get_user(c, (unsigned long __force_user *)sp+i))
19882+#ifdef CONFIG_X86_32
19883+ printk(KERN_CONT "???????? ");
19884+#else
19885+ printk(KERN_CONT "???????????????? ");
19886+#endif
19887+ else
19888+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19889+ }
19890+ printk("\n");
19891+}
19892+#endif
19893+
19894+/**
19895+ * probe_kernel_write(): safely attempt to write to a location
19896+ * @dst: address to write to
19897+ * @src: pointer to the data that shall be written
19898+ * @size: size of the data chunk
19899+ *
19900+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19901+ * happens, handle that and return -EFAULT.
19902+ */
19903+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19904+{
19905+ long ret;
19906+ mm_segment_t old_fs = get_fs();
19907+
19908+ set_fs(KERNEL_DS);
19909+ pagefault_disable();
19910+ pax_open_kernel();
19911+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
19912+ pax_close_kernel();
19913+ pagefault_enable();
19914+ set_fs(old_fs);
19915+
19916+ return ret ? -EFAULT : 0;
19917+}
19918diff -urNp linux-3.0.7/arch/x86/mm/gup.c linux-3.0.7/arch/x86/mm/gup.c
19919--- linux-3.0.7/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19920+++ linux-3.0.7/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19921@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19922 addr = start;
19923 len = (unsigned long) nr_pages << PAGE_SHIFT;
19924 end = start + len;
19925- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19926+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19927 (void __user *)start, len)))
19928 return 0;
19929
19930diff -urNp linux-3.0.7/arch/x86/mm/highmem_32.c linux-3.0.7/arch/x86/mm/highmem_32.c
19931--- linux-3.0.7/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19932+++ linux-3.0.7/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19933@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19934 idx = type + KM_TYPE_NR*smp_processor_id();
19935 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19936 BUG_ON(!pte_none(*(kmap_pte-idx)));
19937+
19938+ pax_open_kernel();
19939 set_pte(kmap_pte-idx, mk_pte(page, prot));
19940+ pax_close_kernel();
19941
19942 return (void *)vaddr;
19943 }
19944diff -urNp linux-3.0.7/arch/x86/mm/hugetlbpage.c linux-3.0.7/arch/x86/mm/hugetlbpage.c
19945--- linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19946+++ linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19947@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19948 struct hstate *h = hstate_file(file);
19949 struct mm_struct *mm = current->mm;
19950 struct vm_area_struct *vma;
19951- unsigned long start_addr;
19952+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19953+
19954+#ifdef CONFIG_PAX_SEGMEXEC
19955+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19956+ pax_task_size = SEGMEXEC_TASK_SIZE;
19957+#endif
19958+
19959+ pax_task_size -= PAGE_SIZE;
19960
19961 if (len > mm->cached_hole_size) {
19962- start_addr = mm->free_area_cache;
19963+ start_addr = mm->free_area_cache;
19964 } else {
19965- start_addr = TASK_UNMAPPED_BASE;
19966- mm->cached_hole_size = 0;
19967+ start_addr = mm->mmap_base;
19968+ mm->cached_hole_size = 0;
19969 }
19970
19971 full_search:
19972@@ -280,26 +287,27 @@ full_search:
19973
19974 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19975 /* At this point: (!vma || addr < vma->vm_end). */
19976- if (TASK_SIZE - len < addr) {
19977+ if (pax_task_size - len < addr) {
19978 /*
19979 * Start a new search - just in case we missed
19980 * some holes.
19981 */
19982- if (start_addr != TASK_UNMAPPED_BASE) {
19983- start_addr = TASK_UNMAPPED_BASE;
19984+ if (start_addr != mm->mmap_base) {
19985+ start_addr = mm->mmap_base;
19986 mm->cached_hole_size = 0;
19987 goto full_search;
19988 }
19989 return -ENOMEM;
19990 }
19991- if (!vma || addr + len <= vma->vm_start) {
19992- mm->free_area_cache = addr + len;
19993- return addr;
19994- }
19995+ if (check_heap_stack_gap(vma, addr, len))
19996+ break;
19997 if (addr + mm->cached_hole_size < vma->vm_start)
19998 mm->cached_hole_size = vma->vm_start - addr;
19999 addr = ALIGN(vma->vm_end, huge_page_size(h));
20000 }
20001+
20002+ mm->free_area_cache = addr + len;
20003+ return addr;
20004 }
20005
20006 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20007@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20008 {
20009 struct hstate *h = hstate_file(file);
20010 struct mm_struct *mm = current->mm;
20011- struct vm_area_struct *vma, *prev_vma;
20012- unsigned long base = mm->mmap_base, addr = addr0;
20013+ struct vm_area_struct *vma;
20014+ unsigned long base = mm->mmap_base, addr;
20015 unsigned long largest_hole = mm->cached_hole_size;
20016- int first_time = 1;
20017
20018 /* don't allow allocations above current base */
20019 if (mm->free_area_cache > base)
20020@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20021 largest_hole = 0;
20022 mm->free_area_cache = base;
20023 }
20024-try_again:
20025+
20026 /* make sure it can fit in the remaining address space */
20027 if (mm->free_area_cache < len)
20028 goto fail;
20029
20030 /* either no address requested or can't fit in requested address hole */
20031- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20032+ addr = (mm->free_area_cache - len);
20033 do {
20034+ addr &= huge_page_mask(h);
20035+ vma = find_vma(mm, addr);
20036 /*
20037 * Lookup failure means no vma is above this address,
20038 * i.e. return with success:
20039- */
20040- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20041- return addr;
20042-
20043- /*
20044 * new region fits between prev_vma->vm_end and
20045 * vma->vm_start, use it:
20046 */
20047- if (addr + len <= vma->vm_start &&
20048- (!prev_vma || (addr >= prev_vma->vm_end))) {
20049+ if (check_heap_stack_gap(vma, addr, len)) {
20050 /* remember the address as a hint for next time */
20051- mm->cached_hole_size = largest_hole;
20052- return (mm->free_area_cache = addr);
20053- } else {
20054- /* pull free_area_cache down to the first hole */
20055- if (mm->free_area_cache == vma->vm_end) {
20056- mm->free_area_cache = vma->vm_start;
20057- mm->cached_hole_size = largest_hole;
20058- }
20059+ mm->cached_hole_size = largest_hole;
20060+ return (mm->free_area_cache = addr);
20061+ }
20062+ /* pull free_area_cache down to the first hole */
20063+ if (mm->free_area_cache == vma->vm_end) {
20064+ mm->free_area_cache = vma->vm_start;
20065+ mm->cached_hole_size = largest_hole;
20066 }
20067
20068 /* remember the largest hole we saw so far */
20069 if (addr + largest_hole < vma->vm_start)
20070- largest_hole = vma->vm_start - addr;
20071+ largest_hole = vma->vm_start - addr;
20072
20073 /* try just below the current vma->vm_start */
20074- addr = (vma->vm_start - len) & huge_page_mask(h);
20075- } while (len <= vma->vm_start);
20076+ addr = skip_heap_stack_gap(vma, len);
20077+ } while (!IS_ERR_VALUE(addr));
20078
20079 fail:
20080 /*
20081- * if hint left us with no space for the requested
20082- * mapping then try again:
20083- */
20084- if (first_time) {
20085- mm->free_area_cache = base;
20086- largest_hole = 0;
20087- first_time = 0;
20088- goto try_again;
20089- }
20090- /*
20091 * A failed mmap() very likely causes application failure,
20092 * so fall back to the bottom-up function here. This scenario
20093 * can happen with large stack limits and large mmap()
20094 * allocations.
20095 */
20096- mm->free_area_cache = TASK_UNMAPPED_BASE;
20097+
20098+#ifdef CONFIG_PAX_SEGMEXEC
20099+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20100+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20101+ else
20102+#endif
20103+
20104+ mm->mmap_base = TASK_UNMAPPED_BASE;
20105+
20106+#ifdef CONFIG_PAX_RANDMMAP
20107+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20108+ mm->mmap_base += mm->delta_mmap;
20109+#endif
20110+
20111+ mm->free_area_cache = mm->mmap_base;
20112 mm->cached_hole_size = ~0UL;
20113 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20114 len, pgoff, flags);
20115@@ -386,6 +392,7 @@ fail:
20116 /*
20117 * Restore the topdown base:
20118 */
20119+ mm->mmap_base = base;
20120 mm->free_area_cache = base;
20121 mm->cached_hole_size = ~0UL;
20122
20123@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20124 struct hstate *h = hstate_file(file);
20125 struct mm_struct *mm = current->mm;
20126 struct vm_area_struct *vma;
20127+ unsigned long pax_task_size = TASK_SIZE;
20128
20129 if (len & ~huge_page_mask(h))
20130 return -EINVAL;
20131- if (len > TASK_SIZE)
20132+
20133+#ifdef CONFIG_PAX_SEGMEXEC
20134+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20135+ pax_task_size = SEGMEXEC_TASK_SIZE;
20136+#endif
20137+
20138+ pax_task_size -= PAGE_SIZE;
20139+
20140+ if (len > pax_task_size)
20141 return -ENOMEM;
20142
20143 if (flags & MAP_FIXED) {
20144@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20145 if (addr) {
20146 addr = ALIGN(addr, huge_page_size(h));
20147 vma = find_vma(mm, addr);
20148- if (TASK_SIZE - len >= addr &&
20149- (!vma || addr + len <= vma->vm_start))
20150+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20151 return addr;
20152 }
20153 if (mm->get_unmapped_area == arch_get_unmapped_area)
20154diff -urNp linux-3.0.7/arch/x86/mm/init_32.c linux-3.0.7/arch/x86/mm/init_32.c
20155--- linux-3.0.7/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20156+++ linux-3.0.7/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20157@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20158 }
20159
20160 /*
20161- * Creates a middle page table and puts a pointer to it in the
20162- * given global directory entry. This only returns the gd entry
20163- * in non-PAE compilation mode, since the middle layer is folded.
20164- */
20165-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20166-{
20167- pud_t *pud;
20168- pmd_t *pmd_table;
20169-
20170-#ifdef CONFIG_X86_PAE
20171- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20172- if (after_bootmem)
20173- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20174- else
20175- pmd_table = (pmd_t *)alloc_low_page();
20176- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20177- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20178- pud = pud_offset(pgd, 0);
20179- BUG_ON(pmd_table != pmd_offset(pud, 0));
20180-
20181- return pmd_table;
20182- }
20183-#endif
20184- pud = pud_offset(pgd, 0);
20185- pmd_table = pmd_offset(pud, 0);
20186-
20187- return pmd_table;
20188-}
20189-
20190-/*
20191 * Create a page table and place a pointer to it in a middle page
20192 * directory entry:
20193 */
20194@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20195 page_table = (pte_t *)alloc_low_page();
20196
20197 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20198+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20199+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20200+#else
20201 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20202+#endif
20203 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20204 }
20205
20206 return pte_offset_kernel(pmd, 0);
20207 }
20208
20209+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20210+{
20211+ pud_t *pud;
20212+ pmd_t *pmd_table;
20213+
20214+ pud = pud_offset(pgd, 0);
20215+ pmd_table = pmd_offset(pud, 0);
20216+
20217+ return pmd_table;
20218+}
20219+
20220 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20221 {
20222 int pgd_idx = pgd_index(vaddr);
20223@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20224 int pgd_idx, pmd_idx;
20225 unsigned long vaddr;
20226 pgd_t *pgd;
20227+ pud_t *pud;
20228 pmd_t *pmd;
20229 pte_t *pte = NULL;
20230
20231@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20232 pgd = pgd_base + pgd_idx;
20233
20234 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20235- pmd = one_md_table_init(pgd);
20236- pmd = pmd + pmd_index(vaddr);
20237+ pud = pud_offset(pgd, vaddr);
20238+ pmd = pmd_offset(pud, vaddr);
20239+
20240+#ifdef CONFIG_X86_PAE
20241+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20242+#endif
20243+
20244 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20245 pmd++, pmd_idx++) {
20246 pte = page_table_kmap_check(one_page_table_init(pmd),
20247@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20248 }
20249 }
20250
20251-static inline int is_kernel_text(unsigned long addr)
20252+static inline int is_kernel_text(unsigned long start, unsigned long end)
20253 {
20254- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20255- return 1;
20256- return 0;
20257+ if ((start > ktla_ktva((unsigned long)_etext) ||
20258+ end <= ktla_ktva((unsigned long)_stext)) &&
20259+ (start > ktla_ktva((unsigned long)_einittext) ||
20260+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20261+
20262+#ifdef CONFIG_ACPI_SLEEP
20263+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20264+#endif
20265+
20266+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20267+ return 0;
20268+ return 1;
20269 }
20270
20271 /*
20272@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20273 unsigned long last_map_addr = end;
20274 unsigned long start_pfn, end_pfn;
20275 pgd_t *pgd_base = swapper_pg_dir;
20276- int pgd_idx, pmd_idx, pte_ofs;
20277+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20278 unsigned long pfn;
20279 pgd_t *pgd;
20280+ pud_t *pud;
20281 pmd_t *pmd;
20282 pte_t *pte;
20283 unsigned pages_2m, pages_4k;
20284@@ -281,8 +282,13 @@ repeat:
20285 pfn = start_pfn;
20286 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20287 pgd = pgd_base + pgd_idx;
20288- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20289- pmd = one_md_table_init(pgd);
20290+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20291+ pud = pud_offset(pgd, 0);
20292+ pmd = pmd_offset(pud, 0);
20293+
20294+#ifdef CONFIG_X86_PAE
20295+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20296+#endif
20297
20298 if (pfn >= end_pfn)
20299 continue;
20300@@ -294,14 +300,13 @@ repeat:
20301 #endif
20302 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20303 pmd++, pmd_idx++) {
20304- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20305+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20306
20307 /*
20308 * Map with big pages if possible, otherwise
20309 * create normal page tables:
20310 */
20311 if (use_pse) {
20312- unsigned int addr2;
20313 pgprot_t prot = PAGE_KERNEL_LARGE;
20314 /*
20315 * first pass will use the same initial
20316@@ -311,11 +316,7 @@ repeat:
20317 __pgprot(PTE_IDENT_ATTR |
20318 _PAGE_PSE);
20319
20320- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20321- PAGE_OFFSET + PAGE_SIZE-1;
20322-
20323- if (is_kernel_text(addr) ||
20324- is_kernel_text(addr2))
20325+ if (is_kernel_text(address, address + PMD_SIZE))
20326 prot = PAGE_KERNEL_LARGE_EXEC;
20327
20328 pages_2m++;
20329@@ -332,7 +333,7 @@ repeat:
20330 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20331 pte += pte_ofs;
20332 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20333- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20334+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20335 pgprot_t prot = PAGE_KERNEL;
20336 /*
20337 * first pass will use the same initial
20338@@ -340,7 +341,7 @@ repeat:
20339 */
20340 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20341
20342- if (is_kernel_text(addr))
20343+ if (is_kernel_text(address, address + PAGE_SIZE))
20344 prot = PAGE_KERNEL_EXEC;
20345
20346 pages_4k++;
20347@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20348
20349 pud = pud_offset(pgd, va);
20350 pmd = pmd_offset(pud, va);
20351- if (!pmd_present(*pmd))
20352+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20353 break;
20354
20355 pte = pte_offset_kernel(pmd, va);
20356@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20357
20358 static void __init pagetable_init(void)
20359 {
20360- pgd_t *pgd_base = swapper_pg_dir;
20361-
20362- permanent_kmaps_init(pgd_base);
20363+ permanent_kmaps_init(swapper_pg_dir);
20364 }
20365
20366-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20367+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20368 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20369
20370 /* user-defined highmem size */
20371@@ -757,6 +756,12 @@ void __init mem_init(void)
20372
20373 pci_iommu_alloc();
20374
20375+#ifdef CONFIG_PAX_PER_CPU_PGD
20376+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20377+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20378+ KERNEL_PGD_PTRS);
20379+#endif
20380+
20381 #ifdef CONFIG_FLATMEM
20382 BUG_ON(!mem_map);
20383 #endif
20384@@ -774,7 +779,7 @@ void __init mem_init(void)
20385 set_highmem_pages_init();
20386
20387 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20388- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20389+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20390 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20391
20392 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20393@@ -815,10 +820,10 @@ void __init mem_init(void)
20394 ((unsigned long)&__init_end -
20395 (unsigned long)&__init_begin) >> 10,
20396
20397- (unsigned long)&_etext, (unsigned long)&_edata,
20398- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20399+ (unsigned long)&_sdata, (unsigned long)&_edata,
20400+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20401
20402- (unsigned long)&_text, (unsigned long)&_etext,
20403+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20404 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20405
20406 /*
20407@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20408 if (!kernel_set_to_readonly)
20409 return;
20410
20411+ start = ktla_ktva(start);
20412 pr_debug("Set kernel text: %lx - %lx for read write\n",
20413 start, start+size);
20414
20415@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20416 if (!kernel_set_to_readonly)
20417 return;
20418
20419+ start = ktla_ktva(start);
20420 pr_debug("Set kernel text: %lx - %lx for read only\n",
20421 start, start+size);
20422
20423@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20424 unsigned long start = PFN_ALIGN(_text);
20425 unsigned long size = PFN_ALIGN(_etext) - start;
20426
20427+ start = ktla_ktva(start);
20428 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20429 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20430 size >> 10);
20431diff -urNp linux-3.0.7/arch/x86/mm/init_64.c linux-3.0.7/arch/x86/mm/init_64.c
20432--- linux-3.0.7/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20433+++ linux-3.0.7/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20434@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20435 * around without checking the pgd every time.
20436 */
20437
20438-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20439+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20440 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20441
20442 int force_personality32;
20443@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20444
20445 for (address = start; address <= end; address += PGDIR_SIZE) {
20446 const pgd_t *pgd_ref = pgd_offset_k(address);
20447+
20448+#ifdef CONFIG_PAX_PER_CPU_PGD
20449+ unsigned long cpu;
20450+#else
20451 struct page *page;
20452+#endif
20453
20454 if (pgd_none(*pgd_ref))
20455 continue;
20456
20457 spin_lock(&pgd_lock);
20458+
20459+#ifdef CONFIG_PAX_PER_CPU_PGD
20460+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20461+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20462+#else
20463 list_for_each_entry(page, &pgd_list, lru) {
20464 pgd_t *pgd;
20465 spinlock_t *pgt_lock;
20466@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20467 /* the pgt_lock only for Xen */
20468 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20469 spin_lock(pgt_lock);
20470+#endif
20471
20472 if (pgd_none(*pgd))
20473 set_pgd(pgd, *pgd_ref);
20474@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20475 BUG_ON(pgd_page_vaddr(*pgd)
20476 != pgd_page_vaddr(*pgd_ref));
20477
20478+#ifndef CONFIG_PAX_PER_CPU_PGD
20479 spin_unlock(pgt_lock);
20480+#endif
20481+
20482 }
20483 spin_unlock(&pgd_lock);
20484 }
20485@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20486 pmd = fill_pmd(pud, vaddr);
20487 pte = fill_pte(pmd, vaddr);
20488
20489+ pax_open_kernel();
20490 set_pte(pte, new_pte);
20491+ pax_close_kernel();
20492
20493 /*
20494 * It's enough to flush this one mapping.
20495@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20496 pgd = pgd_offset_k((unsigned long)__va(phys));
20497 if (pgd_none(*pgd)) {
20498 pud = (pud_t *) spp_getpage();
20499- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20500- _PAGE_USER));
20501+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20502 }
20503 pud = pud_offset(pgd, (unsigned long)__va(phys));
20504 if (pud_none(*pud)) {
20505 pmd = (pmd_t *) spp_getpage();
20506- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20507- _PAGE_USER));
20508+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20509 }
20510 pmd = pmd_offset(pud, phys);
20511 BUG_ON(!pmd_none(*pmd));
20512@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20513 if (pfn >= pgt_buf_top)
20514 panic("alloc_low_page: ran out of memory");
20515
20516- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20517+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20518 clear_page(adr);
20519 *phys = pfn * PAGE_SIZE;
20520 return adr;
20521@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20522
20523 phys = __pa(virt);
20524 left = phys & (PAGE_SIZE - 1);
20525- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20526+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20527 adr = (void *)(((unsigned long)adr) | left);
20528
20529 return adr;
20530@@ -693,6 +707,12 @@ void __init mem_init(void)
20531
20532 pci_iommu_alloc();
20533
20534+#ifdef CONFIG_PAX_PER_CPU_PGD
20535+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20536+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20537+ KERNEL_PGD_PTRS);
20538+#endif
20539+
20540 /* clear_bss() already clear the empty_zero_page */
20541
20542 reservedpages = 0;
20543@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20544 static struct vm_area_struct gate_vma = {
20545 .vm_start = VSYSCALL_START,
20546 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20547- .vm_page_prot = PAGE_READONLY_EXEC,
20548- .vm_flags = VM_READ | VM_EXEC
20549+ .vm_page_prot = PAGE_READONLY,
20550+ .vm_flags = VM_READ
20551 };
20552
20553 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20554@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20555
20556 const char *arch_vma_name(struct vm_area_struct *vma)
20557 {
20558- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20559+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20560 return "[vdso]";
20561 if (vma == &gate_vma)
20562 return "[vsyscall]";
20563diff -urNp linux-3.0.7/arch/x86/mm/init.c linux-3.0.7/arch/x86/mm/init.c
20564--- linux-3.0.7/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20565+++ linux-3.0.7/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20566@@ -31,7 +31,7 @@ int direct_gbpages
20567 static void __init find_early_table_space(unsigned long end, int use_pse,
20568 int use_gbpages)
20569 {
20570- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20571+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20572 phys_addr_t base;
20573
20574 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20575@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20576 */
20577 int devmem_is_allowed(unsigned long pagenr)
20578 {
20579- if (pagenr <= 256)
20580+#ifdef CONFIG_GRKERNSEC_KMEM
20581+ /* allow BDA */
20582+ if (!pagenr)
20583+ return 1;
20584+ /* allow EBDA */
20585+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20586+ return 1;
20587+#else
20588+ if (!pagenr)
20589+ return 1;
20590+#ifdef CONFIG_VM86
20591+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20592+ return 1;
20593+#endif
20594+#endif
20595+
20596+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20597 return 1;
20598+#ifdef CONFIG_GRKERNSEC_KMEM
20599+ /* throw out everything else below 1MB */
20600+ if (pagenr <= 256)
20601+ return 0;
20602+#endif
20603 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20604 return 0;
20605 if (!page_is_ram(pagenr))
20606 return 1;
20607+
20608 return 0;
20609 }
20610
20611@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20612
20613 void free_initmem(void)
20614 {
20615+
20616+#ifdef CONFIG_PAX_KERNEXEC
20617+#ifdef CONFIG_X86_32
20618+ /* PaX: limit KERNEL_CS to actual size */
20619+ unsigned long addr, limit;
20620+ struct desc_struct d;
20621+ int cpu;
20622+
20623+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20624+ limit = (limit - 1UL) >> PAGE_SHIFT;
20625+
20626+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20627+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20628+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20629+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20630+ }
20631+
20632+ /* PaX: make KERNEL_CS read-only */
20633+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20634+ if (!paravirt_enabled())
20635+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20636+/*
20637+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20638+ pgd = pgd_offset_k(addr);
20639+ pud = pud_offset(pgd, addr);
20640+ pmd = pmd_offset(pud, addr);
20641+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20642+ }
20643+*/
20644+#ifdef CONFIG_X86_PAE
20645+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20646+/*
20647+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20648+ pgd = pgd_offset_k(addr);
20649+ pud = pud_offset(pgd, addr);
20650+ pmd = pmd_offset(pud, addr);
20651+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20652+ }
20653+*/
20654+#endif
20655+
20656+#ifdef CONFIG_MODULES
20657+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20658+#endif
20659+
20660+#else
20661+ pgd_t *pgd;
20662+ pud_t *pud;
20663+ pmd_t *pmd;
20664+ unsigned long addr, end;
20665+
20666+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20667+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20668+ pgd = pgd_offset_k(addr);
20669+ pud = pud_offset(pgd, addr);
20670+ pmd = pmd_offset(pud, addr);
20671+ if (!pmd_present(*pmd))
20672+ continue;
20673+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20674+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20675+ else
20676+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20677+ }
20678+
20679+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20680+ end = addr + KERNEL_IMAGE_SIZE;
20681+ for (; addr < end; addr += PMD_SIZE) {
20682+ pgd = pgd_offset_k(addr);
20683+ pud = pud_offset(pgd, addr);
20684+ pmd = pmd_offset(pud, addr);
20685+ if (!pmd_present(*pmd))
20686+ continue;
20687+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20688+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20689+ }
20690+#endif
20691+
20692+ flush_tlb_all();
20693+#endif
20694+
20695 free_init_pages("unused kernel memory",
20696 (unsigned long)(&__init_begin),
20697 (unsigned long)(&__init_end));
20698diff -urNp linux-3.0.7/arch/x86/mm/iomap_32.c linux-3.0.7/arch/x86/mm/iomap_32.c
20699--- linux-3.0.7/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20700+++ linux-3.0.7/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20701@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20702 type = kmap_atomic_idx_push();
20703 idx = type + KM_TYPE_NR * smp_processor_id();
20704 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20705+
20706+ pax_open_kernel();
20707 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20708+ pax_close_kernel();
20709+
20710 arch_flush_lazy_mmu_mode();
20711
20712 return (void *)vaddr;
20713diff -urNp linux-3.0.7/arch/x86/mm/ioremap.c linux-3.0.7/arch/x86/mm/ioremap.c
20714--- linux-3.0.7/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20715+++ linux-3.0.7/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20716@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20717 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20718 int is_ram = page_is_ram(pfn);
20719
20720- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20721+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20722 return NULL;
20723 WARN_ON_ONCE(is_ram);
20724 }
20725@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20726 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20727
20728 static __initdata int after_paging_init;
20729-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20730+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20731
20732 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20733 {
20734@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20735 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20736
20737 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20738- memset(bm_pte, 0, sizeof(bm_pte));
20739- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20740+ pmd_populate_user(&init_mm, pmd, bm_pte);
20741
20742 /*
20743 * The boot-ioremap range spans multiple pmds, for which
20744diff -urNp linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c
20745--- linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20746+++ linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20747@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20748 * memory (e.g. tracked pages)? For now, we need this to avoid
20749 * invoking kmemcheck for PnP BIOS calls.
20750 */
20751- if (regs->flags & X86_VM_MASK)
20752+ if (v8086_mode(regs))
20753 return false;
20754- if (regs->cs != __KERNEL_CS)
20755+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20756 return false;
20757
20758 pte = kmemcheck_pte_lookup(address);
20759diff -urNp linux-3.0.7/arch/x86/mm/mmap.c linux-3.0.7/arch/x86/mm/mmap.c
20760--- linux-3.0.7/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20761+++ linux-3.0.7/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20762@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20763 * Leave an at least ~128 MB hole with possible stack randomization.
20764 */
20765 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20766-#define MAX_GAP (TASK_SIZE/6*5)
20767+#define MAX_GAP (pax_task_size/6*5)
20768
20769 /*
20770 * True on X86_32 or when emulating IA32 on X86_64
20771@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20772 return rnd << PAGE_SHIFT;
20773 }
20774
20775-static unsigned long mmap_base(void)
20776+static unsigned long mmap_base(struct mm_struct *mm)
20777 {
20778 unsigned long gap = rlimit(RLIMIT_STACK);
20779+ unsigned long pax_task_size = TASK_SIZE;
20780+
20781+#ifdef CONFIG_PAX_SEGMEXEC
20782+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20783+ pax_task_size = SEGMEXEC_TASK_SIZE;
20784+#endif
20785
20786 if (gap < MIN_GAP)
20787 gap = MIN_GAP;
20788 else if (gap > MAX_GAP)
20789 gap = MAX_GAP;
20790
20791- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20792+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20793 }
20794
20795 /*
20796 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20797 * does, but not when emulating X86_32
20798 */
20799-static unsigned long mmap_legacy_base(void)
20800+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20801 {
20802- if (mmap_is_ia32())
20803+ if (mmap_is_ia32()) {
20804+
20805+#ifdef CONFIG_PAX_SEGMEXEC
20806+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20807+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20808+ else
20809+#endif
20810+
20811 return TASK_UNMAPPED_BASE;
20812- else
20813+ } else
20814 return TASK_UNMAPPED_BASE + mmap_rnd();
20815 }
20816
20817@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20818 void arch_pick_mmap_layout(struct mm_struct *mm)
20819 {
20820 if (mmap_is_legacy()) {
20821- mm->mmap_base = mmap_legacy_base();
20822+ mm->mmap_base = mmap_legacy_base(mm);
20823+
20824+#ifdef CONFIG_PAX_RANDMMAP
20825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20826+ mm->mmap_base += mm->delta_mmap;
20827+#endif
20828+
20829 mm->get_unmapped_area = arch_get_unmapped_area;
20830 mm->unmap_area = arch_unmap_area;
20831 } else {
20832- mm->mmap_base = mmap_base();
20833+ mm->mmap_base = mmap_base(mm);
20834+
20835+#ifdef CONFIG_PAX_RANDMMAP
20836+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20837+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20838+#endif
20839+
20840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20841 mm->unmap_area = arch_unmap_area_topdown;
20842 }
20843diff -urNp linux-3.0.7/arch/x86/mm/mmio-mod.c linux-3.0.7/arch/x86/mm/mmio-mod.c
20844--- linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20845+++ linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20846@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20847 break;
20848 default:
20849 {
20850- unsigned char *ip = (unsigned char *)instptr;
20851+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20852 my_trace->opcode = MMIO_UNKNOWN_OP;
20853 my_trace->width = 0;
20854 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20855@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20856 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20857 void __iomem *addr)
20858 {
20859- static atomic_t next_id;
20860+ static atomic_unchecked_t next_id;
20861 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20862 /* These are page-unaligned. */
20863 struct mmiotrace_map map = {
20864@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20865 .private = trace
20866 },
20867 .phys = offset,
20868- .id = atomic_inc_return(&next_id)
20869+ .id = atomic_inc_return_unchecked(&next_id)
20870 };
20871 map.map_id = trace->id;
20872
20873diff -urNp linux-3.0.7/arch/x86/mm/pageattr.c linux-3.0.7/arch/x86/mm/pageattr.c
20874--- linux-3.0.7/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20875+++ linux-3.0.7/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20876@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20877 */
20878 #ifdef CONFIG_PCI_BIOS
20879 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20880- pgprot_val(forbidden) |= _PAGE_NX;
20881+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20882 #endif
20883
20884 /*
20885@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20886 * Does not cover __inittext since that is gone later on. On
20887 * 64bit we do not enforce !NX on the low mapping
20888 */
20889- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20890- pgprot_val(forbidden) |= _PAGE_NX;
20891+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20892+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20893
20894+#ifdef CONFIG_DEBUG_RODATA
20895 /*
20896 * The .rodata section needs to be read-only. Using the pfn
20897 * catches all aliases.
20898@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20899 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20900 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20901 pgprot_val(forbidden) |= _PAGE_RW;
20902+#endif
20903
20904 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20905 /*
20906@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20907 }
20908 #endif
20909
20910+#ifdef CONFIG_PAX_KERNEXEC
20911+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20912+ pgprot_val(forbidden) |= _PAGE_RW;
20913+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20914+ }
20915+#endif
20916+
20917 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20918
20919 return prot;
20920@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20921 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20922 {
20923 /* change init_mm */
20924+ pax_open_kernel();
20925 set_pte_atomic(kpte, pte);
20926+
20927 #ifdef CONFIG_X86_32
20928 if (!SHARED_KERNEL_PMD) {
20929+
20930+#ifdef CONFIG_PAX_PER_CPU_PGD
20931+ unsigned long cpu;
20932+#else
20933 struct page *page;
20934+#endif
20935
20936+#ifdef CONFIG_PAX_PER_CPU_PGD
20937+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20938+ pgd_t *pgd = get_cpu_pgd(cpu);
20939+#else
20940 list_for_each_entry(page, &pgd_list, lru) {
20941- pgd_t *pgd;
20942+ pgd_t *pgd = (pgd_t *)page_address(page);
20943+#endif
20944+
20945 pud_t *pud;
20946 pmd_t *pmd;
20947
20948- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20949+ pgd += pgd_index(address);
20950 pud = pud_offset(pgd, address);
20951 pmd = pmd_offset(pud, address);
20952 set_pte_atomic((pte_t *)pmd, pte);
20953 }
20954 }
20955 #endif
20956+ pax_close_kernel();
20957 }
20958
20959 static int
20960diff -urNp linux-3.0.7/arch/x86/mm/pageattr-test.c linux-3.0.7/arch/x86/mm/pageattr-test.c
20961--- linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20962+++ linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20963@@ -36,7 +36,7 @@ enum {
20964
20965 static int pte_testbit(pte_t pte)
20966 {
20967- return pte_flags(pte) & _PAGE_UNUSED1;
20968+ return pte_flags(pte) & _PAGE_CPA_TEST;
20969 }
20970
20971 struct split_state {
20972diff -urNp linux-3.0.7/arch/x86/mm/pat.c linux-3.0.7/arch/x86/mm/pat.c
20973--- linux-3.0.7/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20974+++ linux-3.0.7/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20975@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20976
20977 if (!entry) {
20978 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20979- current->comm, current->pid, start, end);
20980+ current->comm, task_pid_nr(current), start, end);
20981 return -EINVAL;
20982 }
20983
20984@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20985 while (cursor < to) {
20986 if (!devmem_is_allowed(pfn)) {
20987 printk(KERN_INFO
20988- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20989- current->comm, from, to);
20990+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20991+ current->comm, from, to, cursor);
20992 return 0;
20993 }
20994 cursor += PAGE_SIZE;
20995@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20996 printk(KERN_INFO
20997 "%s:%d ioremap_change_attr failed %s "
20998 "for %Lx-%Lx\n",
20999- current->comm, current->pid,
21000+ current->comm, task_pid_nr(current),
21001 cattr_name(flags),
21002 base, (unsigned long long)(base + size));
21003 return -EINVAL;
21004@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21005 if (want_flags != flags) {
21006 printk(KERN_WARNING
21007 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21008- current->comm, current->pid,
21009+ current->comm, task_pid_nr(current),
21010 cattr_name(want_flags),
21011 (unsigned long long)paddr,
21012 (unsigned long long)(paddr + size),
21013@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21014 free_memtype(paddr, paddr + size);
21015 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21016 " for %Lx-%Lx, got %s\n",
21017- current->comm, current->pid,
21018+ current->comm, task_pid_nr(current),
21019 cattr_name(want_flags),
21020 (unsigned long long)paddr,
21021 (unsigned long long)(paddr + size),
21022diff -urNp linux-3.0.7/arch/x86/mm/pf_in.c linux-3.0.7/arch/x86/mm/pf_in.c
21023--- linux-3.0.7/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
21024+++ linux-3.0.7/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
21025@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21026 int i;
21027 enum reason_type rv = OTHERS;
21028
21029- p = (unsigned char *)ins_addr;
21030+ p = (unsigned char *)ktla_ktva(ins_addr);
21031 p += skip_prefix(p, &prf);
21032 p += get_opcode(p, &opcode);
21033
21034@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21035 struct prefix_bits prf;
21036 int i;
21037
21038- p = (unsigned char *)ins_addr;
21039+ p = (unsigned char *)ktla_ktva(ins_addr);
21040 p += skip_prefix(p, &prf);
21041 p += get_opcode(p, &opcode);
21042
21043@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21044 struct prefix_bits prf;
21045 int i;
21046
21047- p = (unsigned char *)ins_addr;
21048+ p = (unsigned char *)ktla_ktva(ins_addr);
21049 p += skip_prefix(p, &prf);
21050 p += get_opcode(p, &opcode);
21051
21052@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21053 struct prefix_bits prf;
21054 int i;
21055
21056- p = (unsigned char *)ins_addr;
21057+ p = (unsigned char *)ktla_ktva(ins_addr);
21058 p += skip_prefix(p, &prf);
21059 p += get_opcode(p, &opcode);
21060 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21061@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21062 struct prefix_bits prf;
21063 int i;
21064
21065- p = (unsigned char *)ins_addr;
21066+ p = (unsigned char *)ktla_ktva(ins_addr);
21067 p += skip_prefix(p, &prf);
21068 p += get_opcode(p, &opcode);
21069 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21070diff -urNp linux-3.0.7/arch/x86/mm/pgtable_32.c linux-3.0.7/arch/x86/mm/pgtable_32.c
21071--- linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21072+++ linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21073@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21074 return;
21075 }
21076 pte = pte_offset_kernel(pmd, vaddr);
21077+
21078+ pax_open_kernel();
21079 if (pte_val(pteval))
21080 set_pte_at(&init_mm, vaddr, pte, pteval);
21081 else
21082 pte_clear(&init_mm, vaddr, pte);
21083+ pax_close_kernel();
21084
21085 /*
21086 * It's enough to flush this one mapping.
21087diff -urNp linux-3.0.7/arch/x86/mm/pgtable.c linux-3.0.7/arch/x86/mm/pgtable.c
21088--- linux-3.0.7/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21089+++ linux-3.0.7/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21090@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21091 list_del(&page->lru);
21092 }
21093
21094-#define UNSHARED_PTRS_PER_PGD \
21095- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21097+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21098
21099+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21100+{
21101+ while (count--)
21102+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21103+}
21104+#endif
21105+
21106+#ifdef CONFIG_PAX_PER_CPU_PGD
21107+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21108+{
21109+ while (count--)
21110+
21111+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21112+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21113+#else
21114+ *dst++ = *src++;
21115+#endif
21116
21117+}
21118+#endif
21119+
21120+#ifdef CONFIG_X86_64
21121+#define pxd_t pud_t
21122+#define pyd_t pgd_t
21123+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21124+#define pxd_free(mm, pud) pud_free((mm), (pud))
21125+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21126+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21127+#define PYD_SIZE PGDIR_SIZE
21128+#else
21129+#define pxd_t pmd_t
21130+#define pyd_t pud_t
21131+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21132+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21133+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21134+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21135+#define PYD_SIZE PUD_SIZE
21136+#endif
21137+
21138+#ifdef CONFIG_PAX_PER_CPU_PGD
21139+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21140+static inline void pgd_dtor(pgd_t *pgd) {}
21141+#else
21142 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21143 {
21144 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21145@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21146 pgd_list_del(pgd);
21147 spin_unlock(&pgd_lock);
21148 }
21149+#endif
21150
21151 /*
21152 * List of all pgd's needed for non-PAE so it can invalidate entries
21153@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21154 * -- wli
21155 */
21156
21157-#ifdef CONFIG_X86_PAE
21158+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21159 /*
21160 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21161 * updating the top-level pagetable entries to guarantee the
21162@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21163 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21164 * and initialize the kernel pmds here.
21165 */
21166-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21167+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21168
21169 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21170 {
21171@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21172 */
21173 flush_tlb_mm(mm);
21174 }
21175+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21176+#define PREALLOCATED_PXDS USER_PGD_PTRS
21177 #else /* !CONFIG_X86_PAE */
21178
21179 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21180-#define PREALLOCATED_PMDS 0
21181+#define PREALLOCATED_PXDS 0
21182
21183 #endif /* CONFIG_X86_PAE */
21184
21185-static void free_pmds(pmd_t *pmds[])
21186+static void free_pxds(pxd_t *pxds[])
21187 {
21188 int i;
21189
21190- for(i = 0; i < PREALLOCATED_PMDS; i++)
21191- if (pmds[i])
21192- free_page((unsigned long)pmds[i]);
21193+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21194+ if (pxds[i])
21195+ free_page((unsigned long)pxds[i]);
21196 }
21197
21198-static int preallocate_pmds(pmd_t *pmds[])
21199+static int preallocate_pxds(pxd_t *pxds[])
21200 {
21201 int i;
21202 bool failed = false;
21203
21204- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21205- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21206- if (pmd == NULL)
21207+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21208+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21209+ if (pxd == NULL)
21210 failed = true;
21211- pmds[i] = pmd;
21212+ pxds[i] = pxd;
21213 }
21214
21215 if (failed) {
21216- free_pmds(pmds);
21217+ free_pxds(pxds);
21218 return -ENOMEM;
21219 }
21220
21221@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21222 * preallocate which never got a corresponding vma will need to be
21223 * freed manually.
21224 */
21225-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21226+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21227 {
21228 int i;
21229
21230- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21231+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21232 pgd_t pgd = pgdp[i];
21233
21234 if (pgd_val(pgd) != 0) {
21235- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21236+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21237
21238- pgdp[i] = native_make_pgd(0);
21239+ set_pgd(pgdp + i, native_make_pgd(0));
21240
21241- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21242- pmd_free(mm, pmd);
21243+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21244+ pxd_free(mm, pxd);
21245 }
21246 }
21247 }
21248
21249-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21250+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21251 {
21252- pud_t *pud;
21253+ pyd_t *pyd;
21254 unsigned long addr;
21255 int i;
21256
21257- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21258+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21259 return;
21260
21261- pud = pud_offset(pgd, 0);
21262+#ifdef CONFIG_X86_64
21263+ pyd = pyd_offset(mm, 0L);
21264+#else
21265+ pyd = pyd_offset(pgd, 0L);
21266+#endif
21267
21268- for (addr = i = 0; i < PREALLOCATED_PMDS;
21269- i++, pud++, addr += PUD_SIZE) {
21270- pmd_t *pmd = pmds[i];
21271+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21272+ i++, pyd++, addr += PYD_SIZE) {
21273+ pxd_t *pxd = pxds[i];
21274
21275 if (i >= KERNEL_PGD_BOUNDARY)
21276- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21277- sizeof(pmd_t) * PTRS_PER_PMD);
21278+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21279+ sizeof(pxd_t) * PTRS_PER_PMD);
21280
21281- pud_populate(mm, pud, pmd);
21282+ pyd_populate(mm, pyd, pxd);
21283 }
21284 }
21285
21286 pgd_t *pgd_alloc(struct mm_struct *mm)
21287 {
21288 pgd_t *pgd;
21289- pmd_t *pmds[PREALLOCATED_PMDS];
21290+ pxd_t *pxds[PREALLOCATED_PXDS];
21291
21292 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21293
21294@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21295
21296 mm->pgd = pgd;
21297
21298- if (preallocate_pmds(pmds) != 0)
21299+ if (preallocate_pxds(pxds) != 0)
21300 goto out_free_pgd;
21301
21302 if (paravirt_pgd_alloc(mm) != 0)
21303- goto out_free_pmds;
21304+ goto out_free_pxds;
21305
21306 /*
21307 * Make sure that pre-populating the pmds is atomic with
21308@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21309 spin_lock(&pgd_lock);
21310
21311 pgd_ctor(mm, pgd);
21312- pgd_prepopulate_pmd(mm, pgd, pmds);
21313+ pgd_prepopulate_pxd(mm, pgd, pxds);
21314
21315 spin_unlock(&pgd_lock);
21316
21317 return pgd;
21318
21319-out_free_pmds:
21320- free_pmds(pmds);
21321+out_free_pxds:
21322+ free_pxds(pxds);
21323 out_free_pgd:
21324 free_page((unsigned long)pgd);
21325 out:
21326@@ -295,7 +344,7 @@ out:
21327
21328 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21329 {
21330- pgd_mop_up_pmds(mm, pgd);
21331+ pgd_mop_up_pxds(mm, pgd);
21332 pgd_dtor(pgd);
21333 paravirt_pgd_free(mm, pgd);
21334 free_page((unsigned long)pgd);
21335diff -urNp linux-3.0.7/arch/x86/mm/setup_nx.c linux-3.0.7/arch/x86/mm/setup_nx.c
21336--- linux-3.0.7/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21337+++ linux-3.0.7/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21338@@ -5,8 +5,10 @@
21339 #include <asm/pgtable.h>
21340 #include <asm/proto.h>
21341
21342+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21343 static int disable_nx __cpuinitdata;
21344
21345+#ifndef CONFIG_PAX_PAGEEXEC
21346 /*
21347 * noexec = on|off
21348 *
21349@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21350 return 0;
21351 }
21352 early_param("noexec", noexec_setup);
21353+#endif
21354+
21355+#endif
21356
21357 void __cpuinit x86_configure_nx(void)
21358 {
21359+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21360 if (cpu_has_nx && !disable_nx)
21361 __supported_pte_mask |= _PAGE_NX;
21362 else
21363+#endif
21364 __supported_pte_mask &= ~_PAGE_NX;
21365 }
21366
21367diff -urNp linux-3.0.7/arch/x86/mm/tlb.c linux-3.0.7/arch/x86/mm/tlb.c
21368--- linux-3.0.7/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21369+++ linux-3.0.7/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21370@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21371 BUG();
21372 cpumask_clear_cpu(cpu,
21373 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21374+
21375+#ifndef CONFIG_PAX_PER_CPU_PGD
21376 load_cr3(swapper_pg_dir);
21377+#endif
21378+
21379 }
21380 EXPORT_SYMBOL_GPL(leave_mm);
21381
21382diff -urNp linux-3.0.7/arch/x86/net/bpf_jit_comp.c linux-3.0.7/arch/x86/net/bpf_jit_comp.c
21383--- linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21384+++ linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21385@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21386 module_free(NULL, image);
21387 return;
21388 }
21389+ pax_open_kernel();
21390 memcpy(image + proglen, temp, ilen);
21391+ pax_close_kernel();
21392 }
21393 proglen += ilen;
21394 addrs[i] = proglen;
21395@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21396 break;
21397 }
21398 if (proglen == oldproglen) {
21399- image = module_alloc(max_t(unsigned int,
21400+ image = module_alloc_exec(max_t(unsigned int,
21401 proglen,
21402 sizeof(struct work_struct)));
21403 if (!image)
21404diff -urNp linux-3.0.7/arch/x86/net/bpf_jit.S linux-3.0.7/arch/x86/net/bpf_jit.S
21405--- linux-3.0.7/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21406+++ linux-3.0.7/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21407@@ -9,6 +9,7 @@
21408 */
21409 #include <linux/linkage.h>
21410 #include <asm/dwarf2.h>
21411+#include <asm/alternative-asm.h>
21412
21413 /*
21414 * Calling convention :
21415@@ -35,6 +36,7 @@ sk_load_word:
21416 jle bpf_slow_path_word
21417 mov (SKBDATA,%rsi),%eax
21418 bswap %eax /* ntohl() */
21419+ pax_force_retaddr
21420 ret
21421
21422
21423@@ -53,6 +55,7 @@ sk_load_half:
21424 jle bpf_slow_path_half
21425 movzwl (SKBDATA,%rsi),%eax
21426 rol $8,%ax # ntohs()
21427+ pax_force_retaddr
21428 ret
21429
21430 sk_load_byte_ind:
21431@@ -66,6 +69,7 @@ sk_load_byte:
21432 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21433 jle bpf_slow_path_byte
21434 movzbl (SKBDATA,%rsi),%eax
21435+ pax_force_retaddr
21436 ret
21437
21438 /**
21439@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21440 movzbl (SKBDATA,%rsi),%ebx
21441 and $15,%bl
21442 shl $2,%bl
21443+ pax_force_retaddr
21444 ret
21445 CFI_ENDPROC
21446 ENDPROC(sk_load_byte_msh)
21447@@ -91,6 +96,7 @@ bpf_error:
21448 xor %eax,%eax
21449 mov -8(%rbp),%rbx
21450 leaveq
21451+ pax_force_retaddr
21452 ret
21453
21454 /* rsi contains offset and can be scratched */
21455@@ -113,6 +119,7 @@ bpf_slow_path_word:
21456 js bpf_error
21457 mov -12(%rbp),%eax
21458 bswap %eax
21459+ pax_force_retaddr
21460 ret
21461
21462 bpf_slow_path_half:
21463@@ -121,12 +128,14 @@ bpf_slow_path_half:
21464 mov -12(%rbp),%ax
21465 rol $8,%ax
21466 movzwl %ax,%eax
21467+ pax_force_retaddr
21468 ret
21469
21470 bpf_slow_path_byte:
21471 bpf_slow_path_common(1)
21472 js bpf_error
21473 movzbl -12(%rbp),%eax
21474+ pax_force_retaddr
21475 ret
21476
21477 bpf_slow_path_byte_msh:
21478@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
21479 and $15,%al
21480 shl $2,%al
21481 xchg %eax,%ebx
21482+ pax_force_retaddr
21483 ret
21484diff -urNp linux-3.0.7/arch/x86/oprofile/backtrace.c linux-3.0.7/arch/x86/oprofile/backtrace.c
21485--- linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21486+++ linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21487@@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21488 struct stack_frame_ia32 *fp;
21489 unsigned long bytes;
21490
21491- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21492+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21493 if (bytes != sizeof(bufhead))
21494 return NULL;
21495
21496- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21497+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21498
21499 oprofile_add_trace(bufhead[0].return_address);
21500
21501@@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21502 struct stack_frame bufhead[2];
21503 unsigned long bytes;
21504
21505- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21506+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21507 if (bytes != sizeof(bufhead))
21508 return NULL;
21509
21510@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21511 {
21512 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21513
21514- if (!user_mode_vm(regs)) {
21515+ if (!user_mode(regs)) {
21516 unsigned long stack = kernel_stack_pointer(regs);
21517 if (depth)
21518 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21519diff -urNp linux-3.0.7/arch/x86/pci/mrst.c linux-3.0.7/arch/x86/pci/mrst.c
21520--- linux-3.0.7/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21521+++ linux-3.0.7/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21522@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21523 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21524 pci_mmcfg_late_init();
21525 pcibios_enable_irq = mrst_pci_irq_enable;
21526- pci_root_ops = pci_mrst_ops;
21527+ pax_open_kernel();
21528+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21529+ pax_close_kernel();
21530 /* Continue with standard init */
21531 return 1;
21532 }
21533diff -urNp linux-3.0.7/arch/x86/pci/pcbios.c linux-3.0.7/arch/x86/pci/pcbios.c
21534--- linux-3.0.7/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21535+++ linux-3.0.7/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21536@@ -79,50 +79,93 @@ union bios32 {
21537 static struct {
21538 unsigned long address;
21539 unsigned short segment;
21540-} bios32_indirect = { 0, __KERNEL_CS };
21541+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21542
21543 /*
21544 * Returns the entry point for the given service, NULL on error
21545 */
21546
21547-static unsigned long bios32_service(unsigned long service)
21548+static unsigned long __devinit bios32_service(unsigned long service)
21549 {
21550 unsigned char return_code; /* %al */
21551 unsigned long address; /* %ebx */
21552 unsigned long length; /* %ecx */
21553 unsigned long entry; /* %edx */
21554 unsigned long flags;
21555+ struct desc_struct d, *gdt;
21556
21557 local_irq_save(flags);
21558- __asm__("lcall *(%%edi); cld"
21559+
21560+ gdt = get_cpu_gdt_table(smp_processor_id());
21561+
21562+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21563+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21564+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21565+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21566+
21567+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21568 : "=a" (return_code),
21569 "=b" (address),
21570 "=c" (length),
21571 "=d" (entry)
21572 : "0" (service),
21573 "1" (0),
21574- "D" (&bios32_indirect));
21575+ "D" (&bios32_indirect),
21576+ "r"(__PCIBIOS_DS)
21577+ : "memory");
21578+
21579+ pax_open_kernel();
21580+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21581+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21582+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21583+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21584+ pax_close_kernel();
21585+
21586 local_irq_restore(flags);
21587
21588 switch (return_code) {
21589- case 0:
21590- return address + entry;
21591- case 0x80: /* Not present */
21592- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21593- return 0;
21594- default: /* Shouldn't happen */
21595- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21596- service, return_code);
21597+ case 0: {
21598+ int cpu;
21599+ unsigned char flags;
21600+
21601+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21602+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21603+ printk(KERN_WARNING "bios32_service: not valid\n");
21604 return 0;
21605+ }
21606+ address = address + PAGE_OFFSET;
21607+ length += 16UL; /* some BIOSs underreport this... */
21608+ flags = 4;
21609+ if (length >= 64*1024*1024) {
21610+ length >>= PAGE_SHIFT;
21611+ flags |= 8;
21612+ }
21613+
21614+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21615+ gdt = get_cpu_gdt_table(cpu);
21616+ pack_descriptor(&d, address, length, 0x9b, flags);
21617+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21618+ pack_descriptor(&d, address, length, 0x93, flags);
21619+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21620+ }
21621+ return entry;
21622+ }
21623+ case 0x80: /* Not present */
21624+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21625+ return 0;
21626+ default: /* Shouldn't happen */
21627+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21628+ service, return_code);
21629+ return 0;
21630 }
21631 }
21632
21633 static struct {
21634 unsigned long address;
21635 unsigned short segment;
21636-} pci_indirect = { 0, __KERNEL_CS };
21637+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21638
21639-static int pci_bios_present;
21640+static int pci_bios_present __read_only;
21641
21642 static int __devinit check_pcibios(void)
21643 {
21644@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21645 unsigned long flags, pcibios_entry;
21646
21647 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21648- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21649+ pci_indirect.address = pcibios_entry;
21650
21651 local_irq_save(flags);
21652- __asm__(
21653- "lcall *(%%edi); cld\n\t"
21654+ __asm__("movw %w6, %%ds\n\t"
21655+ "lcall *%%ss:(%%edi); cld\n\t"
21656+ "push %%ss\n\t"
21657+ "pop %%ds\n\t"
21658 "jc 1f\n\t"
21659 "xor %%ah, %%ah\n"
21660 "1:"
21661@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21662 "=b" (ebx),
21663 "=c" (ecx)
21664 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21665- "D" (&pci_indirect)
21666+ "D" (&pci_indirect),
21667+ "r" (__PCIBIOS_DS)
21668 : "memory");
21669 local_irq_restore(flags);
21670
21671@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21672
21673 switch (len) {
21674 case 1:
21675- __asm__("lcall *(%%esi); cld\n\t"
21676+ __asm__("movw %w6, %%ds\n\t"
21677+ "lcall *%%ss:(%%esi); cld\n\t"
21678+ "push %%ss\n\t"
21679+ "pop %%ds\n\t"
21680 "jc 1f\n\t"
21681 "xor %%ah, %%ah\n"
21682 "1:"
21683@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21684 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21685 "b" (bx),
21686 "D" ((long)reg),
21687- "S" (&pci_indirect));
21688+ "S" (&pci_indirect),
21689+ "r" (__PCIBIOS_DS));
21690 /*
21691 * Zero-extend the result beyond 8 bits, do not trust the
21692 * BIOS having done it:
21693@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21694 *value &= 0xff;
21695 break;
21696 case 2:
21697- __asm__("lcall *(%%esi); cld\n\t"
21698+ __asm__("movw %w6, %%ds\n\t"
21699+ "lcall *%%ss:(%%esi); cld\n\t"
21700+ "push %%ss\n\t"
21701+ "pop %%ds\n\t"
21702 "jc 1f\n\t"
21703 "xor %%ah, %%ah\n"
21704 "1:"
21705@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21706 : "1" (PCIBIOS_READ_CONFIG_WORD),
21707 "b" (bx),
21708 "D" ((long)reg),
21709- "S" (&pci_indirect));
21710+ "S" (&pci_indirect),
21711+ "r" (__PCIBIOS_DS));
21712 /*
21713 * Zero-extend the result beyond 16 bits, do not trust the
21714 * BIOS having done it:
21715@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21716 *value &= 0xffff;
21717 break;
21718 case 4:
21719- __asm__("lcall *(%%esi); cld\n\t"
21720+ __asm__("movw %w6, %%ds\n\t"
21721+ "lcall *%%ss:(%%esi); cld\n\t"
21722+ "push %%ss\n\t"
21723+ "pop %%ds\n\t"
21724 "jc 1f\n\t"
21725 "xor %%ah, %%ah\n"
21726 "1:"
21727@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21728 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21729 "b" (bx),
21730 "D" ((long)reg),
21731- "S" (&pci_indirect));
21732+ "S" (&pci_indirect),
21733+ "r" (__PCIBIOS_DS));
21734 break;
21735 }
21736
21737@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21738
21739 switch (len) {
21740 case 1:
21741- __asm__("lcall *(%%esi); cld\n\t"
21742+ __asm__("movw %w6, %%ds\n\t"
21743+ "lcall *%%ss:(%%esi); cld\n\t"
21744+ "push %%ss\n\t"
21745+ "pop %%ds\n\t"
21746 "jc 1f\n\t"
21747 "xor %%ah, %%ah\n"
21748 "1:"
21749@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21750 "c" (value),
21751 "b" (bx),
21752 "D" ((long)reg),
21753- "S" (&pci_indirect));
21754+ "S" (&pci_indirect),
21755+ "r" (__PCIBIOS_DS));
21756 break;
21757 case 2:
21758- __asm__("lcall *(%%esi); cld\n\t"
21759+ __asm__("movw %w6, %%ds\n\t"
21760+ "lcall *%%ss:(%%esi); cld\n\t"
21761+ "push %%ss\n\t"
21762+ "pop %%ds\n\t"
21763 "jc 1f\n\t"
21764 "xor %%ah, %%ah\n"
21765 "1:"
21766@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21767 "c" (value),
21768 "b" (bx),
21769 "D" ((long)reg),
21770- "S" (&pci_indirect));
21771+ "S" (&pci_indirect),
21772+ "r" (__PCIBIOS_DS));
21773 break;
21774 case 4:
21775- __asm__("lcall *(%%esi); cld\n\t"
21776+ __asm__("movw %w6, %%ds\n\t"
21777+ "lcall *%%ss:(%%esi); cld\n\t"
21778+ "push %%ss\n\t"
21779+ "pop %%ds\n\t"
21780 "jc 1f\n\t"
21781 "xor %%ah, %%ah\n"
21782 "1:"
21783@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21784 "c" (value),
21785 "b" (bx),
21786 "D" ((long)reg),
21787- "S" (&pci_indirect));
21788+ "S" (&pci_indirect),
21789+ "r" (__PCIBIOS_DS));
21790 break;
21791 }
21792
21793@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21794
21795 DBG("PCI: Fetching IRQ routing table... ");
21796 __asm__("push %%es\n\t"
21797+ "movw %w8, %%ds\n\t"
21798 "push %%ds\n\t"
21799 "pop %%es\n\t"
21800- "lcall *(%%esi); cld\n\t"
21801+ "lcall *%%ss:(%%esi); cld\n\t"
21802 "pop %%es\n\t"
21803+ "push %%ss\n\t"
21804+ "pop %%ds\n"
21805 "jc 1f\n\t"
21806 "xor %%ah, %%ah\n"
21807 "1:"
21808@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21809 "1" (0),
21810 "D" ((long) &opt),
21811 "S" (&pci_indirect),
21812- "m" (opt)
21813+ "m" (opt),
21814+ "r" (__PCIBIOS_DS)
21815 : "memory");
21816 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21817 if (ret & 0xff00)
21818@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21819 {
21820 int ret;
21821
21822- __asm__("lcall *(%%esi); cld\n\t"
21823+ __asm__("movw %w5, %%ds\n\t"
21824+ "lcall *%%ss:(%%esi); cld\n\t"
21825+ "push %%ss\n\t"
21826+ "pop %%ds\n"
21827 "jc 1f\n\t"
21828 "xor %%ah, %%ah\n"
21829 "1:"
21830@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21831 : "0" (PCIBIOS_SET_PCI_HW_INT),
21832 "b" ((dev->bus->number << 8) | dev->devfn),
21833 "c" ((irq << 8) | (pin + 10)),
21834- "S" (&pci_indirect));
21835+ "S" (&pci_indirect),
21836+ "r" (__PCIBIOS_DS));
21837 return !(ret & 0xff00);
21838 }
21839 EXPORT_SYMBOL(pcibios_set_irq_routing);
21840diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_32.c linux-3.0.7/arch/x86/platform/efi/efi_32.c
21841--- linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21842+++ linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
21843@@ -38,70 +38,56 @@
21844 */
21845
21846 static unsigned long efi_rt_eflags;
21847-static pgd_t efi_bak_pg_dir_pointer[2];
21848+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21849
21850-void efi_call_phys_prelog(void)
21851+void __init efi_call_phys_prelog(void)
21852 {
21853- unsigned long cr4;
21854- unsigned long temp;
21855 struct desc_ptr gdt_descr;
21856
21857- local_irq_save(efi_rt_eflags);
21858+#ifdef CONFIG_PAX_KERNEXEC
21859+ struct desc_struct d;
21860+#endif
21861
21862- /*
21863- * If I don't have PAE, I should just duplicate two entries in page
21864- * directory. If I have PAE, I just need to duplicate one entry in
21865- * page directory.
21866- */
21867- cr4 = read_cr4_safe();
21868+ local_irq_save(efi_rt_eflags);
21869
21870- if (cr4 & X86_CR4_PAE) {
21871- efi_bak_pg_dir_pointer[0].pgd =
21872- swapper_pg_dir[pgd_index(0)].pgd;
21873- swapper_pg_dir[0].pgd =
21874- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21875- } else {
21876- efi_bak_pg_dir_pointer[0].pgd =
21877- swapper_pg_dir[pgd_index(0)].pgd;
21878- efi_bak_pg_dir_pointer[1].pgd =
21879- swapper_pg_dir[pgd_index(0x400000)].pgd;
21880- swapper_pg_dir[pgd_index(0)].pgd =
21881- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21882- temp = PAGE_OFFSET + 0x400000;
21883- swapper_pg_dir[pgd_index(0x400000)].pgd =
21884- swapper_pg_dir[pgd_index(temp)].pgd;
21885- }
21886+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21887+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21888+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21889
21890 /*
21891 * After the lock is released, the original page table is restored.
21892 */
21893 __flush_tlb_all();
21894
21895+#ifdef CONFIG_PAX_KERNEXEC
21896+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21897+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21898+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21899+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21900+#endif
21901+
21902 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21903 gdt_descr.size = GDT_SIZE - 1;
21904 load_gdt(&gdt_descr);
21905 }
21906
21907-void efi_call_phys_epilog(void)
21908+void __init efi_call_phys_epilog(void)
21909 {
21910- unsigned long cr4;
21911 struct desc_ptr gdt_descr;
21912
21913+#ifdef CONFIG_PAX_KERNEXEC
21914+ struct desc_struct d;
21915+
21916+ memset(&d, 0, sizeof d);
21917+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21918+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21919+#endif
21920+
21921 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21922 gdt_descr.size = GDT_SIZE - 1;
21923 load_gdt(&gdt_descr);
21924
21925- cr4 = read_cr4_safe();
21926-
21927- if (cr4 & X86_CR4_PAE) {
21928- swapper_pg_dir[pgd_index(0)].pgd =
21929- efi_bak_pg_dir_pointer[0].pgd;
21930- } else {
21931- swapper_pg_dir[pgd_index(0)].pgd =
21932- efi_bak_pg_dir_pointer[0].pgd;
21933- swapper_pg_dir[pgd_index(0x400000)].pgd =
21934- efi_bak_pg_dir_pointer[1].pgd;
21935- }
21936+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21937
21938 /*
21939 * After the lock is released, the original page table is restored.
21940diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S
21941--- linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21942+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21943@@ -6,7 +6,9 @@
21944 */
21945
21946 #include <linux/linkage.h>
21947+#include <linux/init.h>
21948 #include <asm/page_types.h>
21949+#include <asm/segment.h>
21950
21951 /*
21952 * efi_call_phys(void *, ...) is a function with variable parameters.
21953@@ -20,7 +22,7 @@
21954 * service functions will comply with gcc calling convention, too.
21955 */
21956
21957-.text
21958+__INIT
21959 ENTRY(efi_call_phys)
21960 /*
21961 * 0. The function can only be called in Linux kernel. So CS has been
21962@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21963 * The mapping of lower virtual memory has been created in prelog and
21964 * epilog.
21965 */
21966- movl $1f, %edx
21967- subl $__PAGE_OFFSET, %edx
21968- jmp *%edx
21969+ movl $(__KERNEXEC_EFI_DS), %edx
21970+ mov %edx, %ds
21971+ mov %edx, %es
21972+ mov %edx, %ss
21973+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21974 1:
21975
21976 /*
21977@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21978 * parameter 2, ..., param n. To make things easy, we save the return
21979 * address of efi_call_phys in a global variable.
21980 */
21981- popl %edx
21982- movl %edx, saved_return_addr
21983- /* get the function pointer into ECX*/
21984- popl %ecx
21985- movl %ecx, efi_rt_function_ptr
21986- movl $2f, %edx
21987- subl $__PAGE_OFFSET, %edx
21988- pushl %edx
21989+ popl (saved_return_addr)
21990+ popl (efi_rt_function_ptr)
21991
21992 /*
21993 * 3. Clear PG bit in %CR0.
21994@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21995 /*
21996 * 5. Call the physical function.
21997 */
21998- jmp *%ecx
21999+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22000
22001-2:
22002 /*
22003 * 6. After EFI runtime service returns, control will return to
22004 * following instruction. We'd better readjust stack pointer first.
22005@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22006 movl %cr0, %edx
22007 orl $0x80000000, %edx
22008 movl %edx, %cr0
22009- jmp 1f
22010-1:
22011+
22012 /*
22013 * 8. Now restore the virtual mode from flat mode by
22014 * adding EIP with PAGE_OFFSET.
22015 */
22016- movl $1f, %edx
22017- jmp *%edx
22018+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22019 1:
22020+ movl $(__KERNEL_DS), %edx
22021+ mov %edx, %ds
22022+ mov %edx, %es
22023+ mov %edx, %ss
22024
22025 /*
22026 * 9. Balance the stack. And because EAX contain the return value,
22027 * we'd better not clobber it.
22028 */
22029- leal efi_rt_function_ptr, %edx
22030- movl (%edx), %ecx
22031- pushl %ecx
22032+ pushl (efi_rt_function_ptr)
22033
22034 /*
22035- * 10. Push the saved return address onto the stack and return.
22036+ * 10. Return to the saved return address.
22037 */
22038- leal saved_return_addr, %edx
22039- movl (%edx), %ecx
22040- pushl %ecx
22041- ret
22042+ jmpl *(saved_return_addr)
22043 ENDPROC(efi_call_phys)
22044 .previous
22045
22046-.data
22047+__INITDATA
22048 saved_return_addr:
22049 .long 0
22050 efi_rt_function_ptr:
22051diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S
22052--- linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22053+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22054@@ -7,6 +7,7 @@
22055 */
22056
22057 #include <linux/linkage.h>
22058+#include <asm/alternative-asm.h>
22059
22060 #define SAVE_XMM \
22061 mov %rsp, %rax; \
22062@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22063 call *%rdi
22064 addq $32, %rsp
22065 RESTORE_XMM
22066+ pax_force_retaddr
22067 ret
22068 ENDPROC(efi_call0)
22069
22070@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22071 call *%rdi
22072 addq $32, %rsp
22073 RESTORE_XMM
22074+ pax_force_retaddr
22075 ret
22076 ENDPROC(efi_call1)
22077
22078@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22079 call *%rdi
22080 addq $32, %rsp
22081 RESTORE_XMM
22082+ pax_force_retaddr
22083 ret
22084 ENDPROC(efi_call2)
22085
22086@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22087 call *%rdi
22088 addq $32, %rsp
22089 RESTORE_XMM
22090+ pax_force_retaddr
22091 ret
22092 ENDPROC(efi_call3)
22093
22094@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22095 call *%rdi
22096 addq $32, %rsp
22097 RESTORE_XMM
22098+ pax_force_retaddr
22099 ret
22100 ENDPROC(efi_call4)
22101
22102@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22103 call *%rdi
22104 addq $48, %rsp
22105 RESTORE_XMM
22106+ pax_force_retaddr
22107 ret
22108 ENDPROC(efi_call5)
22109
22110@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22111 call *%rdi
22112 addq $48, %rsp
22113 RESTORE_XMM
22114+ pax_force_retaddr
22115 ret
22116 ENDPROC(efi_call6)
22117diff -urNp linux-3.0.7/arch/x86/platform/mrst/mrst.c linux-3.0.7/arch/x86/platform/mrst/mrst.c
22118--- linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22119+++ linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22120@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22121 }
22122
22123 /* Reboot and power off are handled by the SCU on a MID device */
22124-static void mrst_power_off(void)
22125+static __noreturn void mrst_power_off(void)
22126 {
22127 intel_scu_ipc_simple_command(0xf1, 1);
22128+ BUG();
22129 }
22130
22131-static void mrst_reboot(void)
22132+static __noreturn void mrst_reboot(void)
22133 {
22134 intel_scu_ipc_simple_command(0xf1, 0);
22135+ BUG();
22136 }
22137
22138 /*
22139diff -urNp linux-3.0.7/arch/x86/platform/uv/tlb_uv.c linux-3.0.7/arch/x86/platform/uv/tlb_uv.c
22140--- linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22141+++ linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22142@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22143 cpumask_t mask;
22144 struct reset_args reset_args;
22145
22146+ pax_track_stack();
22147+
22148 reset_args.sender = sender;
22149 cpus_clear(mask);
22150 /* find a single cpu for each uvhub in this distribution mask */
22151diff -urNp linux-3.0.7/arch/x86/power/cpu.c linux-3.0.7/arch/x86/power/cpu.c
22152--- linux-3.0.7/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22153+++ linux-3.0.7/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22154@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22155 static void fix_processor_context(void)
22156 {
22157 int cpu = smp_processor_id();
22158- struct tss_struct *t = &per_cpu(init_tss, cpu);
22159+ struct tss_struct *t = init_tss + cpu;
22160
22161 set_tss_desc(cpu, t); /*
22162 * This just modifies memory; should not be
22163@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22164 */
22165
22166 #ifdef CONFIG_X86_64
22167+ pax_open_kernel();
22168 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22169+ pax_close_kernel();
22170
22171 syscall_init(); /* This sets MSR_*STAR and related */
22172 #endif
22173diff -urNp linux-3.0.7/arch/x86/vdso/Makefile linux-3.0.7/arch/x86/vdso/Makefile
22174--- linux-3.0.7/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22175+++ linux-3.0.7/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22176@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22177 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22178 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22179
22180-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22181+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22182 GCOV_PROFILE := n
22183
22184 #
22185diff -urNp linux-3.0.7/arch/x86/vdso/vdso32-setup.c linux-3.0.7/arch/x86/vdso/vdso32-setup.c
22186--- linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22187+++ linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22188@@ -25,6 +25,7 @@
22189 #include <asm/tlbflush.h>
22190 #include <asm/vdso.h>
22191 #include <asm/proto.h>
22192+#include <asm/mman.h>
22193
22194 enum {
22195 VDSO_DISABLED = 0,
22196@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22197 void enable_sep_cpu(void)
22198 {
22199 int cpu = get_cpu();
22200- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22201+ struct tss_struct *tss = init_tss + cpu;
22202
22203 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22204 put_cpu();
22205@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22206 gate_vma.vm_start = FIXADDR_USER_START;
22207 gate_vma.vm_end = FIXADDR_USER_END;
22208 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22209- gate_vma.vm_page_prot = __P101;
22210+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22211 /*
22212 * Make sure the vDSO gets into every core dump.
22213 * Dumping its contents makes post-mortem fully interpretable later
22214@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22215 if (compat)
22216 addr = VDSO_HIGH_BASE;
22217 else {
22218- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22219+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22220 if (IS_ERR_VALUE(addr)) {
22221 ret = addr;
22222 goto up_fail;
22223 }
22224 }
22225
22226- current->mm->context.vdso = (void *)addr;
22227+ current->mm->context.vdso = addr;
22228
22229 if (compat_uses_vma || !compat) {
22230 /*
22231@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22232 }
22233
22234 current_thread_info()->sysenter_return =
22235- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22236+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22237
22238 up_fail:
22239 if (ret)
22240- current->mm->context.vdso = NULL;
22241+ current->mm->context.vdso = 0;
22242
22243 up_write(&mm->mmap_sem);
22244
22245@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22246
22247 const char *arch_vma_name(struct vm_area_struct *vma)
22248 {
22249- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22250+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22251 return "[vdso]";
22252+
22253+#ifdef CONFIG_PAX_SEGMEXEC
22254+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22255+ return "[vdso]";
22256+#endif
22257+
22258 return NULL;
22259 }
22260
22261@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22262 * Check to see if the corresponding task was created in compat vdso
22263 * mode.
22264 */
22265- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22266+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22267 return &gate_vma;
22268 return NULL;
22269 }
22270diff -urNp linux-3.0.7/arch/x86/vdso/vma.c linux-3.0.7/arch/x86/vdso/vma.c
22271--- linux-3.0.7/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22272+++ linux-3.0.7/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22273@@ -15,18 +15,19 @@
22274 #include <asm/proto.h>
22275 #include <asm/vdso.h>
22276
22277-unsigned int __read_mostly vdso_enabled = 1;
22278-
22279 extern char vdso_start[], vdso_end[];
22280 extern unsigned short vdso_sync_cpuid;
22281+extern char __vsyscall_0;
22282
22283 static struct page **vdso_pages;
22284+static struct page *vsyscall_page;
22285 static unsigned vdso_size;
22286
22287 static int __init init_vdso_vars(void)
22288 {
22289- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22290- int i;
22291+ size_t nbytes = vdso_end - vdso_start;
22292+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22293+ size_t i;
22294
22295 vdso_size = npages << PAGE_SHIFT;
22296 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22297@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22298 goto oom;
22299 for (i = 0; i < npages; i++) {
22300 struct page *p;
22301- p = alloc_page(GFP_KERNEL);
22302+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22303 if (!p)
22304 goto oom;
22305 vdso_pages[i] = p;
22306- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22307+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22308+ nbytes -= PAGE_SIZE;
22309 }
22310+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22311
22312 return 0;
22313
22314 oom:
22315- printk("Cannot allocate vdso\n");
22316- vdso_enabled = 0;
22317- return -ENOMEM;
22318+ panic("Cannot allocate vdso\n");
22319 }
22320 subsys_initcall(init_vdso_vars);
22321
22322@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22323 unsigned long addr;
22324 int ret;
22325
22326- if (!vdso_enabled)
22327- return 0;
22328-
22329 down_write(&mm->mmap_sem);
22330- addr = vdso_addr(mm->start_stack, vdso_size);
22331- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22332+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22333+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22334 if (IS_ERR_VALUE(addr)) {
22335 ret = addr;
22336 goto up_fail;
22337 }
22338
22339- current->mm->context.vdso = (void *)addr;
22340+ mm->context.vdso = addr + PAGE_SIZE;
22341
22342- ret = install_special_mapping(mm, addr, vdso_size,
22343+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
22344 VM_READ|VM_EXEC|
22345- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22346+ VM_MAYREAD|VM_MAYEXEC|
22347 VM_ALWAYSDUMP,
22348- vdso_pages);
22349+ &vsyscall_page);
22350 if (ret) {
22351- current->mm->context.vdso = NULL;
22352+ mm->context.vdso = 0;
22353 goto up_fail;
22354 }
22355
22356+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22357+ VM_READ|VM_EXEC|
22358+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22359+ VM_ALWAYSDUMP,
22360+ vdso_pages);
22361+ if (ret)
22362+ mm->context.vdso = 0;
22363+
22364 up_fail:
22365 up_write(&mm->mmap_sem);
22366 return ret;
22367 }
22368-
22369-static __init int vdso_setup(char *s)
22370-{
22371- vdso_enabled = simple_strtoul(s, NULL, 0);
22372- return 0;
22373-}
22374-__setup("vdso=", vdso_setup);
22375diff -urNp linux-3.0.7/arch/x86/xen/enlighten.c linux-3.0.7/arch/x86/xen/enlighten.c
22376--- linux-3.0.7/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22377+++ linux-3.0.7/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22378@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22379
22380 struct shared_info xen_dummy_shared_info;
22381
22382-void *xen_initial_gdt;
22383-
22384 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22385 __read_mostly int xen_have_vector_callback;
22386 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22387@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22388 #endif
22389 };
22390
22391-static void xen_reboot(int reason)
22392+static __noreturn void xen_reboot(int reason)
22393 {
22394 struct sched_shutdown r = { .reason = reason };
22395
22396@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22397 BUG();
22398 }
22399
22400-static void xen_restart(char *msg)
22401+static __noreturn void xen_restart(char *msg)
22402 {
22403 xen_reboot(SHUTDOWN_reboot);
22404 }
22405
22406-static void xen_emergency_restart(void)
22407+static __noreturn void xen_emergency_restart(void)
22408 {
22409 xen_reboot(SHUTDOWN_reboot);
22410 }
22411
22412-static void xen_machine_halt(void)
22413+static __noreturn void xen_machine_halt(void)
22414 {
22415 xen_reboot(SHUTDOWN_poweroff);
22416 }
22417@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22418 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22419
22420 /* Work out if we support NX */
22421- x86_configure_nx();
22422+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22423+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22424+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22425+ unsigned l, h;
22426+
22427+ __supported_pte_mask |= _PAGE_NX;
22428+ rdmsr(MSR_EFER, l, h);
22429+ l |= EFER_NX;
22430+ wrmsr(MSR_EFER, l, h);
22431+ }
22432+#endif
22433
22434 xen_setup_features();
22435
22436@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22437
22438 machine_ops = xen_machine_ops;
22439
22440- /*
22441- * The only reliable way to retain the initial address of the
22442- * percpu gdt_page is to remember it here, so we can go and
22443- * mark it RW later, when the initial percpu area is freed.
22444- */
22445- xen_initial_gdt = &per_cpu(gdt_page, 0);
22446-
22447 xen_smp_init();
22448
22449 #ifdef CONFIG_ACPI_NUMA
22450diff -urNp linux-3.0.7/arch/x86/xen/mmu.c linux-3.0.7/arch/x86/xen/mmu.c
22451--- linux-3.0.7/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22452+++ linux-3.0.7/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22453@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22454 convert_pfn_mfn(init_level4_pgt);
22455 convert_pfn_mfn(level3_ident_pgt);
22456 convert_pfn_mfn(level3_kernel_pgt);
22457+ convert_pfn_mfn(level3_vmalloc_pgt);
22458+ convert_pfn_mfn(level3_vmemmap_pgt);
22459
22460 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22461 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22462@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22463 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22464 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22465 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22466+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22467+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22468 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22469+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22470 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22471 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22472
22473@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22474 pv_mmu_ops.set_pud = xen_set_pud;
22475 #if PAGETABLE_LEVELS == 4
22476 pv_mmu_ops.set_pgd = xen_set_pgd;
22477+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22478 #endif
22479
22480 /* This will work as long as patching hasn't happened yet
22481@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22482 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22483 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22484 .set_pgd = xen_set_pgd_hyper,
22485+ .set_pgd_batched = xen_set_pgd_hyper,
22486
22487 .alloc_pud = xen_alloc_pmd_init,
22488 .release_pud = xen_release_pmd_init,
22489diff -urNp linux-3.0.7/arch/x86/xen/smp.c linux-3.0.7/arch/x86/xen/smp.c
22490--- linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:54:53.000000000 -0400
22491+++ linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:55:27.000000000 -0400
22492@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
22493 {
22494 BUG_ON(smp_processor_id() != 0);
22495 native_smp_prepare_boot_cpu();
22496-
22497- /* We've switched to the "real" per-cpu gdt, so make sure the
22498- old memory can be recycled */
22499- make_lowmem_page_readwrite(xen_initial_gdt);
22500-
22501 xen_filter_cpu_maps();
22502 xen_setup_vcpu_info_placement();
22503 }
22504@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
22505 gdt = get_cpu_gdt_table(cpu);
22506
22507 ctxt->flags = VGCF_IN_KERNEL;
22508- ctxt->user_regs.ds = __USER_DS;
22509- ctxt->user_regs.es = __USER_DS;
22510+ ctxt->user_regs.ds = __KERNEL_DS;
22511+ ctxt->user_regs.es = __KERNEL_DS;
22512 ctxt->user_regs.ss = __KERNEL_DS;
22513 #ifdef CONFIG_X86_32
22514 ctxt->user_regs.fs = __KERNEL_PERCPU;
22515- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22516+ savesegment(gs, ctxt->user_regs.gs);
22517 #else
22518 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22519 #endif
22520@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
22521 int rc;
22522
22523 per_cpu(current_task, cpu) = idle;
22524+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22525 #ifdef CONFIG_X86_32
22526 irq_ctx_init(cpu);
22527 #else
22528 clear_tsk_thread_flag(idle, TIF_FORK);
22529- per_cpu(kernel_stack, cpu) =
22530- (unsigned long)task_stack_page(idle) -
22531- KERNEL_STACK_OFFSET + THREAD_SIZE;
22532+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22533 #endif
22534 xen_setup_runstate_info(cpu);
22535 xen_setup_timer(cpu);
22536diff -urNp linux-3.0.7/arch/x86/xen/xen-asm_32.S linux-3.0.7/arch/x86/xen/xen-asm_32.S
22537--- linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:54:53.000000000 -0400
22538+++ linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:55:27.000000000 -0400
22539@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22540 ESP_OFFSET=4 # bytes pushed onto stack
22541
22542 /*
22543- * Store vcpu_info pointer for easy access. Do it this way to
22544- * avoid having to reload %fs
22545+ * Store vcpu_info pointer for easy access.
22546 */
22547 #ifdef CONFIG_SMP
22548- GET_THREAD_INFO(%eax)
22549- movl TI_cpu(%eax), %eax
22550- movl __per_cpu_offset(,%eax,4), %eax
22551- mov xen_vcpu(%eax), %eax
22552+ push %fs
22553+ mov $(__KERNEL_PERCPU), %eax
22554+ mov %eax, %fs
22555+ mov PER_CPU_VAR(xen_vcpu), %eax
22556+ pop %fs
22557 #else
22558 movl xen_vcpu, %eax
22559 #endif
22560diff -urNp linux-3.0.7/arch/x86/xen/xen-head.S linux-3.0.7/arch/x86/xen/xen-head.S
22561--- linux-3.0.7/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22562+++ linux-3.0.7/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22563@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22564 #ifdef CONFIG_X86_32
22565 mov %esi,xen_start_info
22566 mov $init_thread_union+THREAD_SIZE,%esp
22567+#ifdef CONFIG_SMP
22568+ movl $cpu_gdt_table,%edi
22569+ movl $__per_cpu_load,%eax
22570+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22571+ rorl $16,%eax
22572+ movb %al,__KERNEL_PERCPU + 4(%edi)
22573+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22574+ movl $__per_cpu_end - 1,%eax
22575+ subl $__per_cpu_start,%eax
22576+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22577+#endif
22578 #else
22579 mov %rsi,xen_start_info
22580 mov $init_thread_union+THREAD_SIZE,%rsp
22581diff -urNp linux-3.0.7/arch/x86/xen/xen-ops.h linux-3.0.7/arch/x86/xen/xen-ops.h
22582--- linux-3.0.7/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22583+++ linux-3.0.7/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22584@@ -10,8 +10,6 @@
22585 extern const char xen_hypervisor_callback[];
22586 extern const char xen_failsafe_callback[];
22587
22588-extern void *xen_initial_gdt;
22589-
22590 struct trap_info;
22591 void xen_copy_trap_info(struct trap_info *traps);
22592
22593diff -urNp linux-3.0.7/block/blk-iopoll.c linux-3.0.7/block/blk-iopoll.c
22594--- linux-3.0.7/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22595+++ linux-3.0.7/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22596@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22597 }
22598 EXPORT_SYMBOL(blk_iopoll_complete);
22599
22600-static void blk_iopoll_softirq(struct softirq_action *h)
22601+static void blk_iopoll_softirq(void)
22602 {
22603 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22604 int rearm = 0, budget = blk_iopoll_budget;
22605diff -urNp linux-3.0.7/block/blk-map.c linux-3.0.7/block/blk-map.c
22606--- linux-3.0.7/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22607+++ linux-3.0.7/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22608@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22609 if (!len || !kbuf)
22610 return -EINVAL;
22611
22612- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22613+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22614 if (do_copy)
22615 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22616 else
22617diff -urNp linux-3.0.7/block/blk-softirq.c linux-3.0.7/block/blk-softirq.c
22618--- linux-3.0.7/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22619+++ linux-3.0.7/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22620@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22621 * Softirq action handler - move entries to local list and loop over them
22622 * while passing them to the queue registered handler.
22623 */
22624-static void blk_done_softirq(struct softirq_action *h)
22625+static void blk_done_softirq(void)
22626 {
22627 struct list_head *cpu_list, local_list;
22628
22629diff -urNp linux-3.0.7/block/bsg.c linux-3.0.7/block/bsg.c
22630--- linux-3.0.7/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22631+++ linux-3.0.7/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22632@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22633 struct sg_io_v4 *hdr, struct bsg_device *bd,
22634 fmode_t has_write_perm)
22635 {
22636+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22637+ unsigned char *cmdptr;
22638+
22639 if (hdr->request_len > BLK_MAX_CDB) {
22640 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22641 if (!rq->cmd)
22642 return -ENOMEM;
22643- }
22644+ cmdptr = rq->cmd;
22645+ } else
22646+ cmdptr = tmpcmd;
22647
22648- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22649+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22650 hdr->request_len))
22651 return -EFAULT;
22652
22653+ if (cmdptr != rq->cmd)
22654+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22655+
22656 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22657 if (blk_verify_command(rq->cmd, has_write_perm))
22658 return -EPERM;
22659@@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22660 struct request *rq, *next_rq = NULL;
22661 int ret, rw;
22662 unsigned int dxfer_len;
22663- void *dxferp = NULL;
22664+ void __user *dxferp = NULL;
22665 struct bsg_class_device *bcd = &q->bsg_dev;
22666
22667 /* if the LLD has been removed then the bsg_unregister_queue will
22668@@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22669 rq->next_rq = next_rq;
22670 next_rq->cmd_type = rq->cmd_type;
22671
22672- dxferp = (void*)(unsigned long)hdr->din_xferp;
22673+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22674 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22675 hdr->din_xfer_len, GFP_KERNEL);
22676 if (ret)
22677@@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
22678
22679 if (hdr->dout_xfer_len) {
22680 dxfer_len = hdr->dout_xfer_len;
22681- dxferp = (void*)(unsigned long)hdr->dout_xferp;
22682+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
22683 } else if (hdr->din_xfer_len) {
22684 dxfer_len = hdr->din_xfer_len;
22685- dxferp = (void*)(unsigned long)hdr->din_xferp;
22686+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22687 } else
22688 dxfer_len = 0;
22689
22690@@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
22691 int len = min_t(unsigned int, hdr->max_response_len,
22692 rq->sense_len);
22693
22694- ret = copy_to_user((void*)(unsigned long)hdr->response,
22695+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
22696 rq->sense, len);
22697 if (!ret)
22698 hdr->response_len = len;
22699diff -urNp linux-3.0.7/block/compat_ioctl.c linux-3.0.7/block/compat_ioctl.c
22700--- linux-3.0.7/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22701+++ linux-3.0.7/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
22702@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
22703 err |= __get_user(f->spec1, &uf->spec1);
22704 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
22705 err |= __get_user(name, &uf->name);
22706- f->name = compat_ptr(name);
22707+ f->name = (void __force_kernel *)compat_ptr(name);
22708 if (err) {
22709 err = -EFAULT;
22710 goto out;
22711diff -urNp linux-3.0.7/block/scsi_ioctl.c linux-3.0.7/block/scsi_ioctl.c
22712--- linux-3.0.7/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22713+++ linux-3.0.7/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22714@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22715 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22716 struct sg_io_hdr *hdr, fmode_t mode)
22717 {
22718- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22719+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22720+ unsigned char *cmdptr;
22721+
22722+ if (rq->cmd != rq->__cmd)
22723+ cmdptr = rq->cmd;
22724+ else
22725+ cmdptr = tmpcmd;
22726+
22727+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22728 return -EFAULT;
22729+
22730+ if (cmdptr != rq->cmd)
22731+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22732+
22733 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22734 return -EPERM;
22735
22736@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22737 int err;
22738 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22739 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22740+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22741+ unsigned char *cmdptr;
22742
22743 if (!sic)
22744 return -EINVAL;
22745@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22746 */
22747 err = -EFAULT;
22748 rq->cmd_len = cmdlen;
22749- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22750+
22751+ if (rq->cmd != rq->__cmd)
22752+ cmdptr = rq->cmd;
22753+ else
22754+ cmdptr = tmpcmd;
22755+
22756+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22757 goto error;
22758
22759+ if (rq->cmd != cmdptr)
22760+ memcpy(rq->cmd, cmdptr, cmdlen);
22761+
22762 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22763 goto error;
22764
22765diff -urNp linux-3.0.7/crypto/cryptd.c linux-3.0.7/crypto/cryptd.c
22766--- linux-3.0.7/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22767+++ linux-3.0.7/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22768@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22769
22770 struct cryptd_blkcipher_request_ctx {
22771 crypto_completion_t complete;
22772-};
22773+} __no_const;
22774
22775 struct cryptd_hash_ctx {
22776 struct crypto_shash *child;
22777@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22778
22779 struct cryptd_aead_request_ctx {
22780 crypto_completion_t complete;
22781-};
22782+} __no_const;
22783
22784 static void cryptd_queue_worker(struct work_struct *work);
22785
22786diff -urNp linux-3.0.7/crypto/gf128mul.c linux-3.0.7/crypto/gf128mul.c
22787--- linux-3.0.7/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22788+++ linux-3.0.7/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22789@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22790 for (i = 0; i < 7; ++i)
22791 gf128mul_x_lle(&p[i + 1], &p[i]);
22792
22793- memset(r, 0, sizeof(r));
22794+ memset(r, 0, sizeof(*r));
22795 for (i = 0;;) {
22796 u8 ch = ((u8 *)b)[15 - i];
22797
22798@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22799 for (i = 0; i < 7; ++i)
22800 gf128mul_x_bbe(&p[i + 1], &p[i]);
22801
22802- memset(r, 0, sizeof(r));
22803+ memset(r, 0, sizeof(*r));
22804 for (i = 0;;) {
22805 u8 ch = ((u8 *)b)[i];
22806
22807diff -urNp linux-3.0.7/crypto/serpent.c linux-3.0.7/crypto/serpent.c
22808--- linux-3.0.7/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22809+++ linux-3.0.7/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22810@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22811 u32 r0,r1,r2,r3,r4;
22812 int i;
22813
22814+ pax_track_stack();
22815+
22816 /* Copy key, add padding */
22817
22818 for (i = 0; i < keylen; ++i)
22819diff -urNp linux-3.0.7/Documentation/dontdiff linux-3.0.7/Documentation/dontdiff
22820--- linux-3.0.7/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22821+++ linux-3.0.7/Documentation/dontdiff 2011-10-07 19:07:23.000000000 -0400
22822@@ -5,6 +5,7 @@
22823 *.cis
22824 *.cpio
22825 *.csp
22826+*.dbg
22827 *.dsp
22828 *.dvi
22829 *.elf
22830@@ -48,9 +49,11 @@
22831 *.tab.h
22832 *.tex
22833 *.ver
22834+*.vim
22835 *.xml
22836 *.xz
22837 *_MODULES
22838+*_reg_safe.h
22839 *_vga16.c
22840 *~
22841 \#*#
22842@@ -70,6 +73,7 @@ Kerntypes
22843 Module.markers
22844 Module.symvers
22845 PENDING
22846+PERF*
22847 SCCS
22848 System.map*
22849 TAGS
22850@@ -98,6 +102,8 @@ bzImage*
22851 capability_names.h
22852 capflags.c
22853 classlist.h*
22854+clut_vga16.c
22855+common-cmds.h
22856 comp*.log
22857 compile.h*
22858 conf
22859@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22860 gconf
22861 gconf.glade.h
22862 gen-devlist
22863+gen-kdb_cmds.c
22864 gen_crc32table
22865 gen_init_cpio
22866 generated
22867 genheaders
22868 genksyms
22869 *_gray256.c
22870+hash
22871 hpet_example
22872 hugepage-mmap
22873 hugepage-shm
22874@@ -146,7 +154,6 @@ int32.c
22875 int4.c
22876 int8.c
22877 kallsyms
22878-kconfig
22879 keywords.c
22880 ksym.c*
22881 ksym.h*
22882@@ -154,7 +161,6 @@ kxgettext
22883 lkc_defs.h
22884 lex.c
22885 lex.*.c
22886-linux
22887 logo_*.c
22888 logo_*_clut224.c
22889 logo_*_mono.c
22890@@ -166,7 +172,6 @@ machtypes.h
22891 map
22892 map_hugetlb
22893 maui_boot.h
22894-media
22895 mconf
22896 miboot*
22897 mk_elfconfig
22898@@ -174,6 +179,7 @@ mkboot
22899 mkbugboot
22900 mkcpustr
22901 mkdep
22902+mkpiggy
22903 mkprep
22904 mkregtable
22905 mktables
22906@@ -209,6 +215,7 @@ r300_reg_safe.h
22907 r420_reg_safe.h
22908 r600_reg_safe.h
22909 recordmcount
22910+regdb.c
22911 relocs
22912 rlim_names.h
22913 rn50_reg_safe.h
22914@@ -219,6 +226,7 @@ setup
22915 setup.bin
22916 setup.elf
22917 sImage
22918+slabinfo
22919 sm_tbl*
22920 split-include
22921 syscalltab.h
22922@@ -246,7 +254,9 @@ vmlinux
22923 vmlinux-*
22924 vmlinux.aout
22925 vmlinux.bin.all
22926+vmlinux.bin.bz2
22927 vmlinux.lds
22928+vmlinux.relocs
22929 vmlinuz
22930 voffset.h
22931 vsyscall.lds
22932@@ -254,6 +264,7 @@ vsyscall_32.lds
22933 wanxlfw.inc
22934 uImage
22935 unifdef
22936+utsrelease.h
22937 wakeup.bin
22938 wakeup.elf
22939 wakeup.lds
22940diff -urNp linux-3.0.7/Documentation/kernel-parameters.txt linux-3.0.7/Documentation/kernel-parameters.txt
22941--- linux-3.0.7/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22942+++ linux-3.0.7/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22943@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22944 the specified number of seconds. This is to be used if
22945 your oopses keep scrolling off the screen.
22946
22947+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22948+ virtualization environments that don't cope well with the
22949+ expand down segment used by UDEREF on X86-32 or the frequent
22950+ page table updates on X86-64.
22951+
22952+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22953+
22954 pcbit= [HW,ISDN]
22955
22956 pcd. [PARIDE]
22957diff -urNp linux-3.0.7/drivers/acpi/apei/cper.c linux-3.0.7/drivers/acpi/apei/cper.c
22958--- linux-3.0.7/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22959+++ linux-3.0.7/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22960@@ -38,12 +38,12 @@
22961 */
22962 u64 cper_next_record_id(void)
22963 {
22964- static atomic64_t seq;
22965+ static atomic64_unchecked_t seq;
22966
22967- if (!atomic64_read(&seq))
22968- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22969+ if (!atomic64_read_unchecked(&seq))
22970+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22971
22972- return atomic64_inc_return(&seq);
22973+ return atomic64_inc_return_unchecked(&seq);
22974 }
22975 EXPORT_SYMBOL_GPL(cper_next_record_id);
22976
22977diff -urNp linux-3.0.7/drivers/acpi/ec_sys.c linux-3.0.7/drivers/acpi/ec_sys.c
22978--- linux-3.0.7/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22979+++ linux-3.0.7/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22980@@ -11,6 +11,7 @@
22981 #include <linux/kernel.h>
22982 #include <linux/acpi.h>
22983 #include <linux/debugfs.h>
22984+#include <asm/uaccess.h>
22985 #include "internal.h"
22986
22987 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22988@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22989 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22990 */
22991 unsigned int size = EC_SPACE_SIZE;
22992- u8 *data = (u8 *) buf;
22993+ u8 data;
22994 loff_t init_off = *off;
22995 int err = 0;
22996
22997@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22998 size = count;
22999
23000 while (size) {
23001- err = ec_read(*off, &data[*off - init_off]);
23002+ err = ec_read(*off, &data);
23003 if (err)
23004 return err;
23005+ if (put_user(data, &buf[*off - init_off]))
23006+ return -EFAULT;
23007 *off += 1;
23008 size--;
23009 }
23010@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23011
23012 unsigned int size = count;
23013 loff_t init_off = *off;
23014- u8 *data = (u8 *) buf;
23015 int err = 0;
23016
23017 if (*off >= EC_SPACE_SIZE)
23018@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23019 }
23020
23021 while (size) {
23022- u8 byte_write = data[*off - init_off];
23023+ u8 byte_write;
23024+ if (get_user(byte_write, &buf[*off - init_off]))
23025+ return -EFAULT;
23026 err = ec_write(*off, byte_write);
23027 if (err)
23028 return err;
23029diff -urNp linux-3.0.7/drivers/acpi/proc.c linux-3.0.7/drivers/acpi/proc.c
23030--- linux-3.0.7/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
23031+++ linux-3.0.7/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
23032@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23033 size_t count, loff_t * ppos)
23034 {
23035 struct list_head *node, *next;
23036- char strbuf[5];
23037- char str[5] = "";
23038- unsigned int len = count;
23039-
23040- if (len > 4)
23041- len = 4;
23042- if (len < 0)
23043- return -EFAULT;
23044+ char strbuf[5] = {0};
23045
23046- if (copy_from_user(strbuf, buffer, len))
23047+ if (count > 4)
23048+ count = 4;
23049+ if (copy_from_user(strbuf, buffer, count))
23050 return -EFAULT;
23051- strbuf[len] = '\0';
23052- sscanf(strbuf, "%s", str);
23053+ strbuf[count] = '\0';
23054
23055 mutex_lock(&acpi_device_lock);
23056 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23057@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23058 if (!dev->wakeup.flags.valid)
23059 continue;
23060
23061- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23062+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23063 if (device_can_wakeup(&dev->dev)) {
23064 bool enable = !device_may_wakeup(&dev->dev);
23065 device_set_wakeup_enable(&dev->dev, enable);
23066diff -urNp linux-3.0.7/drivers/acpi/processor_driver.c linux-3.0.7/drivers/acpi/processor_driver.c
23067--- linux-3.0.7/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23068+++ linux-3.0.7/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23069@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23070 return 0;
23071 #endif
23072
23073- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23074+ BUG_ON(pr->id >= nr_cpu_ids);
23075
23076 /*
23077 * Buggy BIOS check
23078diff -urNp linux-3.0.7/drivers/ata/libata-core.c linux-3.0.7/drivers/ata/libata-core.c
23079--- linux-3.0.7/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23080+++ linux-3.0.7/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23081@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23082 struct ata_port *ap;
23083 unsigned int tag;
23084
23085- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23086+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23087 ap = qc->ap;
23088
23089 qc->flags = 0;
23090@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23091 struct ata_port *ap;
23092 struct ata_link *link;
23093
23094- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23095+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23096 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23097 ap = qc->ap;
23098 link = qc->dev->link;
23099@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23100 return;
23101
23102 spin_lock(&lock);
23103+ pax_open_kernel();
23104
23105 for (cur = ops->inherits; cur; cur = cur->inherits) {
23106 void **inherit = (void **)cur;
23107@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23108 if (IS_ERR(*pp))
23109 *pp = NULL;
23110
23111- ops->inherits = NULL;
23112+ *(struct ata_port_operations **)&ops->inherits = NULL;
23113
23114+ pax_close_kernel();
23115 spin_unlock(&lock);
23116 }
23117
23118diff -urNp linux-3.0.7/drivers/ata/libata-eh.c linux-3.0.7/drivers/ata/libata-eh.c
23119--- linux-3.0.7/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23120+++ linux-3.0.7/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23121@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23122 {
23123 struct ata_link *link;
23124
23125+ pax_track_stack();
23126+
23127 ata_for_each_link(link, ap, HOST_FIRST)
23128 ata_eh_link_report(link);
23129 }
23130diff -urNp linux-3.0.7/drivers/ata/pata_arasan_cf.c linux-3.0.7/drivers/ata/pata_arasan_cf.c
23131--- linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23132+++ linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23133@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23134 /* Handle platform specific quirks */
23135 if (pdata->quirk) {
23136 if (pdata->quirk & CF_BROKEN_PIO) {
23137- ap->ops->set_piomode = NULL;
23138+ pax_open_kernel();
23139+ *(void **)&ap->ops->set_piomode = NULL;
23140+ pax_close_kernel();
23141 ap->pio_mask = 0;
23142 }
23143 if (pdata->quirk & CF_BROKEN_MWDMA)
23144diff -urNp linux-3.0.7/drivers/atm/adummy.c linux-3.0.7/drivers/atm/adummy.c
23145--- linux-3.0.7/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23146+++ linux-3.0.7/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23147@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23148 vcc->pop(vcc, skb);
23149 else
23150 dev_kfree_skb_any(skb);
23151- atomic_inc(&vcc->stats->tx);
23152+ atomic_inc_unchecked(&vcc->stats->tx);
23153
23154 return 0;
23155 }
23156diff -urNp linux-3.0.7/drivers/atm/ambassador.c linux-3.0.7/drivers/atm/ambassador.c
23157--- linux-3.0.7/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23158+++ linux-3.0.7/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23159@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23160 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23161
23162 // VC layer stats
23163- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23164+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23165
23166 // free the descriptor
23167 kfree (tx_descr);
23168@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23169 dump_skb ("<<<", vc, skb);
23170
23171 // VC layer stats
23172- atomic_inc(&atm_vcc->stats->rx);
23173+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23174 __net_timestamp(skb);
23175 // end of our responsibility
23176 atm_vcc->push (atm_vcc, skb);
23177@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23178 } else {
23179 PRINTK (KERN_INFO, "dropped over-size frame");
23180 // should we count this?
23181- atomic_inc(&atm_vcc->stats->rx_drop);
23182+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23183 }
23184
23185 } else {
23186@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23187 }
23188
23189 if (check_area (skb->data, skb->len)) {
23190- atomic_inc(&atm_vcc->stats->tx_err);
23191+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23192 return -ENOMEM; // ?
23193 }
23194
23195diff -urNp linux-3.0.7/drivers/atm/atmtcp.c linux-3.0.7/drivers/atm/atmtcp.c
23196--- linux-3.0.7/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23197+++ linux-3.0.7/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23198@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23199 if (vcc->pop) vcc->pop(vcc,skb);
23200 else dev_kfree_skb(skb);
23201 if (dev_data) return 0;
23202- atomic_inc(&vcc->stats->tx_err);
23203+ atomic_inc_unchecked(&vcc->stats->tx_err);
23204 return -ENOLINK;
23205 }
23206 size = skb->len+sizeof(struct atmtcp_hdr);
23207@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23208 if (!new_skb) {
23209 if (vcc->pop) vcc->pop(vcc,skb);
23210 else dev_kfree_skb(skb);
23211- atomic_inc(&vcc->stats->tx_err);
23212+ atomic_inc_unchecked(&vcc->stats->tx_err);
23213 return -ENOBUFS;
23214 }
23215 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23216@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23217 if (vcc->pop) vcc->pop(vcc,skb);
23218 else dev_kfree_skb(skb);
23219 out_vcc->push(out_vcc,new_skb);
23220- atomic_inc(&vcc->stats->tx);
23221- atomic_inc(&out_vcc->stats->rx);
23222+ atomic_inc_unchecked(&vcc->stats->tx);
23223+ atomic_inc_unchecked(&out_vcc->stats->rx);
23224 return 0;
23225 }
23226
23227@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23228 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23229 read_unlock(&vcc_sklist_lock);
23230 if (!out_vcc) {
23231- atomic_inc(&vcc->stats->tx_err);
23232+ atomic_inc_unchecked(&vcc->stats->tx_err);
23233 goto done;
23234 }
23235 skb_pull(skb,sizeof(struct atmtcp_hdr));
23236@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23237 __net_timestamp(new_skb);
23238 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23239 out_vcc->push(out_vcc,new_skb);
23240- atomic_inc(&vcc->stats->tx);
23241- atomic_inc(&out_vcc->stats->rx);
23242+ atomic_inc_unchecked(&vcc->stats->tx);
23243+ atomic_inc_unchecked(&out_vcc->stats->rx);
23244 done:
23245 if (vcc->pop) vcc->pop(vcc,skb);
23246 else dev_kfree_skb(skb);
23247diff -urNp linux-3.0.7/drivers/atm/eni.c linux-3.0.7/drivers/atm/eni.c
23248--- linux-3.0.7/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23249+++ linux-3.0.7/drivers/atm/eni.c 2011-10-11 10:44:33.000000000 -0400
23250@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23251 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23252 vcc->dev->number);
23253 length = 0;
23254- atomic_inc(&vcc->stats->rx_err);
23255+ atomic_inc_unchecked(&vcc->stats->rx_err);
23256 }
23257 else {
23258 length = ATM_CELL_SIZE-1; /* no HEC */
23259@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23260 size);
23261 }
23262 eff = length = 0;
23263- atomic_inc(&vcc->stats->rx_err);
23264+ atomic_inc_unchecked(&vcc->stats->rx_err);
23265 }
23266 else {
23267 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23268@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23269 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23270 vcc->dev->number,vcc->vci,length,size << 2,descr);
23271 length = eff = 0;
23272- atomic_inc(&vcc->stats->rx_err);
23273+ atomic_inc_unchecked(&vcc->stats->rx_err);
23274 }
23275 }
23276 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23277@@ -771,7 +771,7 @@ rx_dequeued++;
23278 vcc->push(vcc,skb);
23279 pushed++;
23280 }
23281- atomic_inc(&vcc->stats->rx);
23282+ atomic_inc_unchecked(&vcc->stats->rx);
23283 }
23284 wake_up(&eni_dev->rx_wait);
23285 }
23286@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23287 PCI_DMA_TODEVICE);
23288 if (vcc->pop) vcc->pop(vcc,skb);
23289 else dev_kfree_skb_irq(skb);
23290- atomic_inc(&vcc->stats->tx);
23291+ atomic_inc_unchecked(&vcc->stats->tx);
23292 wake_up(&eni_dev->tx_wait);
23293 dma_complete++;
23294 }
23295@@ -1568,7 +1568,7 @@ tx_complete++;
23296 /*--------------------------------- entries ---------------------------------*/
23297
23298
23299-static const char *media_name[] __devinitdata = {
23300+static const char *media_name[] __devinitconst = {
23301 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23302 "UTP", "05?", "06?", "07?", /* 4- 7 */
23303 "TAXI","09?", "10?", "11?", /* 8-11 */
23304diff -urNp linux-3.0.7/drivers/atm/firestream.c linux-3.0.7/drivers/atm/firestream.c
23305--- linux-3.0.7/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23306+++ linux-3.0.7/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23307@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23308 }
23309 }
23310
23311- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23312+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23313
23314 fs_dprintk (FS_DEBUG_TXMEM, "i");
23315 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23316@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23317 #endif
23318 skb_put (skb, qe->p1 & 0xffff);
23319 ATM_SKB(skb)->vcc = atm_vcc;
23320- atomic_inc(&atm_vcc->stats->rx);
23321+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23322 __net_timestamp(skb);
23323 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23324 atm_vcc->push (atm_vcc, skb);
23325@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23326 kfree (pe);
23327 }
23328 if (atm_vcc)
23329- atomic_inc(&atm_vcc->stats->rx_drop);
23330+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23331 break;
23332 case 0x1f: /* Reassembly abort: no buffers. */
23333 /* Silently increment error counter. */
23334 if (atm_vcc)
23335- atomic_inc(&atm_vcc->stats->rx_drop);
23336+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23337 break;
23338 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23339 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23340diff -urNp linux-3.0.7/drivers/atm/fore200e.c linux-3.0.7/drivers/atm/fore200e.c
23341--- linux-3.0.7/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23342+++ linux-3.0.7/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23343@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23344 #endif
23345 /* check error condition */
23346 if (*entry->status & STATUS_ERROR)
23347- atomic_inc(&vcc->stats->tx_err);
23348+ atomic_inc_unchecked(&vcc->stats->tx_err);
23349 else
23350- atomic_inc(&vcc->stats->tx);
23351+ atomic_inc_unchecked(&vcc->stats->tx);
23352 }
23353 }
23354
23355@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23356 if (skb == NULL) {
23357 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23358
23359- atomic_inc(&vcc->stats->rx_drop);
23360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23361 return -ENOMEM;
23362 }
23363
23364@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23365
23366 dev_kfree_skb_any(skb);
23367
23368- atomic_inc(&vcc->stats->rx_drop);
23369+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23370 return -ENOMEM;
23371 }
23372
23373 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23374
23375 vcc->push(vcc, skb);
23376- atomic_inc(&vcc->stats->rx);
23377+ atomic_inc_unchecked(&vcc->stats->rx);
23378
23379 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23380
23381@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23382 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23383 fore200e->atm_dev->number,
23384 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23385- atomic_inc(&vcc->stats->rx_err);
23386+ atomic_inc_unchecked(&vcc->stats->rx_err);
23387 }
23388 }
23389
23390@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23391 goto retry_here;
23392 }
23393
23394- atomic_inc(&vcc->stats->tx_err);
23395+ atomic_inc_unchecked(&vcc->stats->tx_err);
23396
23397 fore200e->tx_sat++;
23398 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23399diff -urNp linux-3.0.7/drivers/atm/he.c linux-3.0.7/drivers/atm/he.c
23400--- linux-3.0.7/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23401+++ linux-3.0.7/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23402@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23403
23404 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23405 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23406- atomic_inc(&vcc->stats->rx_drop);
23407+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23408 goto return_host_buffers;
23409 }
23410
23411@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23412 RBRQ_LEN_ERR(he_dev->rbrq_head)
23413 ? "LEN_ERR" : "",
23414 vcc->vpi, vcc->vci);
23415- atomic_inc(&vcc->stats->rx_err);
23416+ atomic_inc_unchecked(&vcc->stats->rx_err);
23417 goto return_host_buffers;
23418 }
23419
23420@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23421 vcc->push(vcc, skb);
23422 spin_lock(&he_dev->global_lock);
23423
23424- atomic_inc(&vcc->stats->rx);
23425+ atomic_inc_unchecked(&vcc->stats->rx);
23426
23427 return_host_buffers:
23428 ++pdus_assembled;
23429@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23430 tpd->vcc->pop(tpd->vcc, tpd->skb);
23431 else
23432 dev_kfree_skb_any(tpd->skb);
23433- atomic_inc(&tpd->vcc->stats->tx_err);
23434+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23435 }
23436 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23437 return;
23438@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23439 vcc->pop(vcc, skb);
23440 else
23441 dev_kfree_skb_any(skb);
23442- atomic_inc(&vcc->stats->tx_err);
23443+ atomic_inc_unchecked(&vcc->stats->tx_err);
23444 return -EINVAL;
23445 }
23446
23447@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23448 vcc->pop(vcc, skb);
23449 else
23450 dev_kfree_skb_any(skb);
23451- atomic_inc(&vcc->stats->tx_err);
23452+ atomic_inc_unchecked(&vcc->stats->tx_err);
23453 return -EINVAL;
23454 }
23455 #endif
23456@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23457 vcc->pop(vcc, skb);
23458 else
23459 dev_kfree_skb_any(skb);
23460- atomic_inc(&vcc->stats->tx_err);
23461+ atomic_inc_unchecked(&vcc->stats->tx_err);
23462 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23463 return -ENOMEM;
23464 }
23465@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23466 vcc->pop(vcc, skb);
23467 else
23468 dev_kfree_skb_any(skb);
23469- atomic_inc(&vcc->stats->tx_err);
23470+ atomic_inc_unchecked(&vcc->stats->tx_err);
23471 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23472 return -ENOMEM;
23473 }
23474@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23475 __enqueue_tpd(he_dev, tpd, cid);
23476 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23477
23478- atomic_inc(&vcc->stats->tx);
23479+ atomic_inc_unchecked(&vcc->stats->tx);
23480
23481 return 0;
23482 }
23483diff -urNp linux-3.0.7/drivers/atm/horizon.c linux-3.0.7/drivers/atm/horizon.c
23484--- linux-3.0.7/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23485+++ linux-3.0.7/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23486@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23487 {
23488 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23489 // VC layer stats
23490- atomic_inc(&vcc->stats->rx);
23491+ atomic_inc_unchecked(&vcc->stats->rx);
23492 __net_timestamp(skb);
23493 // end of our responsibility
23494 vcc->push (vcc, skb);
23495@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23496 dev->tx_iovec = NULL;
23497
23498 // VC layer stats
23499- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23500+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23501
23502 // free the skb
23503 hrz_kfree_skb (skb);
23504diff -urNp linux-3.0.7/drivers/atm/idt77252.c linux-3.0.7/drivers/atm/idt77252.c
23505--- linux-3.0.7/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23506+++ linux-3.0.7/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23507@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23508 else
23509 dev_kfree_skb(skb);
23510
23511- atomic_inc(&vcc->stats->tx);
23512+ atomic_inc_unchecked(&vcc->stats->tx);
23513 }
23514
23515 atomic_dec(&scq->used);
23516@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23517 if ((sb = dev_alloc_skb(64)) == NULL) {
23518 printk("%s: Can't allocate buffers for aal0.\n",
23519 card->name);
23520- atomic_add(i, &vcc->stats->rx_drop);
23521+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23522 break;
23523 }
23524 if (!atm_charge(vcc, sb->truesize)) {
23525 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23526 card->name);
23527- atomic_add(i - 1, &vcc->stats->rx_drop);
23528+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23529 dev_kfree_skb(sb);
23530 break;
23531 }
23532@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23533 ATM_SKB(sb)->vcc = vcc;
23534 __net_timestamp(sb);
23535 vcc->push(vcc, sb);
23536- atomic_inc(&vcc->stats->rx);
23537+ atomic_inc_unchecked(&vcc->stats->rx);
23538
23539 cell += ATM_CELL_PAYLOAD;
23540 }
23541@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23542 "(CDC: %08x)\n",
23543 card->name, len, rpp->len, readl(SAR_REG_CDC));
23544 recycle_rx_pool_skb(card, rpp);
23545- atomic_inc(&vcc->stats->rx_err);
23546+ atomic_inc_unchecked(&vcc->stats->rx_err);
23547 return;
23548 }
23549 if (stat & SAR_RSQE_CRC) {
23550 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23551 recycle_rx_pool_skb(card, rpp);
23552- atomic_inc(&vcc->stats->rx_err);
23553+ atomic_inc_unchecked(&vcc->stats->rx_err);
23554 return;
23555 }
23556 if (skb_queue_len(&rpp->queue) > 1) {
23557@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23558 RXPRINTK("%s: Can't alloc RX skb.\n",
23559 card->name);
23560 recycle_rx_pool_skb(card, rpp);
23561- atomic_inc(&vcc->stats->rx_err);
23562+ atomic_inc_unchecked(&vcc->stats->rx_err);
23563 return;
23564 }
23565 if (!atm_charge(vcc, skb->truesize)) {
23566@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23567 __net_timestamp(skb);
23568
23569 vcc->push(vcc, skb);
23570- atomic_inc(&vcc->stats->rx);
23571+ atomic_inc_unchecked(&vcc->stats->rx);
23572
23573 return;
23574 }
23575@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23576 __net_timestamp(skb);
23577
23578 vcc->push(vcc, skb);
23579- atomic_inc(&vcc->stats->rx);
23580+ atomic_inc_unchecked(&vcc->stats->rx);
23581
23582 if (skb->truesize > SAR_FB_SIZE_3)
23583 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23584@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23585 if (vcc->qos.aal != ATM_AAL0) {
23586 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23587 card->name, vpi, vci);
23588- atomic_inc(&vcc->stats->rx_drop);
23589+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23590 goto drop;
23591 }
23592
23593 if ((sb = dev_alloc_skb(64)) == NULL) {
23594 printk("%s: Can't allocate buffers for AAL0.\n",
23595 card->name);
23596- atomic_inc(&vcc->stats->rx_err);
23597+ atomic_inc_unchecked(&vcc->stats->rx_err);
23598 goto drop;
23599 }
23600
23601@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23602 ATM_SKB(sb)->vcc = vcc;
23603 __net_timestamp(sb);
23604 vcc->push(vcc, sb);
23605- atomic_inc(&vcc->stats->rx);
23606+ atomic_inc_unchecked(&vcc->stats->rx);
23607
23608 drop:
23609 skb_pull(queue, 64);
23610@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23611
23612 if (vc == NULL) {
23613 printk("%s: NULL connection in send().\n", card->name);
23614- atomic_inc(&vcc->stats->tx_err);
23615+ atomic_inc_unchecked(&vcc->stats->tx_err);
23616 dev_kfree_skb(skb);
23617 return -EINVAL;
23618 }
23619 if (!test_bit(VCF_TX, &vc->flags)) {
23620 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23621- atomic_inc(&vcc->stats->tx_err);
23622+ atomic_inc_unchecked(&vcc->stats->tx_err);
23623 dev_kfree_skb(skb);
23624 return -EINVAL;
23625 }
23626@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23627 break;
23628 default:
23629 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23630- atomic_inc(&vcc->stats->tx_err);
23631+ atomic_inc_unchecked(&vcc->stats->tx_err);
23632 dev_kfree_skb(skb);
23633 return -EINVAL;
23634 }
23635
23636 if (skb_shinfo(skb)->nr_frags != 0) {
23637 printk("%s: No scatter-gather yet.\n", card->name);
23638- atomic_inc(&vcc->stats->tx_err);
23639+ atomic_inc_unchecked(&vcc->stats->tx_err);
23640 dev_kfree_skb(skb);
23641 return -EINVAL;
23642 }
23643@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23644
23645 err = queue_skb(card, vc, skb, oam);
23646 if (err) {
23647- atomic_inc(&vcc->stats->tx_err);
23648+ atomic_inc_unchecked(&vcc->stats->tx_err);
23649 dev_kfree_skb(skb);
23650 return err;
23651 }
23652@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23653 skb = dev_alloc_skb(64);
23654 if (!skb) {
23655 printk("%s: Out of memory in send_oam().\n", card->name);
23656- atomic_inc(&vcc->stats->tx_err);
23657+ atomic_inc_unchecked(&vcc->stats->tx_err);
23658 return -ENOMEM;
23659 }
23660 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23661diff -urNp linux-3.0.7/drivers/atm/iphase.c linux-3.0.7/drivers/atm/iphase.c
23662--- linux-3.0.7/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23663+++ linux-3.0.7/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23664@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23665 status = (u_short) (buf_desc_ptr->desc_mode);
23666 if (status & (RX_CER | RX_PTE | RX_OFL))
23667 {
23668- atomic_inc(&vcc->stats->rx_err);
23669+ atomic_inc_unchecked(&vcc->stats->rx_err);
23670 IF_ERR(printk("IA: bad packet, dropping it");)
23671 if (status & RX_CER) {
23672 IF_ERR(printk(" cause: packet CRC error\n");)
23673@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23674 len = dma_addr - buf_addr;
23675 if (len > iadev->rx_buf_sz) {
23676 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23677- atomic_inc(&vcc->stats->rx_err);
23678+ atomic_inc_unchecked(&vcc->stats->rx_err);
23679 goto out_free_desc;
23680 }
23681
23682@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23683 ia_vcc = INPH_IA_VCC(vcc);
23684 if (ia_vcc == NULL)
23685 {
23686- atomic_inc(&vcc->stats->rx_err);
23687+ atomic_inc_unchecked(&vcc->stats->rx_err);
23688 dev_kfree_skb_any(skb);
23689 atm_return(vcc, atm_guess_pdu2truesize(len));
23690 goto INCR_DLE;
23691@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23692 if ((length > iadev->rx_buf_sz) || (length >
23693 (skb->len - sizeof(struct cpcs_trailer))))
23694 {
23695- atomic_inc(&vcc->stats->rx_err);
23696+ atomic_inc_unchecked(&vcc->stats->rx_err);
23697 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23698 length, skb->len);)
23699 dev_kfree_skb_any(skb);
23700@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23701
23702 IF_RX(printk("rx_dle_intr: skb push");)
23703 vcc->push(vcc,skb);
23704- atomic_inc(&vcc->stats->rx);
23705+ atomic_inc_unchecked(&vcc->stats->rx);
23706 iadev->rx_pkt_cnt++;
23707 }
23708 INCR_DLE:
23709@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23710 {
23711 struct k_sonet_stats *stats;
23712 stats = &PRIV(_ia_dev[board])->sonet_stats;
23713- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23714- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23715- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23716- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23717- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23718- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23719- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23720- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23721- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23722+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23723+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23724+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23725+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23726+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23727+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23728+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23729+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23730+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23731 }
23732 ia_cmds.status = 0;
23733 break;
23734@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23735 if ((desc == 0) || (desc > iadev->num_tx_desc))
23736 {
23737 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23738- atomic_inc(&vcc->stats->tx);
23739+ atomic_inc_unchecked(&vcc->stats->tx);
23740 if (vcc->pop)
23741 vcc->pop(vcc, skb);
23742 else
23743@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23744 ATM_DESC(skb) = vcc->vci;
23745 skb_queue_tail(&iadev->tx_dma_q, skb);
23746
23747- atomic_inc(&vcc->stats->tx);
23748+ atomic_inc_unchecked(&vcc->stats->tx);
23749 iadev->tx_pkt_cnt++;
23750 /* Increment transaction counter */
23751 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23752
23753 #if 0
23754 /* add flow control logic */
23755- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23756+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23757 if (iavcc->vc_desc_cnt > 10) {
23758 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23759 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23760diff -urNp linux-3.0.7/drivers/atm/lanai.c linux-3.0.7/drivers/atm/lanai.c
23761--- linux-3.0.7/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23762+++ linux-3.0.7/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23763@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23764 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23765 lanai_endtx(lanai, lvcc);
23766 lanai_free_skb(lvcc->tx.atmvcc, skb);
23767- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23768+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23769 }
23770
23771 /* Try to fill the buffer - don't call unless there is backlog */
23772@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23773 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23774 __net_timestamp(skb);
23775 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23776- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23777+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23778 out:
23779 lvcc->rx.buf.ptr = end;
23780 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23781@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23782 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23783 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23784 lanai->stats.service_rxnotaal5++;
23785- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23786+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23787 return 0;
23788 }
23789 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23790@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23791 int bytes;
23792 read_unlock(&vcc_sklist_lock);
23793 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23794- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23795+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23796 lvcc->stats.x.aal5.service_trash++;
23797 bytes = (SERVICE_GET_END(s) * 16) -
23798 (((unsigned long) lvcc->rx.buf.ptr) -
23799@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23800 }
23801 if (s & SERVICE_STREAM) {
23802 read_unlock(&vcc_sklist_lock);
23803- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23804+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23805 lvcc->stats.x.aal5.service_stream++;
23806 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23807 "PDU on VCI %d!\n", lanai->number, vci);
23808@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23809 return 0;
23810 }
23811 DPRINTK("got rx crc error on vci %d\n", vci);
23812- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23813+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23814 lvcc->stats.x.aal5.service_rxcrc++;
23815 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23816 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23817diff -urNp linux-3.0.7/drivers/atm/nicstar.c linux-3.0.7/drivers/atm/nicstar.c
23818--- linux-3.0.7/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23819+++ linux-3.0.7/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23820@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23821 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23822 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23823 card->index);
23824- atomic_inc(&vcc->stats->tx_err);
23825+ atomic_inc_unchecked(&vcc->stats->tx_err);
23826 dev_kfree_skb_any(skb);
23827 return -EINVAL;
23828 }
23829@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23830 if (!vc->tx) {
23831 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23832 card->index);
23833- atomic_inc(&vcc->stats->tx_err);
23834+ atomic_inc_unchecked(&vcc->stats->tx_err);
23835 dev_kfree_skb_any(skb);
23836 return -EINVAL;
23837 }
23838@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23839 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23840 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23841 card->index);
23842- atomic_inc(&vcc->stats->tx_err);
23843+ atomic_inc_unchecked(&vcc->stats->tx_err);
23844 dev_kfree_skb_any(skb);
23845 return -EINVAL;
23846 }
23847
23848 if (skb_shinfo(skb)->nr_frags != 0) {
23849 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23850- atomic_inc(&vcc->stats->tx_err);
23851+ atomic_inc_unchecked(&vcc->stats->tx_err);
23852 dev_kfree_skb_any(skb);
23853 return -EINVAL;
23854 }
23855@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23856 }
23857
23858 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23859- atomic_inc(&vcc->stats->tx_err);
23860+ atomic_inc_unchecked(&vcc->stats->tx_err);
23861 dev_kfree_skb_any(skb);
23862 return -EIO;
23863 }
23864- atomic_inc(&vcc->stats->tx);
23865+ atomic_inc_unchecked(&vcc->stats->tx);
23866
23867 return 0;
23868 }
23869@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23870 printk
23871 ("nicstar%d: Can't allocate buffers for aal0.\n",
23872 card->index);
23873- atomic_add(i, &vcc->stats->rx_drop);
23874+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23875 break;
23876 }
23877 if (!atm_charge(vcc, sb->truesize)) {
23878 RXPRINTK
23879 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23880 card->index);
23881- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23882+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23883 dev_kfree_skb_any(sb);
23884 break;
23885 }
23886@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23887 ATM_SKB(sb)->vcc = vcc;
23888 __net_timestamp(sb);
23889 vcc->push(vcc, sb);
23890- atomic_inc(&vcc->stats->rx);
23891+ atomic_inc_unchecked(&vcc->stats->rx);
23892 cell += ATM_CELL_PAYLOAD;
23893 }
23894
23895@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23896 if (iovb == NULL) {
23897 printk("nicstar%d: Out of iovec buffers.\n",
23898 card->index);
23899- atomic_inc(&vcc->stats->rx_drop);
23900+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23901 recycle_rx_buf(card, skb);
23902 return;
23903 }
23904@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23905 small or large buffer itself. */
23906 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23907 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23908- atomic_inc(&vcc->stats->rx_err);
23909+ atomic_inc_unchecked(&vcc->stats->rx_err);
23910 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23911 NS_MAX_IOVECS);
23912 NS_PRV_IOVCNT(iovb) = 0;
23913@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23914 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23915 card->index);
23916 which_list(card, skb);
23917- atomic_inc(&vcc->stats->rx_err);
23918+ atomic_inc_unchecked(&vcc->stats->rx_err);
23919 recycle_rx_buf(card, skb);
23920 vc->rx_iov = NULL;
23921 recycle_iov_buf(card, iovb);
23922@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23923 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23924 card->index);
23925 which_list(card, skb);
23926- atomic_inc(&vcc->stats->rx_err);
23927+ atomic_inc_unchecked(&vcc->stats->rx_err);
23928 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23929 NS_PRV_IOVCNT(iovb));
23930 vc->rx_iov = NULL;
23931@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23932 printk(" - PDU size mismatch.\n");
23933 else
23934 printk(".\n");
23935- atomic_inc(&vcc->stats->rx_err);
23936+ atomic_inc_unchecked(&vcc->stats->rx_err);
23937 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23938 NS_PRV_IOVCNT(iovb));
23939 vc->rx_iov = NULL;
23940@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23941 /* skb points to a small buffer */
23942 if (!atm_charge(vcc, skb->truesize)) {
23943 push_rxbufs(card, skb);
23944- atomic_inc(&vcc->stats->rx_drop);
23945+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23946 } else {
23947 skb_put(skb, len);
23948 dequeue_sm_buf(card, skb);
23949@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23950 ATM_SKB(skb)->vcc = vcc;
23951 __net_timestamp(skb);
23952 vcc->push(vcc, skb);
23953- atomic_inc(&vcc->stats->rx);
23954+ atomic_inc_unchecked(&vcc->stats->rx);
23955 }
23956 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23957 struct sk_buff *sb;
23958@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23959 if (len <= NS_SMBUFSIZE) {
23960 if (!atm_charge(vcc, sb->truesize)) {
23961 push_rxbufs(card, sb);
23962- atomic_inc(&vcc->stats->rx_drop);
23963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23964 } else {
23965 skb_put(sb, len);
23966 dequeue_sm_buf(card, sb);
23967@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23968 ATM_SKB(sb)->vcc = vcc;
23969 __net_timestamp(sb);
23970 vcc->push(vcc, sb);
23971- atomic_inc(&vcc->stats->rx);
23972+ atomic_inc_unchecked(&vcc->stats->rx);
23973 }
23974
23975 push_rxbufs(card, skb);
23976@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23977
23978 if (!atm_charge(vcc, skb->truesize)) {
23979 push_rxbufs(card, skb);
23980- atomic_inc(&vcc->stats->rx_drop);
23981+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23982 } else {
23983 dequeue_lg_buf(card, skb);
23984 #ifdef NS_USE_DESTRUCTORS
23985@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23986 ATM_SKB(skb)->vcc = vcc;
23987 __net_timestamp(skb);
23988 vcc->push(vcc, skb);
23989- atomic_inc(&vcc->stats->rx);
23990+ atomic_inc_unchecked(&vcc->stats->rx);
23991 }
23992
23993 push_rxbufs(card, sb);
23994@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23995 printk
23996 ("nicstar%d: Out of huge buffers.\n",
23997 card->index);
23998- atomic_inc(&vcc->stats->rx_drop);
23999+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24000 recycle_iovec_rx_bufs(card,
24001 (struct iovec *)
24002 iovb->data,
24003@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24004 card->hbpool.count++;
24005 } else
24006 dev_kfree_skb_any(hb);
24007- atomic_inc(&vcc->stats->rx_drop);
24008+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24009 } else {
24010 /* Copy the small buffer to the huge buffer */
24011 sb = (struct sk_buff *)iov->iov_base;
24012@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24013 #endif /* NS_USE_DESTRUCTORS */
24014 __net_timestamp(hb);
24015 vcc->push(vcc, hb);
24016- atomic_inc(&vcc->stats->rx);
24017+ atomic_inc_unchecked(&vcc->stats->rx);
24018 }
24019 }
24020
24021diff -urNp linux-3.0.7/drivers/atm/solos-pci.c linux-3.0.7/drivers/atm/solos-pci.c
24022--- linux-3.0.7/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
24023+++ linux-3.0.7/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
24024@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24025 }
24026 atm_charge(vcc, skb->truesize);
24027 vcc->push(vcc, skb);
24028- atomic_inc(&vcc->stats->rx);
24029+ atomic_inc_unchecked(&vcc->stats->rx);
24030 break;
24031
24032 case PKT_STATUS:
24033@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24034 char msg[500];
24035 char item[10];
24036
24037+ pax_track_stack();
24038+
24039 len = buf->len;
24040 for (i = 0; i < len; i++){
24041 if(i % 8 == 0)
24042@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24043 vcc = SKB_CB(oldskb)->vcc;
24044
24045 if (vcc) {
24046- atomic_inc(&vcc->stats->tx);
24047+ atomic_inc_unchecked(&vcc->stats->tx);
24048 solos_pop(vcc, oldskb);
24049 } else
24050 dev_kfree_skb_irq(oldskb);
24051diff -urNp linux-3.0.7/drivers/atm/suni.c linux-3.0.7/drivers/atm/suni.c
24052--- linux-3.0.7/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24053+++ linux-3.0.7/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24054@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24055
24056
24057 #define ADD_LIMITED(s,v) \
24058- atomic_add((v),&stats->s); \
24059- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24060+ atomic_add_unchecked((v),&stats->s); \
24061+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24062
24063
24064 static void suni_hz(unsigned long from_timer)
24065diff -urNp linux-3.0.7/drivers/atm/uPD98402.c linux-3.0.7/drivers/atm/uPD98402.c
24066--- linux-3.0.7/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24067+++ linux-3.0.7/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24068@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24069 struct sonet_stats tmp;
24070 int error = 0;
24071
24072- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24073+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24074 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24075 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24076 if (zero && !error) {
24077@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24078
24079
24080 #define ADD_LIMITED(s,v) \
24081- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24082- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24083- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24084+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24085+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24086+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24087
24088
24089 static void stat_event(struct atm_dev *dev)
24090@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24091 if (reason & uPD98402_INT_PFM) stat_event(dev);
24092 if (reason & uPD98402_INT_PCO) {
24093 (void) GET(PCOCR); /* clear interrupt cause */
24094- atomic_add(GET(HECCT),
24095+ atomic_add_unchecked(GET(HECCT),
24096 &PRIV(dev)->sonet_stats.uncorr_hcs);
24097 }
24098 if ((reason & uPD98402_INT_RFO) &&
24099@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24100 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24101 uPD98402_INT_LOS),PIMR); /* enable them */
24102 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24103- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24104- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24105- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24106+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24107+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24108+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24109 return 0;
24110 }
24111
24112diff -urNp linux-3.0.7/drivers/atm/zatm.c linux-3.0.7/drivers/atm/zatm.c
24113--- linux-3.0.7/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24114+++ linux-3.0.7/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24115@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24116 }
24117 if (!size) {
24118 dev_kfree_skb_irq(skb);
24119- if (vcc) atomic_inc(&vcc->stats->rx_err);
24120+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24121 continue;
24122 }
24123 if (!atm_charge(vcc,skb->truesize)) {
24124@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24125 skb->len = size;
24126 ATM_SKB(skb)->vcc = vcc;
24127 vcc->push(vcc,skb);
24128- atomic_inc(&vcc->stats->rx);
24129+ atomic_inc_unchecked(&vcc->stats->rx);
24130 }
24131 zout(pos & 0xffff,MTA(mbx));
24132 #if 0 /* probably a stupid idea */
24133@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24134 skb_queue_head(&zatm_vcc->backlog,skb);
24135 break;
24136 }
24137- atomic_inc(&vcc->stats->tx);
24138+ atomic_inc_unchecked(&vcc->stats->tx);
24139 wake_up(&zatm_vcc->tx_wait);
24140 }
24141
24142diff -urNp linux-3.0.7/drivers/base/devtmpfs.c linux-3.0.7/drivers/base/devtmpfs.c
24143--- linux-3.0.7/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24144+++ linux-3.0.7/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24145@@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24146 if (!dev_mnt)
24147 return 0;
24148
24149- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24150+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24151 if (err)
24152 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24153 else
24154diff -urNp linux-3.0.7/drivers/base/power/wakeup.c linux-3.0.7/drivers/base/power/wakeup.c
24155--- linux-3.0.7/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24156+++ linux-3.0.7/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24157@@ -29,14 +29,14 @@ bool events_check_enabled;
24158 * They need to be modified together atomically, so it's better to use one
24159 * atomic variable to hold them both.
24160 */
24161-static atomic_t combined_event_count = ATOMIC_INIT(0);
24162+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24163
24164 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24165 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24166
24167 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24168 {
24169- unsigned int comb = atomic_read(&combined_event_count);
24170+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24171
24172 *cnt = (comb >> IN_PROGRESS_BITS);
24173 *inpr = comb & MAX_IN_PROGRESS;
24174@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24175 ws->last_time = ktime_get();
24176
24177 /* Increment the counter of events in progress. */
24178- atomic_inc(&combined_event_count);
24179+ atomic_inc_unchecked(&combined_event_count);
24180 }
24181
24182 /**
24183@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24184 * Increment the counter of registered wakeup events and decrement the
24185 * couter of wakeup events in progress simultaneously.
24186 */
24187- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24188+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24189 }
24190
24191 /**
24192diff -urNp linux-3.0.7/drivers/block/cciss.c linux-3.0.7/drivers/block/cciss.c
24193--- linux-3.0.7/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24194+++ linux-3.0.7/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24195@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24196 int err;
24197 u32 cp;
24198
24199+ memset(&arg64, 0, sizeof(arg64));
24200+
24201 err = 0;
24202 err |=
24203 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24204@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24205 while (!list_empty(&h->reqQ)) {
24206 c = list_entry(h->reqQ.next, CommandList_struct, list);
24207 /* can't do anything if fifo is full */
24208- if ((h->access.fifo_full(h))) {
24209+ if ((h->access->fifo_full(h))) {
24210 dev_warn(&h->pdev->dev, "fifo full\n");
24211 break;
24212 }
24213@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24214 h->Qdepth--;
24215
24216 /* Tell the controller execute command */
24217- h->access.submit_command(h, c);
24218+ h->access->submit_command(h, c);
24219
24220 /* Put job onto the completed Q */
24221 addQ(&h->cmpQ, c);
24222@@ -3422,17 +3424,17 @@ startio:
24223
24224 static inline unsigned long get_next_completion(ctlr_info_t *h)
24225 {
24226- return h->access.command_completed(h);
24227+ return h->access->command_completed(h);
24228 }
24229
24230 static inline int interrupt_pending(ctlr_info_t *h)
24231 {
24232- return h->access.intr_pending(h);
24233+ return h->access->intr_pending(h);
24234 }
24235
24236 static inline long interrupt_not_for_us(ctlr_info_t *h)
24237 {
24238- return ((h->access.intr_pending(h) == 0) ||
24239+ return ((h->access->intr_pending(h) == 0) ||
24240 (h->interrupts_enabled == 0));
24241 }
24242
24243@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24244 u32 a;
24245
24246 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24247- return h->access.command_completed(h);
24248+ return h->access->command_completed(h);
24249
24250 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24251 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24252@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24253 trans_support & CFGTBL_Trans_use_short_tags);
24254
24255 /* Change the access methods to the performant access methods */
24256- h->access = SA5_performant_access;
24257+ h->access = &SA5_performant_access;
24258 h->transMethod = CFGTBL_Trans_Performant;
24259
24260 return;
24261@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24262 if (prod_index < 0)
24263 return -ENODEV;
24264 h->product_name = products[prod_index].product_name;
24265- h->access = *(products[prod_index].access);
24266+ h->access = products[prod_index].access;
24267
24268 if (cciss_board_disabled(h)) {
24269 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24270@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24271 }
24272
24273 /* make sure the board interrupts are off */
24274- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24275+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24276 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24277 if (rc)
24278 goto clean2;
24279@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24280 * fake ones to scoop up any residual completions.
24281 */
24282 spin_lock_irqsave(&h->lock, flags);
24283- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24284+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24285 spin_unlock_irqrestore(&h->lock, flags);
24286 free_irq(h->intr[PERF_MODE_INT], h);
24287 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24288@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24289 dev_info(&h->pdev->dev, "Board READY.\n");
24290 dev_info(&h->pdev->dev,
24291 "Waiting for stale completions to drain.\n");
24292- h->access.set_intr_mask(h, CCISS_INTR_ON);
24293+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24294 msleep(10000);
24295- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24296+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24297
24298 rc = controller_reset_failed(h->cfgtable);
24299 if (rc)
24300@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24301 cciss_scsi_setup(h);
24302
24303 /* Turn the interrupts on so we can service requests */
24304- h->access.set_intr_mask(h, CCISS_INTR_ON);
24305+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24306
24307 /* Get the firmware version */
24308 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24309@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24310 kfree(flush_buf);
24311 if (return_code != IO_OK)
24312 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24313- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24314+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24315 free_irq(h->intr[PERF_MODE_INT], h);
24316 }
24317
24318diff -urNp linux-3.0.7/drivers/block/cciss.h linux-3.0.7/drivers/block/cciss.h
24319--- linux-3.0.7/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24320+++ linux-3.0.7/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24321@@ -100,7 +100,7 @@ struct ctlr_info
24322 /* information about each logical volume */
24323 drive_info_struct *drv[CISS_MAX_LUN];
24324
24325- struct access_method access;
24326+ struct access_method *access;
24327
24328 /* queue and queue Info */
24329 struct list_head reqQ;
24330diff -urNp linux-3.0.7/drivers/block/cpqarray.c linux-3.0.7/drivers/block/cpqarray.c
24331--- linux-3.0.7/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24332+++ linux-3.0.7/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24333@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24334 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24335 goto Enomem4;
24336 }
24337- hba[i]->access.set_intr_mask(hba[i], 0);
24338+ hba[i]->access->set_intr_mask(hba[i], 0);
24339 if (request_irq(hba[i]->intr, do_ida_intr,
24340 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24341 {
24342@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24343 add_timer(&hba[i]->timer);
24344
24345 /* Enable IRQ now that spinlock and rate limit timer are set up */
24346- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24347+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24348
24349 for(j=0; j<NWD; j++) {
24350 struct gendisk *disk = ida_gendisk[i][j];
24351@@ -694,7 +694,7 @@ DBGINFO(
24352 for(i=0; i<NR_PRODUCTS; i++) {
24353 if (board_id == products[i].board_id) {
24354 c->product_name = products[i].product_name;
24355- c->access = *(products[i].access);
24356+ c->access = products[i].access;
24357 break;
24358 }
24359 }
24360@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24361 hba[ctlr]->intr = intr;
24362 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24363 hba[ctlr]->product_name = products[j].product_name;
24364- hba[ctlr]->access = *(products[j].access);
24365+ hba[ctlr]->access = products[j].access;
24366 hba[ctlr]->ctlr = ctlr;
24367 hba[ctlr]->board_id = board_id;
24368 hba[ctlr]->pci_dev = NULL; /* not PCI */
24369@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24370 struct scatterlist tmp_sg[SG_MAX];
24371 int i, dir, seg;
24372
24373+ pax_track_stack();
24374+
24375 queue_next:
24376 creq = blk_peek_request(q);
24377 if (!creq)
24378@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24379
24380 while((c = h->reqQ) != NULL) {
24381 /* Can't do anything if we're busy */
24382- if (h->access.fifo_full(h) == 0)
24383+ if (h->access->fifo_full(h) == 0)
24384 return;
24385
24386 /* Get the first entry from the request Q */
24387@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24388 h->Qdepth--;
24389
24390 /* Tell the controller to do our bidding */
24391- h->access.submit_command(h, c);
24392+ h->access->submit_command(h, c);
24393
24394 /* Get onto the completion Q */
24395 addQ(&h->cmpQ, c);
24396@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24397 unsigned long flags;
24398 __u32 a,a1;
24399
24400- istat = h->access.intr_pending(h);
24401+ istat = h->access->intr_pending(h);
24402 /* Is this interrupt for us? */
24403 if (istat == 0)
24404 return IRQ_NONE;
24405@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24406 */
24407 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24408 if (istat & FIFO_NOT_EMPTY) {
24409- while((a = h->access.command_completed(h))) {
24410+ while((a = h->access->command_completed(h))) {
24411 a1 = a; a &= ~3;
24412 if ((c = h->cmpQ) == NULL)
24413 {
24414@@ -1449,11 +1451,11 @@ static int sendcmd(
24415 /*
24416 * Disable interrupt
24417 */
24418- info_p->access.set_intr_mask(info_p, 0);
24419+ info_p->access->set_intr_mask(info_p, 0);
24420 /* Make sure there is room in the command FIFO */
24421 /* Actually it should be completely empty at this time. */
24422 for (i = 200000; i > 0; i--) {
24423- temp = info_p->access.fifo_full(info_p);
24424+ temp = info_p->access->fifo_full(info_p);
24425 if (temp != 0) {
24426 break;
24427 }
24428@@ -1466,7 +1468,7 @@ DBG(
24429 /*
24430 * Send the cmd
24431 */
24432- info_p->access.submit_command(info_p, c);
24433+ info_p->access->submit_command(info_p, c);
24434 complete = pollcomplete(ctlr);
24435
24436 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24437@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24438 * we check the new geometry. Then turn interrupts back on when
24439 * we're done.
24440 */
24441- host->access.set_intr_mask(host, 0);
24442+ host->access->set_intr_mask(host, 0);
24443 getgeometry(ctlr);
24444- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24445+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24446
24447 for(i=0; i<NWD; i++) {
24448 struct gendisk *disk = ida_gendisk[ctlr][i];
24449@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24450 /* Wait (up to 2 seconds) for a command to complete */
24451
24452 for (i = 200000; i > 0; i--) {
24453- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24454+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24455 if (done == 0) {
24456 udelay(10); /* a short fixed delay */
24457 } else
24458diff -urNp linux-3.0.7/drivers/block/cpqarray.h linux-3.0.7/drivers/block/cpqarray.h
24459--- linux-3.0.7/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24460+++ linux-3.0.7/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24461@@ -99,7 +99,7 @@ struct ctlr_info {
24462 drv_info_t drv[NWD];
24463 struct proc_dir_entry *proc;
24464
24465- struct access_method access;
24466+ struct access_method *access;
24467
24468 cmdlist_t *reqQ;
24469 cmdlist_t *cmpQ;
24470diff -urNp linux-3.0.7/drivers/block/DAC960.c linux-3.0.7/drivers/block/DAC960.c
24471--- linux-3.0.7/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24472+++ linux-3.0.7/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24473@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24474 unsigned long flags;
24475 int Channel, TargetID;
24476
24477+ pax_track_stack();
24478+
24479 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24480 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24481 sizeof(DAC960_SCSI_Inquiry_T) +
24482diff -urNp linux-3.0.7/drivers/block/drbd/drbd_int.h linux-3.0.7/drivers/block/drbd/drbd_int.h
24483--- linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24484+++ linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24485@@ -737,7 +737,7 @@ struct drbd_request;
24486 struct drbd_epoch {
24487 struct list_head list;
24488 unsigned int barrier_nr;
24489- atomic_t epoch_size; /* increased on every request added. */
24490+ atomic_unchecked_t epoch_size; /* increased on every request added. */
24491 atomic_t active; /* increased on every req. added, and dec on every finished. */
24492 unsigned long flags;
24493 };
24494@@ -1109,7 +1109,7 @@ struct drbd_conf {
24495 void *int_dig_in;
24496 void *int_dig_vv;
24497 wait_queue_head_t seq_wait;
24498- atomic_t packet_seq;
24499+ atomic_unchecked_t packet_seq;
24500 unsigned int peer_seq;
24501 spinlock_t peer_seq_lock;
24502 unsigned int minor;
24503@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24504
24505 static inline void drbd_tcp_cork(struct socket *sock)
24506 {
24507- int __user val = 1;
24508+ int val = 1;
24509 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24510- (char __user *)&val, sizeof(val));
24511+ (char __force_user *)&val, sizeof(val));
24512 }
24513
24514 static inline void drbd_tcp_uncork(struct socket *sock)
24515 {
24516- int __user val = 0;
24517+ int val = 0;
24518 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24519- (char __user *)&val, sizeof(val));
24520+ (char __force_user *)&val, sizeof(val));
24521 }
24522
24523 static inline void drbd_tcp_nodelay(struct socket *sock)
24524 {
24525- int __user val = 1;
24526+ int val = 1;
24527 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24528- (char __user *)&val, sizeof(val));
24529+ (char __force_user *)&val, sizeof(val));
24530 }
24531
24532 static inline void drbd_tcp_quickack(struct socket *sock)
24533 {
24534- int __user val = 2;
24535+ int val = 2;
24536 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24537- (char __user *)&val, sizeof(val));
24538+ (char __force_user *)&val, sizeof(val));
24539 }
24540
24541 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
24542diff -urNp linux-3.0.7/drivers/block/drbd/drbd_main.c linux-3.0.7/drivers/block/drbd/drbd_main.c
24543--- linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24544+++ linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24545@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24546 p.sector = sector;
24547 p.block_id = block_id;
24548 p.blksize = blksize;
24549- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24550+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24551
24552 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24553 return false;
24554@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24555 p.sector = cpu_to_be64(req->sector);
24556 p.block_id = (unsigned long)req;
24557 p.seq_num = cpu_to_be32(req->seq_num =
24558- atomic_add_return(1, &mdev->packet_seq));
24559+ atomic_add_return_unchecked(1, &mdev->packet_seq));
24560
24561 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24562
24563@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24564 atomic_set(&mdev->unacked_cnt, 0);
24565 atomic_set(&mdev->local_cnt, 0);
24566 atomic_set(&mdev->net_cnt, 0);
24567- atomic_set(&mdev->packet_seq, 0);
24568+ atomic_set_unchecked(&mdev->packet_seq, 0);
24569 atomic_set(&mdev->pp_in_use, 0);
24570 atomic_set(&mdev->pp_in_use_by_net, 0);
24571 atomic_set(&mdev->rs_sect_in, 0);
24572@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24573 mdev->receiver.t_state);
24574
24575 /* no need to lock it, I'm the only thread alive */
24576- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24577- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24578+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24579+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24580 mdev->al_writ_cnt =
24581 mdev->bm_writ_cnt =
24582 mdev->read_cnt =
24583diff -urNp linux-3.0.7/drivers/block/drbd/drbd_nl.c linux-3.0.7/drivers/block/drbd/drbd_nl.c
24584--- linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24585+++ linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24586@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24587 module_put(THIS_MODULE);
24588 }
24589
24590-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24591+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24592
24593 static unsigned short *
24594 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24595@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24596 cn_reply->id.idx = CN_IDX_DRBD;
24597 cn_reply->id.val = CN_VAL_DRBD;
24598
24599- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24600+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24601 cn_reply->ack = 0; /* not used here. */
24602 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24603 (int)((char *)tl - (char *)reply->tag_list);
24604@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24605 cn_reply->id.idx = CN_IDX_DRBD;
24606 cn_reply->id.val = CN_VAL_DRBD;
24607
24608- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24609+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24610 cn_reply->ack = 0; /* not used here. */
24611 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24612 (int)((char *)tl - (char *)reply->tag_list);
24613@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24614 cn_reply->id.idx = CN_IDX_DRBD;
24615 cn_reply->id.val = CN_VAL_DRBD;
24616
24617- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24618+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24619 cn_reply->ack = 0; // not used here.
24620 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24621 (int)((char*)tl - (char*)reply->tag_list);
24622@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24623 cn_reply->id.idx = CN_IDX_DRBD;
24624 cn_reply->id.val = CN_VAL_DRBD;
24625
24626- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24627+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24628 cn_reply->ack = 0; /* not used here. */
24629 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24630 (int)((char *)tl - (char *)reply->tag_list);
24631diff -urNp linux-3.0.7/drivers/block/drbd/drbd_receiver.c linux-3.0.7/drivers/block/drbd/drbd_receiver.c
24632--- linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24633+++ linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24634@@ -894,7 +894,7 @@ retry:
24635 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24636 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24637
24638- atomic_set(&mdev->packet_seq, 0);
24639+ atomic_set_unchecked(&mdev->packet_seq, 0);
24640 mdev->peer_seq = 0;
24641
24642 drbd_thread_start(&mdev->asender);
24643@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24644 do {
24645 next_epoch = NULL;
24646
24647- epoch_size = atomic_read(&epoch->epoch_size);
24648+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24649
24650 switch (ev & ~EV_CLEANUP) {
24651 case EV_PUT:
24652@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24653 rv = FE_DESTROYED;
24654 } else {
24655 epoch->flags = 0;
24656- atomic_set(&epoch->epoch_size, 0);
24657+ atomic_set_unchecked(&epoch->epoch_size, 0);
24658 /* atomic_set(&epoch->active, 0); is already zero */
24659 if (rv == FE_STILL_LIVE)
24660 rv = FE_RECYCLED;
24661@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24662 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24663 drbd_flush(mdev);
24664
24665- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24666+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24667 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24668 if (epoch)
24669 break;
24670 }
24671
24672 epoch = mdev->current_epoch;
24673- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24674+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24675
24676 D_ASSERT(atomic_read(&epoch->active) == 0);
24677 D_ASSERT(epoch->flags == 0);
24678@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24679 }
24680
24681 epoch->flags = 0;
24682- atomic_set(&epoch->epoch_size, 0);
24683+ atomic_set_unchecked(&epoch->epoch_size, 0);
24684 atomic_set(&epoch->active, 0);
24685
24686 spin_lock(&mdev->epoch_lock);
24687- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24688+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24689 list_add(&epoch->list, &mdev->current_epoch->list);
24690 mdev->current_epoch = epoch;
24691 mdev->epochs++;
24692@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24693 spin_unlock(&mdev->peer_seq_lock);
24694
24695 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24696- atomic_inc(&mdev->current_epoch->epoch_size);
24697+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24698 return drbd_drain_block(mdev, data_size);
24699 }
24700
24701@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24702
24703 spin_lock(&mdev->epoch_lock);
24704 e->epoch = mdev->current_epoch;
24705- atomic_inc(&e->epoch->epoch_size);
24706+ atomic_inc_unchecked(&e->epoch->epoch_size);
24707 atomic_inc(&e->epoch->active);
24708 spin_unlock(&mdev->epoch_lock);
24709
24710@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24711 D_ASSERT(list_empty(&mdev->done_ee));
24712
24713 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24714- atomic_set(&mdev->current_epoch->epoch_size, 0);
24715+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24716 D_ASSERT(list_empty(&mdev->current_epoch->list));
24717 }
24718
24719diff -urNp linux-3.0.7/drivers/block/loop.c linux-3.0.7/drivers/block/loop.c
24720--- linux-3.0.7/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24721+++ linux-3.0.7/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24722@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24723 mm_segment_t old_fs = get_fs();
24724
24725 set_fs(get_ds());
24726- bw = file->f_op->write(file, buf, len, &pos);
24727+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24728 set_fs(old_fs);
24729 if (likely(bw == len))
24730 return 0;
24731diff -urNp linux-3.0.7/drivers/block/nbd.c linux-3.0.7/drivers/block/nbd.c
24732--- linux-3.0.7/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24733+++ linux-3.0.7/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24734@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24735 struct kvec iov;
24736 sigset_t blocked, oldset;
24737
24738+ pax_track_stack();
24739+
24740 if (unlikely(!sock)) {
24741 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24742 lo->disk->disk_name, (send ? "send" : "recv"));
24743@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24744 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24745 unsigned int cmd, unsigned long arg)
24746 {
24747+ pax_track_stack();
24748+
24749 switch (cmd) {
24750 case NBD_DISCONNECT: {
24751 struct request sreq;
24752diff -urNp linux-3.0.7/drivers/char/agp/frontend.c linux-3.0.7/drivers/char/agp/frontend.c
24753--- linux-3.0.7/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24754+++ linux-3.0.7/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24755@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24756 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24757 return -EFAULT;
24758
24759- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24760+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24761 return -EFAULT;
24762
24763 client = agp_find_client_by_pid(reserve.pid);
24764diff -urNp linux-3.0.7/drivers/char/briq_panel.c linux-3.0.7/drivers/char/briq_panel.c
24765--- linux-3.0.7/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24766+++ linux-3.0.7/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24767@@ -9,6 +9,7 @@
24768 #include <linux/types.h>
24769 #include <linux/errno.h>
24770 #include <linux/tty.h>
24771+#include <linux/mutex.h>
24772 #include <linux/timer.h>
24773 #include <linux/kernel.h>
24774 #include <linux/wait.h>
24775@@ -34,6 +35,7 @@ static int vfd_is_open;
24776 static unsigned char vfd[40];
24777 static int vfd_cursor;
24778 static unsigned char ledpb, led;
24779+static DEFINE_MUTEX(vfd_mutex);
24780
24781 static void update_vfd(void)
24782 {
24783@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24784 if (!vfd_is_open)
24785 return -EBUSY;
24786
24787+ mutex_lock(&vfd_mutex);
24788 for (;;) {
24789 char c;
24790 if (!indx)
24791 break;
24792- if (get_user(c, buf))
24793+ if (get_user(c, buf)) {
24794+ mutex_unlock(&vfd_mutex);
24795 return -EFAULT;
24796+ }
24797 if (esc) {
24798 set_led(c);
24799 esc = 0;
24800@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24801 buf++;
24802 }
24803 update_vfd();
24804+ mutex_unlock(&vfd_mutex);
24805
24806 return len;
24807 }
24808diff -urNp linux-3.0.7/drivers/char/genrtc.c linux-3.0.7/drivers/char/genrtc.c
24809--- linux-3.0.7/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24810+++ linux-3.0.7/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24811@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24812 switch (cmd) {
24813
24814 case RTC_PLL_GET:
24815+ memset(&pll, 0, sizeof(pll));
24816 if (get_rtc_pll(&pll))
24817 return -EINVAL;
24818 else
24819diff -urNp linux-3.0.7/drivers/char/hpet.c linux-3.0.7/drivers/char/hpet.c
24820--- linux-3.0.7/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24821+++ linux-3.0.7/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24822@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24823 }
24824
24825 static int
24826-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24827+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24828 struct hpet_info *info)
24829 {
24830 struct hpet_timer __iomem *timer;
24831diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c
24832--- linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24833+++ linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24834@@ -415,7 +415,7 @@ struct ipmi_smi {
24835 struct proc_dir_entry *proc_dir;
24836 char proc_dir_name[10];
24837
24838- atomic_t stats[IPMI_NUM_STATS];
24839+ atomic_unchecked_t stats[IPMI_NUM_STATS];
24840
24841 /*
24842 * run_to_completion duplicate of smb_info, smi_info
24843@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24844
24845
24846 #define ipmi_inc_stat(intf, stat) \
24847- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24848+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24849 #define ipmi_get_stat(intf, stat) \
24850- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24851+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24852
24853 static int is_lan_addr(struct ipmi_addr *addr)
24854 {
24855@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24856 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24857 init_waitqueue_head(&intf->waitq);
24858 for (i = 0; i < IPMI_NUM_STATS; i++)
24859- atomic_set(&intf->stats[i], 0);
24860+ atomic_set_unchecked(&intf->stats[i], 0);
24861
24862 intf->proc_dir = NULL;
24863
24864@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24865 struct ipmi_smi_msg smi_msg;
24866 struct ipmi_recv_msg recv_msg;
24867
24868+ pax_track_stack();
24869+
24870 si = (struct ipmi_system_interface_addr *) &addr;
24871 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24872 si->channel = IPMI_BMC_CHANNEL;
24873diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c
24874--- linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24875+++ linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24876@@ -277,7 +277,7 @@ struct smi_info {
24877 unsigned char slave_addr;
24878
24879 /* Counters and things for the proc filesystem. */
24880- atomic_t stats[SI_NUM_STATS];
24881+ atomic_unchecked_t stats[SI_NUM_STATS];
24882
24883 struct task_struct *thread;
24884
24885@@ -286,9 +286,9 @@ struct smi_info {
24886 };
24887
24888 #define smi_inc_stat(smi, stat) \
24889- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24890+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24891 #define smi_get_stat(smi, stat) \
24892- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24893+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24894
24895 #define SI_MAX_PARMS 4
24896
24897@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24898 atomic_set(&new_smi->req_events, 0);
24899 new_smi->run_to_completion = 0;
24900 for (i = 0; i < SI_NUM_STATS; i++)
24901- atomic_set(&new_smi->stats[i], 0);
24902+ atomic_set_unchecked(&new_smi->stats[i], 0);
24903
24904 new_smi->interrupt_disabled = 1;
24905 atomic_set(&new_smi->stop_operation, 0);
24906diff -urNp linux-3.0.7/drivers/char/Kconfig linux-3.0.7/drivers/char/Kconfig
24907--- linux-3.0.7/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24908+++ linux-3.0.7/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24909@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24910
24911 config DEVKMEM
24912 bool "/dev/kmem virtual device support"
24913- default y
24914+ default n
24915+ depends on !GRKERNSEC_KMEM
24916 help
24917 Say Y here if you want to support the /dev/kmem device. The
24918 /dev/kmem device is rarely used, but can be used for certain
24919@@ -596,6 +597,7 @@ config DEVPORT
24920 bool
24921 depends on !M68K
24922 depends on ISA || PCI
24923+ depends on !GRKERNSEC_KMEM
24924 default y
24925
24926 source "drivers/s390/char/Kconfig"
24927diff -urNp linux-3.0.7/drivers/char/mbcs.c linux-3.0.7/drivers/char/mbcs.c
24928--- linux-3.0.7/drivers/char/mbcs.c 2011-07-21 22:17:23.000000000 -0400
24929+++ linux-3.0.7/drivers/char/mbcs.c 2011-10-11 10:44:33.000000000 -0400
24930@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
24931 return 0;
24932 }
24933
24934-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
24935+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
24936 {
24937 .part_num = MBCS_PART_NUM,
24938 .mfg_num = MBCS_MFG_NUM,
24939diff -urNp linux-3.0.7/drivers/char/mem.c linux-3.0.7/drivers/char/mem.c
24940--- linux-3.0.7/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24941+++ linux-3.0.7/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24942@@ -18,6 +18,7 @@
24943 #include <linux/raw.h>
24944 #include <linux/tty.h>
24945 #include <linux/capability.h>
24946+#include <linux/security.h>
24947 #include <linux/ptrace.h>
24948 #include <linux/device.h>
24949 #include <linux/highmem.h>
24950@@ -34,6 +35,10 @@
24951 # include <linux/efi.h>
24952 #endif
24953
24954+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24955+extern struct file_operations grsec_fops;
24956+#endif
24957+
24958 static inline unsigned long size_inside_page(unsigned long start,
24959 unsigned long size)
24960 {
24961@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24962
24963 while (cursor < to) {
24964 if (!devmem_is_allowed(pfn)) {
24965+#ifdef CONFIG_GRKERNSEC_KMEM
24966+ gr_handle_mem_readwrite(from, to);
24967+#else
24968 printk(KERN_INFO
24969 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24970 current->comm, from, to);
24971+#endif
24972 return 0;
24973 }
24974 cursor += PAGE_SIZE;
24975@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24976 }
24977 return 1;
24978 }
24979+#elif defined(CONFIG_GRKERNSEC_KMEM)
24980+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24981+{
24982+ return 0;
24983+}
24984 #else
24985 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24986 {
24987@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24988
24989 while (count > 0) {
24990 unsigned long remaining;
24991+ char *temp;
24992
24993 sz = size_inside_page(p, count);
24994
24995@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24996 if (!ptr)
24997 return -EFAULT;
24998
24999- remaining = copy_to_user(buf, ptr, sz);
25000+#ifdef CONFIG_PAX_USERCOPY
25001+ temp = kmalloc(sz, GFP_KERNEL);
25002+ if (!temp) {
25003+ unxlate_dev_mem_ptr(p, ptr);
25004+ return -ENOMEM;
25005+ }
25006+ memcpy(temp, ptr, sz);
25007+#else
25008+ temp = ptr;
25009+#endif
25010+
25011+ remaining = copy_to_user(buf, temp, sz);
25012+
25013+#ifdef CONFIG_PAX_USERCOPY
25014+ kfree(temp);
25015+#endif
25016+
25017 unxlate_dev_mem_ptr(p, ptr);
25018 if (remaining)
25019 return -EFAULT;
25020@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25021 size_t count, loff_t *ppos)
25022 {
25023 unsigned long p = *ppos;
25024- ssize_t low_count, read, sz;
25025+ ssize_t low_count, read, sz, err = 0;
25026 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25027- int err = 0;
25028
25029 read = 0;
25030 if (p < (unsigned long) high_memory) {
25031@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25032 }
25033 #endif
25034 while (low_count > 0) {
25035+ char *temp;
25036+
25037 sz = size_inside_page(p, low_count);
25038
25039 /*
25040@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25041 */
25042 kbuf = xlate_dev_kmem_ptr((char *)p);
25043
25044- if (copy_to_user(buf, kbuf, sz))
25045+#ifdef CONFIG_PAX_USERCOPY
25046+ temp = kmalloc(sz, GFP_KERNEL);
25047+ if (!temp)
25048+ return -ENOMEM;
25049+ memcpy(temp, kbuf, sz);
25050+#else
25051+ temp = kbuf;
25052+#endif
25053+
25054+ err = copy_to_user(buf, temp, sz);
25055+
25056+#ifdef CONFIG_PAX_USERCOPY
25057+ kfree(temp);
25058+#endif
25059+
25060+ if (err)
25061 return -EFAULT;
25062 buf += sz;
25063 p += sz;
25064@@ -866,6 +913,9 @@ static const struct memdev {
25065 #ifdef CONFIG_CRASH_DUMP
25066 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25067 #endif
25068+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25069+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25070+#endif
25071 };
25072
25073 static int memory_open(struct inode *inode, struct file *filp)
25074diff -urNp linux-3.0.7/drivers/char/nvram.c linux-3.0.7/drivers/char/nvram.c
25075--- linux-3.0.7/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25076+++ linux-3.0.7/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25077@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25078
25079 spin_unlock_irq(&rtc_lock);
25080
25081- if (copy_to_user(buf, contents, tmp - contents))
25082+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25083 return -EFAULT;
25084
25085 *ppos = i;
25086diff -urNp linux-3.0.7/drivers/char/random.c linux-3.0.7/drivers/char/random.c
25087--- linux-3.0.7/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
25088+++ linux-3.0.7/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25089@@ -261,8 +261,13 @@
25090 /*
25091 * Configuration information
25092 */
25093+#ifdef CONFIG_GRKERNSEC_RANDNET
25094+#define INPUT_POOL_WORDS 512
25095+#define OUTPUT_POOL_WORDS 128
25096+#else
25097 #define INPUT_POOL_WORDS 128
25098 #define OUTPUT_POOL_WORDS 32
25099+#endif
25100 #define SEC_XFER_SIZE 512
25101 #define EXTRACT_SIZE 10
25102
25103@@ -300,10 +305,17 @@ static struct poolinfo {
25104 int poolwords;
25105 int tap1, tap2, tap3, tap4, tap5;
25106 } poolinfo_table[] = {
25107+#ifdef CONFIG_GRKERNSEC_RANDNET
25108+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25109+ { 512, 411, 308, 208, 104, 1 },
25110+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25111+ { 128, 103, 76, 51, 25, 1 },
25112+#else
25113 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25114 { 128, 103, 76, 51, 25, 1 },
25115 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25116 { 32, 26, 20, 14, 7, 1 },
25117+#endif
25118 #if 0
25119 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25120 { 2048, 1638, 1231, 819, 411, 1 },
25121@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25122
25123 extract_buf(r, tmp);
25124 i = min_t(int, nbytes, EXTRACT_SIZE);
25125- if (copy_to_user(buf, tmp, i)) {
25126+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25127 ret = -EFAULT;
25128 break;
25129 }
25130@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25131 #include <linux/sysctl.h>
25132
25133 static int min_read_thresh = 8, min_write_thresh;
25134-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25135+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25136 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25137 static char sysctl_bootid[16];
25138
25139diff -urNp linux-3.0.7/drivers/char/sonypi.c linux-3.0.7/drivers/char/sonypi.c
25140--- linux-3.0.7/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25141+++ linux-3.0.7/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25142@@ -55,6 +55,7 @@
25143 #include <asm/uaccess.h>
25144 #include <asm/io.h>
25145 #include <asm/system.h>
25146+#include <asm/local.h>
25147
25148 #include <linux/sonypi.h>
25149
25150@@ -491,7 +492,7 @@ static struct sonypi_device {
25151 spinlock_t fifo_lock;
25152 wait_queue_head_t fifo_proc_list;
25153 struct fasync_struct *fifo_async;
25154- int open_count;
25155+ local_t open_count;
25156 int model;
25157 struct input_dev *input_jog_dev;
25158 struct input_dev *input_key_dev;
25159@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25160 static int sonypi_misc_release(struct inode *inode, struct file *file)
25161 {
25162 mutex_lock(&sonypi_device.lock);
25163- sonypi_device.open_count--;
25164+ local_dec(&sonypi_device.open_count);
25165 mutex_unlock(&sonypi_device.lock);
25166 return 0;
25167 }
25168@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25169 {
25170 mutex_lock(&sonypi_device.lock);
25171 /* Flush input queue on first open */
25172- if (!sonypi_device.open_count)
25173+ if (!local_read(&sonypi_device.open_count))
25174 kfifo_reset(&sonypi_device.fifo);
25175- sonypi_device.open_count++;
25176+ local_inc(&sonypi_device.open_count);
25177 mutex_unlock(&sonypi_device.lock);
25178
25179 return 0;
25180diff -urNp linux-3.0.7/drivers/char/tpm/tpm_bios.c linux-3.0.7/drivers/char/tpm/tpm_bios.c
25181--- linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25182+++ linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25183@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25184 event = addr;
25185
25186 if ((event->event_type == 0 && event->event_size == 0) ||
25187- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25188+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25189 return NULL;
25190
25191 return addr;
25192@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25193 return NULL;
25194
25195 if ((event->event_type == 0 && event->event_size == 0) ||
25196- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25197+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25198 return NULL;
25199
25200 (*pos)++;
25201@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25202 int i;
25203
25204 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25205- seq_putc(m, data[i]);
25206+ if (!seq_putc(m, data[i]))
25207+ return -EFAULT;
25208
25209 return 0;
25210 }
25211@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25212 log->bios_event_log_end = log->bios_event_log + len;
25213
25214 virt = acpi_os_map_memory(start, len);
25215+ if (!virt) {
25216+ kfree(log->bios_event_log);
25217+ log->bios_event_log = NULL;
25218+ return -EFAULT;
25219+ }
25220
25221- memcpy(log->bios_event_log, virt, len);
25222+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25223
25224 acpi_os_unmap_memory(virt, len);
25225 return 0;
25226diff -urNp linux-3.0.7/drivers/char/tpm/tpm.c linux-3.0.7/drivers/char/tpm/tpm.c
25227--- linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:54:53.000000000 -0400
25228+++ linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:55:27.000000000 -0400
25229@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25230 chip->vendor.req_complete_val)
25231 goto out_recv;
25232
25233- if ((status == chip->vendor.req_canceled)) {
25234+ if (status == chip->vendor.req_canceled) {
25235 dev_err(chip->dev, "Operation Canceled\n");
25236 rc = -ECANCELED;
25237 goto out;
25238@@ -847,6 +847,8 @@ ssize_t tpm_show_pubek(struct device *de
25239
25240 struct tpm_chip *chip = dev_get_drvdata(dev);
25241
25242+ pax_track_stack();
25243+
25244 tpm_cmd.header.in = tpm_readpubek_header;
25245 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25246 "attempting to read the PUBEK");
25247diff -urNp linux-3.0.7/drivers/char/virtio_console.c linux-3.0.7/drivers/char/virtio_console.c
25248--- linux-3.0.7/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25249+++ linux-3.0.7/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25250@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25251 if (to_user) {
25252 ssize_t ret;
25253
25254- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25255+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25256 if (ret)
25257 return -EFAULT;
25258 } else {
25259@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25260 if (!port_has_data(port) && !port->host_connected)
25261 return 0;
25262
25263- return fill_readbuf(port, ubuf, count, true);
25264+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25265 }
25266
25267 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25268diff -urNp linux-3.0.7/drivers/crypto/hifn_795x.c linux-3.0.7/drivers/crypto/hifn_795x.c
25269--- linux-3.0.7/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25270+++ linux-3.0.7/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25271@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25272 0xCA, 0x34, 0x2B, 0x2E};
25273 struct scatterlist sg;
25274
25275+ pax_track_stack();
25276+
25277 memset(src, 0, sizeof(src));
25278 memset(ctx.key, 0, sizeof(ctx.key));
25279
25280diff -urNp linux-3.0.7/drivers/crypto/padlock-aes.c linux-3.0.7/drivers/crypto/padlock-aes.c
25281--- linux-3.0.7/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25282+++ linux-3.0.7/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25283@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25284 struct crypto_aes_ctx gen_aes;
25285 int cpu;
25286
25287+ pax_track_stack();
25288+
25289 if (key_len % 8) {
25290 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25291 return -EINVAL;
25292diff -urNp linux-3.0.7/drivers/dma/ioat/dma_v3.c linux-3.0.7/drivers/dma/ioat/dma_v3.c
25293--- linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-07-21 22:17:23.000000000 -0400
25294+++ linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-10-11 10:44:33.000000000 -0400
25295@@ -73,10 +73,10 @@
25296 /* provide a lookup table for setting the source address in the base or
25297 * extended descriptor of an xor or pq descriptor
25298 */
25299-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
25300-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
25301-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
25302-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
25303+static const u8 xor_idx_to_desc = 0xd0;
25304+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
25305+static const u8 pq_idx_to_desc = 0xf8;
25306+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
25307
25308 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
25309 {
25310diff -urNp linux-3.0.7/drivers/edac/amd64_edac.c linux-3.0.7/drivers/edac/amd64_edac.c
25311--- linux-3.0.7/drivers/edac/amd64_edac.c 2011-07-21 22:17:23.000000000 -0400
25312+++ linux-3.0.7/drivers/edac/amd64_edac.c 2011-10-11 10:44:33.000000000 -0400
25313@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25314 * PCI core identifies what devices are on a system during boot, and then
25315 * inquiry this table to see if this driver is for a given device found.
25316 */
25317-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25318+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25319 {
25320 .vendor = PCI_VENDOR_ID_AMD,
25321 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25322diff -urNp linux-3.0.7/drivers/edac/amd76x_edac.c linux-3.0.7/drivers/edac/amd76x_edac.c
25323--- linux-3.0.7/drivers/edac/amd76x_edac.c 2011-07-21 22:17:23.000000000 -0400
25324+++ linux-3.0.7/drivers/edac/amd76x_edac.c 2011-10-11 10:44:33.000000000 -0400
25325@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25326 edac_mc_free(mci);
25327 }
25328
25329-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25330+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25331 {
25332 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25333 AMD762},
25334diff -urNp linux-3.0.7/drivers/edac/e752x_edac.c linux-3.0.7/drivers/edac/e752x_edac.c
25335--- linux-3.0.7/drivers/edac/e752x_edac.c 2011-07-21 22:17:23.000000000 -0400
25336+++ linux-3.0.7/drivers/edac/e752x_edac.c 2011-10-11 10:44:33.000000000 -0400
25337@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25338 edac_mc_free(mci);
25339 }
25340
25341-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25342+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25343 {
25344 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25345 E7520},
25346diff -urNp linux-3.0.7/drivers/edac/e7xxx_edac.c linux-3.0.7/drivers/edac/e7xxx_edac.c
25347--- linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-07-21 22:17:23.000000000 -0400
25348+++ linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-10-11 10:44:33.000000000 -0400
25349@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25350 edac_mc_free(mci);
25351 }
25352
25353-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25354+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25355 {
25356 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25357 E7205},
25358diff -urNp linux-3.0.7/drivers/edac/edac_pci_sysfs.c linux-3.0.7/drivers/edac/edac_pci_sysfs.c
25359--- linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25360+++ linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25361@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25362 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25363 static int edac_pci_poll_msec = 1000; /* one second workq period */
25364
25365-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25366-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25367+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25368+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25369
25370 static struct kobject *edac_pci_top_main_kobj;
25371 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25372@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25373 edac_printk(KERN_CRIT, EDAC_PCI,
25374 "Signaled System Error on %s\n",
25375 pci_name(dev));
25376- atomic_inc(&pci_nonparity_count);
25377+ atomic_inc_unchecked(&pci_nonparity_count);
25378 }
25379
25380 if (status & (PCI_STATUS_PARITY)) {
25381@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25382 "Master Data Parity Error on %s\n",
25383 pci_name(dev));
25384
25385- atomic_inc(&pci_parity_count);
25386+ atomic_inc_unchecked(&pci_parity_count);
25387 }
25388
25389 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25390@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25391 "Detected Parity Error on %s\n",
25392 pci_name(dev));
25393
25394- atomic_inc(&pci_parity_count);
25395+ atomic_inc_unchecked(&pci_parity_count);
25396 }
25397 }
25398
25399@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25400 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25401 "Signaled System Error on %s\n",
25402 pci_name(dev));
25403- atomic_inc(&pci_nonparity_count);
25404+ atomic_inc_unchecked(&pci_nonparity_count);
25405 }
25406
25407 if (status & (PCI_STATUS_PARITY)) {
25408@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25409 "Master Data Parity Error on "
25410 "%s\n", pci_name(dev));
25411
25412- atomic_inc(&pci_parity_count);
25413+ atomic_inc_unchecked(&pci_parity_count);
25414 }
25415
25416 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25417@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25418 "Detected Parity Error on %s\n",
25419 pci_name(dev));
25420
25421- atomic_inc(&pci_parity_count);
25422+ atomic_inc_unchecked(&pci_parity_count);
25423 }
25424 }
25425 }
25426@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25427 if (!check_pci_errors)
25428 return;
25429
25430- before_count = atomic_read(&pci_parity_count);
25431+ before_count = atomic_read_unchecked(&pci_parity_count);
25432
25433 /* scan all PCI devices looking for a Parity Error on devices and
25434 * bridges.
25435@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25436 /* Only if operator has selected panic on PCI Error */
25437 if (edac_pci_get_panic_on_pe()) {
25438 /* If the count is different 'after' from 'before' */
25439- if (before_count != atomic_read(&pci_parity_count))
25440+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25441 panic("EDAC: PCI Parity Error");
25442 }
25443 }
25444diff -urNp linux-3.0.7/drivers/edac/i3000_edac.c linux-3.0.7/drivers/edac/i3000_edac.c
25445--- linux-3.0.7/drivers/edac/i3000_edac.c 2011-07-21 22:17:23.000000000 -0400
25446+++ linux-3.0.7/drivers/edac/i3000_edac.c 2011-10-11 10:44:33.000000000 -0400
25447@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25448 edac_mc_free(mci);
25449 }
25450
25451-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25452+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25453 {
25454 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25455 I3000},
25456diff -urNp linux-3.0.7/drivers/edac/i3200_edac.c linux-3.0.7/drivers/edac/i3200_edac.c
25457--- linux-3.0.7/drivers/edac/i3200_edac.c 2011-07-21 22:17:23.000000000 -0400
25458+++ linux-3.0.7/drivers/edac/i3200_edac.c 2011-10-11 10:44:33.000000000 -0400
25459@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25460 edac_mc_free(mci);
25461 }
25462
25463-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25464+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25465 {
25466 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25467 I3200},
25468diff -urNp linux-3.0.7/drivers/edac/i5000_edac.c linux-3.0.7/drivers/edac/i5000_edac.c
25469--- linux-3.0.7/drivers/edac/i5000_edac.c 2011-07-21 22:17:23.000000000 -0400
25470+++ linux-3.0.7/drivers/edac/i5000_edac.c 2011-10-11 10:44:33.000000000 -0400
25471@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25472 *
25473 * The "E500P" device is the first device supported.
25474 */
25475-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25476+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25477 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
25478 .driver_data = I5000P},
25479
25480diff -urNp linux-3.0.7/drivers/edac/i5100_edac.c linux-3.0.7/drivers/edac/i5100_edac.c
25481--- linux-3.0.7/drivers/edac/i5100_edac.c 2011-07-21 22:17:23.000000000 -0400
25482+++ linux-3.0.7/drivers/edac/i5100_edac.c 2011-10-11 10:44:33.000000000 -0400
25483@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
25484 edac_mc_free(mci);
25485 }
25486
25487-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
25488+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
25489 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
25490 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
25491 { 0, }
25492diff -urNp linux-3.0.7/drivers/edac/i5400_edac.c linux-3.0.7/drivers/edac/i5400_edac.c
25493--- linux-3.0.7/drivers/edac/i5400_edac.c 2011-07-21 22:17:23.000000000 -0400
25494+++ linux-3.0.7/drivers/edac/i5400_edac.c 2011-10-11 10:44:33.000000000 -0400
25495@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
25496 *
25497 * The "E500P" device is the first device supported.
25498 */
25499-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
25500+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
25501 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
25502 {0,} /* 0 terminated list. */
25503 };
25504diff -urNp linux-3.0.7/drivers/edac/i7300_edac.c linux-3.0.7/drivers/edac/i7300_edac.c
25505--- linux-3.0.7/drivers/edac/i7300_edac.c 2011-07-21 22:17:23.000000000 -0400
25506+++ linux-3.0.7/drivers/edac/i7300_edac.c 2011-10-11 10:44:33.000000000 -0400
25507@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
25508 *
25509 * Has only 8086:360c PCI ID
25510 */
25511-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
25512+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
25513 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
25514 {0,} /* 0 terminated list. */
25515 };
25516diff -urNp linux-3.0.7/drivers/edac/i7core_edac.c linux-3.0.7/drivers/edac/i7core_edac.c
25517--- linux-3.0.7/drivers/edac/i7core_edac.c 2011-09-02 18:11:26.000000000 -0400
25518+++ linux-3.0.7/drivers/edac/i7core_edac.c 2011-10-11 10:44:33.000000000 -0400
25519@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
25520 /*
25521 * pci_device_id table for which devices we are looking for
25522 */
25523-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
25524+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
25525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
25526 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
25527 {0,} /* 0 terminated list. */
25528diff -urNp linux-3.0.7/drivers/edac/i82443bxgx_edac.c linux-3.0.7/drivers/edac/i82443bxgx_edac.c
25529--- linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-07-21 22:17:23.000000000 -0400
25530+++ linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-10-11 10:44:33.000000000 -0400
25531@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
25532
25533 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
25534
25535-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
25536+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
25537 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
25538 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
25539 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
25540diff -urNp linux-3.0.7/drivers/edac/i82860_edac.c linux-3.0.7/drivers/edac/i82860_edac.c
25541--- linux-3.0.7/drivers/edac/i82860_edac.c 2011-07-21 22:17:23.000000000 -0400
25542+++ linux-3.0.7/drivers/edac/i82860_edac.c 2011-10-11 10:44:33.000000000 -0400
25543@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
25544 edac_mc_free(mci);
25545 }
25546
25547-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
25548+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
25549 {
25550 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25551 I82860},
25552diff -urNp linux-3.0.7/drivers/edac/i82875p_edac.c linux-3.0.7/drivers/edac/i82875p_edac.c
25553--- linux-3.0.7/drivers/edac/i82875p_edac.c 2011-07-21 22:17:23.000000000 -0400
25554+++ linux-3.0.7/drivers/edac/i82875p_edac.c 2011-10-11 10:44:33.000000000 -0400
25555@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
25556 edac_mc_free(mci);
25557 }
25558
25559-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
25560+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
25561 {
25562 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25563 I82875P},
25564diff -urNp linux-3.0.7/drivers/edac/i82975x_edac.c linux-3.0.7/drivers/edac/i82975x_edac.c
25565--- linux-3.0.7/drivers/edac/i82975x_edac.c 2011-07-21 22:17:23.000000000 -0400
25566+++ linux-3.0.7/drivers/edac/i82975x_edac.c 2011-10-11 10:44:33.000000000 -0400
25567@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
25568 edac_mc_free(mci);
25569 }
25570
25571-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
25572+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
25573 {
25574 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25575 I82975X
25576diff -urNp linux-3.0.7/drivers/edac/mce_amd.h linux-3.0.7/drivers/edac/mce_amd.h
25577--- linux-3.0.7/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25578+++ linux-3.0.7/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25579@@ -83,7 +83,7 @@ struct amd_decoder_ops {
25580 bool (*dc_mce)(u16, u8);
25581 bool (*ic_mce)(u16, u8);
25582 bool (*nb_mce)(u16, u8);
25583-};
25584+} __no_const;
25585
25586 void amd_report_gart_errors(bool);
25587 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25588diff -urNp linux-3.0.7/drivers/edac/r82600_edac.c linux-3.0.7/drivers/edac/r82600_edac.c
25589--- linux-3.0.7/drivers/edac/r82600_edac.c 2011-07-21 22:17:23.000000000 -0400
25590+++ linux-3.0.7/drivers/edac/r82600_edac.c 2011-10-11 10:44:33.000000000 -0400
25591@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
25592 edac_mc_free(mci);
25593 }
25594
25595-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
25596+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
25597 {
25598 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
25599 },
25600diff -urNp linux-3.0.7/drivers/edac/x38_edac.c linux-3.0.7/drivers/edac/x38_edac.c
25601--- linux-3.0.7/drivers/edac/x38_edac.c 2011-07-21 22:17:23.000000000 -0400
25602+++ linux-3.0.7/drivers/edac/x38_edac.c 2011-10-11 10:44:33.000000000 -0400
25603@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
25604 edac_mc_free(mci);
25605 }
25606
25607-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
25608+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
25609 {
25610 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25611 X38},
25612diff -urNp linux-3.0.7/drivers/firewire/core-card.c linux-3.0.7/drivers/firewire/core-card.c
25613--- linux-3.0.7/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25614+++ linux-3.0.7/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25615@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25616
25617 void fw_core_remove_card(struct fw_card *card)
25618 {
25619- struct fw_card_driver dummy_driver = dummy_driver_template;
25620+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
25621
25622 card->driver->update_phy_reg(card, 4,
25623 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25624diff -urNp linux-3.0.7/drivers/firewire/core-cdev.c linux-3.0.7/drivers/firewire/core-cdev.c
25625--- linux-3.0.7/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25626+++ linux-3.0.7/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25627@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25628 int ret;
25629
25630 if ((request->channels == 0 && request->bandwidth == 0) ||
25631- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25632- request->bandwidth < 0)
25633+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25634 return -EINVAL;
25635
25636 r = kmalloc(sizeof(*r), GFP_KERNEL);
25637diff -urNp linux-3.0.7/drivers/firewire/core.h linux-3.0.7/drivers/firewire/core.h
25638--- linux-3.0.7/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25639+++ linux-3.0.7/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25640@@ -101,6 +101,7 @@ struct fw_card_driver {
25641
25642 int (*stop_iso)(struct fw_iso_context *ctx);
25643 };
25644+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25645
25646 void fw_card_initialize(struct fw_card *card,
25647 const struct fw_card_driver *driver, struct device *device);
25648diff -urNp linux-3.0.7/drivers/firewire/core-transaction.c linux-3.0.7/drivers/firewire/core-transaction.c
25649--- linux-3.0.7/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25650+++ linux-3.0.7/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25651@@ -37,6 +37,7 @@
25652 #include <linux/timer.h>
25653 #include <linux/types.h>
25654 #include <linux/workqueue.h>
25655+#include <linux/sched.h>
25656
25657 #include <asm/byteorder.h>
25658
25659@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25660 struct transaction_callback_data d;
25661 struct fw_transaction t;
25662
25663+ pax_track_stack();
25664+
25665 init_timer_on_stack(&t.split_timeout_timer);
25666 init_completion(&d.done);
25667 d.payload = payload;
25668diff -urNp linux-3.0.7/drivers/firmware/dmi_scan.c linux-3.0.7/drivers/firmware/dmi_scan.c
25669--- linux-3.0.7/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25670+++ linux-3.0.7/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25671@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25672 }
25673 }
25674 else {
25675- /*
25676- * no iounmap() for that ioremap(); it would be a no-op, but
25677- * it's so early in setup that sucker gets confused into doing
25678- * what it shouldn't if we actually call it.
25679- */
25680 p = dmi_ioremap(0xF0000, 0x10000);
25681 if (p == NULL)
25682 goto error;
25683@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25684 if (buf == NULL)
25685 return -1;
25686
25687- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25688+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25689
25690 iounmap(buf);
25691 return 0;
25692diff -urNp linux-3.0.7/drivers/gpio/vr41xx_giu.c linux-3.0.7/drivers/gpio/vr41xx_giu.c
25693--- linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25694+++ linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25695@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25696 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25697 maskl, pendl, maskh, pendh);
25698
25699- atomic_inc(&irq_err_count);
25700+ atomic_inc_unchecked(&irq_err_count);
25701
25702 return -EINVAL;
25703 }
25704diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc.c linux-3.0.7/drivers/gpu/drm/drm_crtc.c
25705--- linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25706+++ linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25707@@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25708 */
25709 if ((out_resp->count_modes >= mode_count) && mode_count) {
25710 copied = 0;
25711- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25712+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25713 list_for_each_entry(mode, &connector->modes, head) {
25714 drm_crtc_convert_to_umode(&u_mode, mode);
25715 if (copy_to_user(mode_ptr + copied,
25716@@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25717
25718 if ((out_resp->count_props >= props_count) && props_count) {
25719 copied = 0;
25720- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25721- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25722+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25723+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25724 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25725 if (connector->property_ids[i] != 0) {
25726 if (put_user(connector->property_ids[i],
25727@@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25728
25729 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25730 copied = 0;
25731- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25732+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25733 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25734 if (connector->encoder_ids[i] != 0) {
25735 if (put_user(connector->encoder_ids[i],
25736@@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25737 }
25738
25739 for (i = 0; i < crtc_req->count_connectors; i++) {
25740- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25741+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25742 if (get_user(out_id, &set_connectors_ptr[i])) {
25743 ret = -EFAULT;
25744 goto out;
25745@@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25746 fb = obj_to_fb(obj);
25747
25748 num_clips = r->num_clips;
25749- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25750+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25751
25752 if (!num_clips != !clips_ptr) {
25753 ret = -EINVAL;
25754@@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25755 out_resp->flags = property->flags;
25756
25757 if ((out_resp->count_values >= value_count) && value_count) {
25758- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25759+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25760 for (i = 0; i < value_count; i++) {
25761 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25762 ret = -EFAULT;
25763@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25764 if (property->flags & DRM_MODE_PROP_ENUM) {
25765 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25766 copied = 0;
25767- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25768+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25769 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25770
25771 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25772@@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25773 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25774 copied = 0;
25775 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25776- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25777+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25778
25779 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25780 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25781@@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25782 struct drm_mode_get_blob *out_resp = data;
25783 struct drm_property_blob *blob;
25784 int ret = 0;
25785- void *blob_ptr;
25786+ void __user *blob_ptr;
25787
25788 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25789 return -EINVAL;
25790@@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25791 blob = obj_to_blob(obj);
25792
25793 if (out_resp->length == blob->length) {
25794- blob_ptr = (void *)(unsigned long)out_resp->data;
25795+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
25796 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25797 ret = -EFAULT;
25798 goto done;
25799diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c
25800--- linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25801+++ linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25802@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25803 struct drm_crtc *tmp;
25804 int crtc_mask = 1;
25805
25806- WARN(!crtc, "checking null crtc?\n");
25807+ BUG_ON(!crtc);
25808
25809 dev = crtc->dev;
25810
25811@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25812 struct drm_encoder *encoder;
25813 bool ret = true;
25814
25815+ pax_track_stack();
25816+
25817 crtc->enabled = drm_helper_crtc_in_use(crtc);
25818 if (!crtc->enabled)
25819 return true;
25820diff -urNp linux-3.0.7/drivers/gpu/drm/drm_drv.c linux-3.0.7/drivers/gpu/drm/drm_drv.c
25821--- linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
25822+++ linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
25823@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
25824 /**
25825 * Copy and IOCTL return string to user space
25826 */
25827-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
25828+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
25829 {
25830 int len;
25831
25832@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
25833
25834 dev = file_priv->minor->dev;
25835 atomic_inc(&dev->ioctl_count);
25836- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
25837+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
25838 ++file_priv->ioctl_count;
25839
25840 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
25841diff -urNp linux-3.0.7/drivers/gpu/drm/drm_fops.c linux-3.0.7/drivers/gpu/drm/drm_fops.c
25842--- linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
25843+++ linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
25844@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
25845 }
25846
25847 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
25848- atomic_set(&dev->counts[i], 0);
25849+ atomic_set_unchecked(&dev->counts[i], 0);
25850
25851 dev->sigdata.lock = NULL;
25852
25853@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
25854
25855 retcode = drm_open_helper(inode, filp, dev);
25856 if (!retcode) {
25857- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
25858- if (!dev->open_count++)
25859+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
25860+ if (local_inc_return(&dev->open_count) == 1)
25861 retcode = drm_setup(dev);
25862 }
25863 if (!retcode) {
25864@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
25865
25866 mutex_lock(&drm_global_mutex);
25867
25868- DRM_DEBUG("open_count = %d\n", dev->open_count);
25869+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
25870
25871 if (dev->driver->preclose)
25872 dev->driver->preclose(dev, file_priv);
25873@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
25874 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
25875 task_pid_nr(current),
25876 (long)old_encode_dev(file_priv->minor->device),
25877- dev->open_count);
25878+ local_read(&dev->open_count));
25879
25880 /* if the master has gone away we can't do anything with the lock */
25881 if (file_priv->minor->master)
25882@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
25883 * End inline drm_release
25884 */
25885
25886- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
25887- if (!--dev->open_count) {
25888+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
25889+ if (local_dec_and_test(&dev->open_count)) {
25890 if (atomic_read(&dev->ioctl_count)) {
25891 DRM_ERROR("Device busy: %d\n",
25892 atomic_read(&dev->ioctl_count));
25893diff -urNp linux-3.0.7/drivers/gpu/drm/drm_global.c linux-3.0.7/drivers/gpu/drm/drm_global.c
25894--- linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
25895+++ linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
25896@@ -36,7 +36,7 @@
25897 struct drm_global_item {
25898 struct mutex mutex;
25899 void *object;
25900- int refcount;
25901+ atomic_t refcount;
25902 };
25903
25904 static struct drm_global_item glob[DRM_GLOBAL_NUM];
25905@@ -49,7 +49,7 @@ void drm_global_init(void)
25906 struct drm_global_item *item = &glob[i];
25907 mutex_init(&item->mutex);
25908 item->object = NULL;
25909- item->refcount = 0;
25910+ atomic_set(&item->refcount, 0);
25911 }
25912 }
25913
25914@@ -59,7 +59,7 @@ void drm_global_release(void)
25915 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
25916 struct drm_global_item *item = &glob[i];
25917 BUG_ON(item->object != NULL);
25918- BUG_ON(item->refcount != 0);
25919+ BUG_ON(atomic_read(&item->refcount) != 0);
25920 }
25921 }
25922
25923@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
25924 void *object;
25925
25926 mutex_lock(&item->mutex);
25927- if (item->refcount == 0) {
25928+ if (atomic_read(&item->refcount) == 0) {
25929 item->object = kzalloc(ref->size, GFP_KERNEL);
25930 if (unlikely(item->object == NULL)) {
25931 ret = -ENOMEM;
25932@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
25933 goto out_err;
25934
25935 }
25936- ++item->refcount;
25937+ atomic_inc(&item->refcount);
25938 ref->object = item->object;
25939 object = item->object;
25940 mutex_unlock(&item->mutex);
25941@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
25942 struct drm_global_item *item = &glob[ref->global_type];
25943
25944 mutex_lock(&item->mutex);
25945- BUG_ON(item->refcount == 0);
25946+ BUG_ON(atomic_read(&item->refcount) == 0);
25947 BUG_ON(ref->object != item->object);
25948- if (--item->refcount == 0) {
25949+ if (atomic_dec_and_test(&item->refcount)) {
25950 ref->release(ref);
25951 item->object = NULL;
25952 }
25953diff -urNp linux-3.0.7/drivers/gpu/drm/drm_info.c linux-3.0.7/drivers/gpu/drm/drm_info.c
25954--- linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
25955+++ linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
25956@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
25957 struct drm_local_map *map;
25958 struct drm_map_list *r_list;
25959
25960- /* Hardcoded from _DRM_FRAME_BUFFER,
25961- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
25962- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
25963- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
25964+ static const char * const types[] = {
25965+ [_DRM_FRAME_BUFFER] = "FB",
25966+ [_DRM_REGISTERS] = "REG",
25967+ [_DRM_SHM] = "SHM",
25968+ [_DRM_AGP] = "AGP",
25969+ [_DRM_SCATTER_GATHER] = "SG",
25970+ [_DRM_CONSISTENT] = "PCI",
25971+ [_DRM_GEM] = "GEM" };
25972 const char *type;
25973 int i;
25974
25975@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
25976 map = r_list->map;
25977 if (!map)
25978 continue;
25979- if (map->type < 0 || map->type > 5)
25980+ if (map->type >= ARRAY_SIZE(types))
25981 type = "??";
25982 else
25983 type = types[map->type];
25984@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
25985 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
25986 vma->vm_flags & VM_LOCKED ? 'l' : '-',
25987 vma->vm_flags & VM_IO ? 'i' : '-',
25988+#ifdef CONFIG_GRKERNSEC_HIDESYM
25989+ 0);
25990+#else
25991 vma->vm_pgoff);
25992+#endif
25993
25994 #if defined(__i386__)
25995 pgprot = pgprot_val(vma->vm_page_prot);
25996diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioc32.c linux-3.0.7/drivers/gpu/drm/drm_ioc32.c
25997--- linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25998+++ linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
25999@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26000 request = compat_alloc_user_space(nbytes);
26001 if (!access_ok(VERIFY_WRITE, request, nbytes))
26002 return -EFAULT;
26003- list = (struct drm_buf_desc *) (request + 1);
26004+ list = (struct drm_buf_desc __user *) (request + 1);
26005
26006 if (__put_user(count, &request->count)
26007 || __put_user(list, &request->list))
26008@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26009 request = compat_alloc_user_space(nbytes);
26010 if (!access_ok(VERIFY_WRITE, request, nbytes))
26011 return -EFAULT;
26012- list = (struct drm_buf_pub *) (request + 1);
26013+ list = (struct drm_buf_pub __user *) (request + 1);
26014
26015 if (__put_user(count, &request->count)
26016 || __put_user(list, &request->list))
26017diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioctl.c linux-3.0.7/drivers/gpu/drm/drm_ioctl.c
26018--- linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
26019+++ linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
26020@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26021 stats->data[i].value =
26022 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26023 else
26024- stats->data[i].value = atomic_read(&dev->counts[i]);
26025+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26026 stats->data[i].type = dev->types[i];
26027 }
26028
26029diff -urNp linux-3.0.7/drivers/gpu/drm/drm_lock.c linux-3.0.7/drivers/gpu/drm/drm_lock.c
26030--- linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
26031+++ linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
26032@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26033 if (drm_lock_take(&master->lock, lock->context)) {
26034 master->lock.file_priv = file_priv;
26035 master->lock.lock_time = jiffies;
26036- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26037+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26038 break; /* Got lock */
26039 }
26040
26041@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26042 return -EINVAL;
26043 }
26044
26045- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26046+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26047
26048 if (drm_lock_free(&master->lock, lock->context)) {
26049 /* FIXME: Should really bail out here. */
26050diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c
26051--- linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
26052+++ linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
26053@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26054 dma->buflist[vertex->idx],
26055 vertex->discard, vertex->used);
26056
26057- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26058- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26059+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26060+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26061 sarea_priv->last_enqueue = dev_priv->counter - 1;
26062 sarea_priv->last_dispatch = (int)hw_status[5];
26063
26064@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26065 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26066 mc->last_render);
26067
26068- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26069- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26070+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26071+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26072 sarea_priv->last_enqueue = dev_priv->counter - 1;
26073 sarea_priv->last_dispatch = (int)hw_status[5];
26074
26075diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h
26076--- linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
26077+++ linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
26078@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26079 int page_flipping;
26080
26081 wait_queue_head_t irq_queue;
26082- atomic_t irq_received;
26083- atomic_t irq_emitted;
26084+ atomic_unchecked_t irq_received;
26085+ atomic_unchecked_t irq_emitted;
26086
26087 int front_offset;
26088 } drm_i810_private_t;
26089diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c
26090--- linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
26091+++ linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
26092@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26093 I915_READ(GTIMR));
26094 }
26095 seq_printf(m, "Interrupts received: %d\n",
26096- atomic_read(&dev_priv->irq_received));
26097+ atomic_read_unchecked(&dev_priv->irq_received));
26098 for (i = 0; i < I915_NUM_RINGS; i++) {
26099 if (IS_GEN6(dev)) {
26100 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26101@@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
26102 return ret;
26103
26104 if (opregion->header)
26105- seq_write(m, opregion->header, OPREGION_SIZE);
26106+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26107
26108 mutex_unlock(&dev->struct_mutex);
26109
26110diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c
26111--- linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
26112+++ linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
26113@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
26114 bool can_switch;
26115
26116 spin_lock(&dev->count_lock);
26117- can_switch = (dev->open_count == 0);
26118+ can_switch = (local_read(&dev->open_count) == 0);
26119 spin_unlock(&dev->count_lock);
26120 return can_switch;
26121 }
26122diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h
26123--- linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
26124+++ linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
26125@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
26126 /* render clock increase/decrease */
26127 /* display clock increase/decrease */
26128 /* pll clock increase/decrease */
26129-};
26130+} __no_const;
26131
26132 struct intel_device_info {
26133 u8 gen;
26134@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
26135 int current_page;
26136 int page_flipping;
26137
26138- atomic_t irq_received;
26139+ atomic_unchecked_t irq_received;
26140
26141 /* protects the irq masks */
26142 spinlock_t irq_lock;
26143@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
26144 * will be page flipped away on the next vblank. When it
26145 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26146 */
26147- atomic_t pending_flip;
26148+ atomic_unchecked_t pending_flip;
26149 };
26150
26151 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26152@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
26153 extern void intel_teardown_gmbus(struct drm_device *dev);
26154 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26155 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26156-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26157+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26158 {
26159 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26160 }
26161diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26162--- linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
26163+++ linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
26164@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26165 i915_gem_clflush_object(obj);
26166
26167 if (obj->base.pending_write_domain)
26168- cd->flips |= atomic_read(&obj->pending_flip);
26169+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26170
26171 /* The actual obj->write_domain will be updated with
26172 * pending_write_domain after we emit the accumulated flush for all
26173diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c
26174--- linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
26175+++ linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
26176@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
26177 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26178 struct drm_i915_master_private *master_priv;
26179
26180- atomic_inc(&dev_priv->irq_received);
26181+ atomic_inc_unchecked(&dev_priv->irq_received);
26182
26183 /* disable master interrupt before clearing iir */
26184 de_ier = I915_READ(DEIER);
26185@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
26186 struct drm_i915_master_private *master_priv;
26187 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26188
26189- atomic_inc(&dev_priv->irq_received);
26190+ atomic_inc_unchecked(&dev_priv->irq_received);
26191
26192 if (IS_GEN6(dev))
26193 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26194@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
26195 int ret = IRQ_NONE, pipe;
26196 bool blc_event = false;
26197
26198- atomic_inc(&dev_priv->irq_received);
26199+ atomic_inc_unchecked(&dev_priv->irq_received);
26200
26201 iir = I915_READ(IIR);
26202
26203@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
26204 {
26205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26206
26207- atomic_set(&dev_priv->irq_received, 0);
26208+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26209
26210 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26211 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26212@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
26213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26214 int pipe;
26215
26216- atomic_set(&dev_priv->irq_received, 0);
26217+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26218
26219 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26220 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26221diff -urNp linux-3.0.7/drivers/gpu/drm/i915/intel_display.c linux-3.0.7/drivers/gpu/drm/i915/intel_display.c
26222--- linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
26223+++ linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
26224@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26225
26226 wait_event(dev_priv->pending_flip_queue,
26227 atomic_read(&dev_priv->mm.wedged) ||
26228- atomic_read(&obj->pending_flip) == 0);
26229+ atomic_read_unchecked(&obj->pending_flip) == 0);
26230
26231 /* Big Hammer, we also need to ensure that any pending
26232 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26233@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
26234 obj = to_intel_framebuffer(crtc->fb)->obj;
26235 dev_priv = crtc->dev->dev_private;
26236 wait_event(dev_priv->pending_flip_queue,
26237- atomic_read(&obj->pending_flip) == 0);
26238+ atomic_read_unchecked(&obj->pending_flip) == 0);
26239 }
26240
26241 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26242@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
26243
26244 atomic_clear_mask(1 << intel_crtc->plane,
26245 &obj->pending_flip.counter);
26246- if (atomic_read(&obj->pending_flip) == 0)
26247+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26248 wake_up(&dev_priv->pending_flip_queue);
26249
26250 schedule_work(&work->work);
26251@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
26252 /* Block clients from rendering to the new back buffer until
26253 * the flip occurs and the object is no longer visible.
26254 */
26255- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26256+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26257
26258 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26259 if (ret)
26260@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
26261 return 0;
26262
26263 cleanup_pending:
26264- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26265+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26266 cleanup_objs:
26267 drm_gem_object_unreference(&work->old_fb_obj->base);
26268 drm_gem_object_unreference(&obj->base);
26269diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h
26270--- linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
26271+++ linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
26272@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26273 u32 clear_cmd;
26274 u32 maccess;
26275
26276- atomic_t vbl_received; /**< Number of vblanks received. */
26277+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26278 wait_queue_head_t fence_queue;
26279- atomic_t last_fence_retired;
26280+ atomic_unchecked_t last_fence_retired;
26281 u32 next_fence_to_post;
26282
26283 unsigned int fb_cpp;
26284diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c
26285--- linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26286+++ linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26287@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26288 if (crtc != 0)
26289 return 0;
26290
26291- return atomic_read(&dev_priv->vbl_received);
26292+ return atomic_read_unchecked(&dev_priv->vbl_received);
26293 }
26294
26295
26296@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26297 /* VBLANK interrupt */
26298 if (status & MGA_VLINEPEN) {
26299 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26300- atomic_inc(&dev_priv->vbl_received);
26301+ atomic_inc_unchecked(&dev_priv->vbl_received);
26302 drm_handle_vblank(dev, 0);
26303 handled = 1;
26304 }
26305@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26306 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26307 MGA_WRITE(MGA_PRIMEND, prim_end);
26308
26309- atomic_inc(&dev_priv->last_fence_retired);
26310+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26311 DRM_WAKEUP(&dev_priv->fence_queue);
26312 handled = 1;
26313 }
26314@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26315 * using fences.
26316 */
26317 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26318- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26319+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26320 - *sequence) <= (1 << 23)));
26321
26322 *sequence = cur_fence;
26323diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c
26324--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26325+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26326@@ -200,7 +200,7 @@ struct methods {
26327 const char desc[8];
26328 void (*loadbios)(struct drm_device *, uint8_t *);
26329 const bool rw;
26330-};
26331+} __do_const;
26332
26333 static struct methods shadow_methods[] = {
26334 { "PRAMIN", load_vbios_pramin, true },
26335@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26336 struct bit_table {
26337 const char id;
26338 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26339-};
26340+} __no_const;
26341
26342 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26343
26344diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h
26345--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26346+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26347@@ -227,7 +227,7 @@ struct nouveau_channel {
26348 struct list_head pending;
26349 uint32_t sequence;
26350 uint32_t sequence_ack;
26351- atomic_t last_sequence_irq;
26352+ atomic_unchecked_t last_sequence_irq;
26353 } fence;
26354
26355 /* DMA push buffer */
26356@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26357 u32 handle, u16 class);
26358 void (*set_tile_region)(struct drm_device *dev, int i);
26359 void (*tlb_flush)(struct drm_device *, int engine);
26360-};
26361+} __no_const;
26362
26363 struct nouveau_instmem_engine {
26364 void *priv;
26365@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26366 struct nouveau_mc_engine {
26367 int (*init)(struct drm_device *dev);
26368 void (*takedown)(struct drm_device *dev);
26369-};
26370+} __no_const;
26371
26372 struct nouveau_timer_engine {
26373 int (*init)(struct drm_device *dev);
26374 void (*takedown)(struct drm_device *dev);
26375 uint64_t (*read)(struct drm_device *dev);
26376-};
26377+} __no_const;
26378
26379 struct nouveau_fb_engine {
26380 int num_tiles;
26381@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26382 void (*put)(struct drm_device *, struct nouveau_mem **);
26383
26384 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26385-};
26386+} __no_const;
26387
26388 struct nouveau_engine {
26389 struct nouveau_instmem_engine instmem;
26390@@ -640,7 +640,7 @@ struct drm_nouveau_private {
26391 struct drm_global_reference mem_global_ref;
26392 struct ttm_bo_global_ref bo_global_ref;
26393 struct ttm_bo_device bdev;
26394- atomic_t validate_sequence;
26395+ atomic_unchecked_t validate_sequence;
26396 } ttm;
26397
26398 struct {
26399diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c
26400--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26401+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26402@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26403 if (USE_REFCNT(dev))
26404 sequence = nvchan_rd32(chan, 0x48);
26405 else
26406- sequence = atomic_read(&chan->fence.last_sequence_irq);
26407+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26408
26409 if (chan->fence.sequence_ack == sequence)
26410 goto out;
26411@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26412
26413 INIT_LIST_HEAD(&chan->fence.pending);
26414 spin_lock_init(&chan->fence.lock);
26415- atomic_set(&chan->fence.last_sequence_irq, 0);
26416+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26417 return 0;
26418 }
26419
26420diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c
26421--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26422+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26423@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26424 int trycnt = 0;
26425 int ret, i;
26426
26427- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26428+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26429 retry:
26430 if (++trycnt > 100000) {
26431 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26432diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c
26433--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
26434+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
26435@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
26436 bool can_switch;
26437
26438 spin_lock(&dev->count_lock);
26439- can_switch = (dev->open_count == 0);
26440+ can_switch = (local_read(&dev->open_count) == 0);
26441 spin_unlock(&dev->count_lock);
26442 return can_switch;
26443 }
26444diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c
26445--- linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
26446+++ linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
26447@@ -560,7 +560,7 @@ static int
26448 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26449 u32 class, u32 mthd, u32 data)
26450 {
26451- atomic_set(&chan->fence.last_sequence_irq, data);
26452+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26453 return 0;
26454 }
26455
26456diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c
26457--- linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
26458+++ linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
26459@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26460
26461 /* GH: Simple idle check.
26462 */
26463- atomic_set(&dev_priv->idle_count, 0);
26464+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26465
26466 /* We don't support anything other than bus-mastering ring mode,
26467 * but the ring can be in either AGP or PCI space for the ring
26468diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h
26469--- linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
26470+++ linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
26471@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26472 int is_pci;
26473 unsigned long cce_buffers_offset;
26474
26475- atomic_t idle_count;
26476+ atomic_unchecked_t idle_count;
26477
26478 int page_flipping;
26479 int current_page;
26480 u32 crtc_offset;
26481 u32 crtc_offset_cntl;
26482
26483- atomic_t vbl_received;
26484+ atomic_unchecked_t vbl_received;
26485
26486 u32 color_fmt;
26487 unsigned int front_offset;
26488diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c
26489--- linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
26490+++ linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
26491@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
26492 if (crtc != 0)
26493 return 0;
26494
26495- return atomic_read(&dev_priv->vbl_received);
26496+ return atomic_read_unchecked(&dev_priv->vbl_received);
26497 }
26498
26499 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
26500@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
26501 /* VBLANK interrupt */
26502 if (status & R128_CRTC_VBLANK_INT) {
26503 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
26504- atomic_inc(&dev_priv->vbl_received);
26505+ atomic_inc_unchecked(&dev_priv->vbl_received);
26506 drm_handle_vblank(dev, 0);
26507 return IRQ_HANDLED;
26508 }
26509diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_state.c linux-3.0.7/drivers/gpu/drm/r128/r128_state.c
26510--- linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
26511+++ linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
26512@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
26513
26514 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
26515 {
26516- if (atomic_read(&dev_priv->idle_count) == 0)
26517+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
26518 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
26519 else
26520- atomic_set(&dev_priv->idle_count, 0);
26521+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26522 }
26523
26524 #endif
26525diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/atom.c linux-3.0.7/drivers/gpu/drm/radeon/atom.c
26526--- linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
26527+++ linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
26528@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26529 char name[512];
26530 int i;
26531
26532+ pax_track_stack();
26533+
26534 ctx->card = card;
26535 ctx->bios = bios;
26536
26537diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c
26538--- linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26539+++ linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26540@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26541 regex_t mask_rex;
26542 regmatch_t match[4];
26543 char buf[1024];
26544- size_t end;
26545+ long end;
26546 int len;
26547 int done = 0;
26548 int r;
26549 unsigned o;
26550 struct offset *offset;
26551 char last_reg_s[10];
26552- int last_reg;
26553+ unsigned long last_reg;
26554
26555 if (regcomp
26556 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26557diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c
26558--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26559+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26560@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26561 struct radeon_gpio_rec gpio;
26562 struct radeon_hpd hpd;
26563
26564+ pax_track_stack();
26565+
26566 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26567 return false;
26568
26569diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c
26570--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26571+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26572@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26573 bool can_switch;
26574
26575 spin_lock(&dev->count_lock);
26576- can_switch = (dev->open_count == 0);
26577+ can_switch = (local_read(&dev->open_count) == 0);
26578 spin_unlock(&dev->count_lock);
26579 return can_switch;
26580 }
26581diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c
26582--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26583+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26584@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26585 uint32_t post_div;
26586 u32 pll_out_min, pll_out_max;
26587
26588+ pax_track_stack();
26589+
26590 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26591 freq = freq * 1000;
26592
26593diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h
26594--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26595+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26596@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26597
26598 /* SW interrupt */
26599 wait_queue_head_t swi_queue;
26600- atomic_t swi_emitted;
26601+ atomic_unchecked_t swi_emitted;
26602 int vblank_crtc;
26603 uint32_t irq_enable_reg;
26604 uint32_t r500_disp_irq_reg;
26605diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c
26606--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26607+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26608@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26609 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26610 return 0;
26611 }
26612- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26613+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26614 if (!rdev->cp.ready)
26615 /* FIXME: cp is not running assume everythings is done right
26616 * away
26617@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26618 return r;
26619 }
26620 radeon_fence_write(rdev, 0);
26621- atomic_set(&rdev->fence_drv.seq, 0);
26622+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26623 INIT_LIST_HEAD(&rdev->fence_drv.created);
26624 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26625 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26626diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon.h linux-3.0.7/drivers/gpu/drm/radeon/radeon.h
26627--- linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:54:53.000000000 -0400
26628+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:55:27.000000000 -0400
26629@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26630 */
26631 struct radeon_fence_driver {
26632 uint32_t scratch_reg;
26633- atomic_t seq;
26634+ atomic_unchecked_t seq;
26635 uint32_t last_seq;
26636 unsigned long last_jiffies;
26637 unsigned long last_timeout;
26638@@ -961,7 +961,7 @@ struct radeon_asic {
26639 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26640 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26641 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26642-};
26643+} __no_const;
26644
26645 /*
26646 * Asic structures
26647diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c
26648--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26649+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26650@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26651 request = compat_alloc_user_space(sizeof(*request));
26652 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26653 || __put_user(req32.param, &request->param)
26654- || __put_user((void __user *)(unsigned long)req32.value,
26655+ || __put_user((unsigned long)req32.value,
26656 &request->value))
26657 return -EFAULT;
26658
26659diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c
26660--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26661+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26662@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26663 unsigned int ret;
26664 RING_LOCALS;
26665
26666- atomic_inc(&dev_priv->swi_emitted);
26667- ret = atomic_read(&dev_priv->swi_emitted);
26668+ atomic_inc_unchecked(&dev_priv->swi_emitted);
26669+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26670
26671 BEGIN_RING(4);
26672 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26673@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26674 drm_radeon_private_t *dev_priv =
26675 (drm_radeon_private_t *) dev->dev_private;
26676
26677- atomic_set(&dev_priv->swi_emitted, 0);
26678+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26679 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26680
26681 dev->max_vblank_count = 0x001fffff;
26682diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c
26683--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26684+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26685@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26686 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26687 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26688
26689- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26690+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26691 sarea_priv->nbox * sizeof(depth_boxes[0])))
26692 return -EFAULT;
26693
26694@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26695 {
26696 drm_radeon_private_t *dev_priv = dev->dev_private;
26697 drm_radeon_getparam_t *param = data;
26698- int value;
26699+ int value = 0;
26700
26701 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26702
26703diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c
26704--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:54:53.000000000 -0400
26705+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:55:27.000000000 -0400
26706@@ -649,8 +649,10 @@ int radeon_mmap(struct file *filp, struc
26707 }
26708 if (unlikely(ttm_vm_ops == NULL)) {
26709 ttm_vm_ops = vma->vm_ops;
26710- radeon_ttm_vm_ops = *ttm_vm_ops;
26711- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26712+ pax_open_kernel();
26713+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26714+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26715+ pax_close_kernel();
26716 }
26717 vma->vm_ops = &radeon_ttm_vm_ops;
26718 return 0;
26719diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/rs690.c linux-3.0.7/drivers/gpu/drm/radeon/rs690.c
26720--- linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26721+++ linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26722@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26723 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26724 rdev->pm.sideport_bandwidth.full)
26725 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26726- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26727+ read_delay_latency.full = dfixed_const(800 * 1000);
26728 read_delay_latency.full = dfixed_div(read_delay_latency,
26729 rdev->pm.igp_sideport_mclk);
26730+ a.full = dfixed_const(370);
26731+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26732 } else {
26733 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26734 rdev->pm.k8_bandwidth.full)
26735diff -urNp linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c
26736--- linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26737+++ linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26738@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26739 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26740 struct shrink_control *sc)
26741 {
26742- static atomic_t start_pool = ATOMIC_INIT(0);
26743+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26744 unsigned i;
26745- unsigned pool_offset = atomic_add_return(1, &start_pool);
26746+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26747 struct ttm_page_pool *pool;
26748 int shrink_pages = sc->nr_to_scan;
26749
26750diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_drv.h linux-3.0.7/drivers/gpu/drm/via/via_drv.h
26751--- linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26752+++ linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26753@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26754 typedef uint32_t maskarray_t[5];
26755
26756 typedef struct drm_via_irq {
26757- atomic_t irq_received;
26758+ atomic_unchecked_t irq_received;
26759 uint32_t pending_mask;
26760 uint32_t enable_mask;
26761 wait_queue_head_t irq_queue;
26762@@ -75,7 +75,7 @@ typedef struct drm_via_private {
26763 struct timeval last_vblank;
26764 int last_vblank_valid;
26765 unsigned usec_per_vblank;
26766- atomic_t vbl_received;
26767+ atomic_unchecked_t vbl_received;
26768 drm_via_state_t hc_state;
26769 char pci_buf[VIA_PCI_BUF_SIZE];
26770 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26771diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_irq.c linux-3.0.7/drivers/gpu/drm/via/via_irq.c
26772--- linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26773+++ linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26774@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26775 if (crtc != 0)
26776 return 0;
26777
26778- return atomic_read(&dev_priv->vbl_received);
26779+ return atomic_read_unchecked(&dev_priv->vbl_received);
26780 }
26781
26782 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26783@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26784
26785 status = VIA_READ(VIA_REG_INTERRUPT);
26786 if (status & VIA_IRQ_VBLANK_PENDING) {
26787- atomic_inc(&dev_priv->vbl_received);
26788- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26789+ atomic_inc_unchecked(&dev_priv->vbl_received);
26790+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26791 do_gettimeofday(&cur_vblank);
26792 if (dev_priv->last_vblank_valid) {
26793 dev_priv->usec_per_vblank =
26794@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26795 dev_priv->last_vblank = cur_vblank;
26796 dev_priv->last_vblank_valid = 1;
26797 }
26798- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26799+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26800 DRM_DEBUG("US per vblank is: %u\n",
26801 dev_priv->usec_per_vblank);
26802 }
26803@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26804
26805 for (i = 0; i < dev_priv->num_irqs; ++i) {
26806 if (status & cur_irq->pending_mask) {
26807- atomic_inc(&cur_irq->irq_received);
26808+ atomic_inc_unchecked(&cur_irq->irq_received);
26809 DRM_WAKEUP(&cur_irq->irq_queue);
26810 handled = 1;
26811 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26812@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26813 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26814 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26815 masks[irq][4]));
26816- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
26817+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
26818 } else {
26819 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26820 (((cur_irq_sequence =
26821- atomic_read(&cur_irq->irq_received)) -
26822+ atomic_read_unchecked(&cur_irq->irq_received)) -
26823 *sequence) <= (1 << 23)));
26824 }
26825 *sequence = cur_irq_sequence;
26826@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
26827 }
26828
26829 for (i = 0; i < dev_priv->num_irqs; ++i) {
26830- atomic_set(&cur_irq->irq_received, 0);
26831+ atomic_set_unchecked(&cur_irq->irq_received, 0);
26832 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
26833 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
26834 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
26835@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
26836 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
26837 case VIA_IRQ_RELATIVE:
26838 irqwait->request.sequence +=
26839- atomic_read(&cur_irq->irq_received);
26840+ atomic_read_unchecked(&cur_irq->irq_received);
26841 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
26842 case VIA_IRQ_ABSOLUTE:
26843 break;
26844diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
26845--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
26846+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
26847@@ -240,7 +240,7 @@ struct vmw_private {
26848 * Fencing and IRQs.
26849 */
26850
26851- atomic_t fence_seq;
26852+ atomic_unchecked_t fence_seq;
26853 wait_queue_head_t fence_queue;
26854 wait_queue_head_t fifo_queue;
26855 atomic_t fence_queue_waiters;
26856diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
26857--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
26858+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
26859@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
26860 struct drm_vmw_fence_rep fence_rep;
26861 struct drm_vmw_fence_rep __user *user_fence_rep;
26862 int ret;
26863- void *user_cmd;
26864+ void __user *user_cmd;
26865 void *cmd;
26866 uint32_t sequence;
26867 struct vmw_sw_context *sw_context = &dev_priv->ctx;
26868diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
26869--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
26870+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
26871@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
26872 while (!vmw_lag_lt(queue, us)) {
26873 spin_lock(&queue->lock);
26874 if (list_empty(&queue->head))
26875- sequence = atomic_read(&dev_priv->fence_seq);
26876+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26877 else {
26878 fence = list_first_entry(&queue->head,
26879 struct vmw_fence, head);
26880diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
26881--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
26882+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
26883@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
26884 (unsigned int) min,
26885 (unsigned int) fifo->capabilities);
26886
26887- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26888+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26889 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
26890 vmw_fence_queue_init(&fifo->fence_queue);
26891 return vmw_fifo_send_fence(dev_priv, &dummy);
26892@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
26893 if (reserveable)
26894 iowrite32(bytes, fifo_mem +
26895 SVGA_FIFO_RESERVED);
26896- return fifo_mem + (next_cmd >> 2);
26897+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
26898 } else {
26899 need_bounce = true;
26900 }
26901@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26902
26903 fm = vmw_fifo_reserve(dev_priv, bytes);
26904 if (unlikely(fm == NULL)) {
26905- *sequence = atomic_read(&dev_priv->fence_seq);
26906+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26907 ret = -ENOMEM;
26908 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
26909 false, 3*HZ);
26910@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26911 }
26912
26913 do {
26914- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
26915+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
26916 } while (*sequence == 0);
26917
26918 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
26919diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
26920--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
26921+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
26922@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
26923 * emitted. Then the fence is stale and signaled.
26924 */
26925
26926- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
26927+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
26928 > VMW_FENCE_WRAP);
26929
26930 return ret;
26931@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
26932
26933 if (fifo_idle)
26934 down_read(&fifo_state->rwsem);
26935- signal_seq = atomic_read(&dev_priv->fence_seq);
26936+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
26937 ret = 0;
26938
26939 for (;;) {
26940diff -urNp linux-3.0.7/drivers/hid/hid-core.c linux-3.0.7/drivers/hid/hid-core.c
26941--- linux-3.0.7/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
26942+++ linux-3.0.7/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
26943@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
26944
26945 int hid_add_device(struct hid_device *hdev)
26946 {
26947- static atomic_t id = ATOMIC_INIT(0);
26948+ static atomic_unchecked_t id = ATOMIC_INIT(0);
26949 int ret;
26950
26951 if (WARN_ON(hdev->status & HID_STAT_ADDED))
26952@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
26953 /* XXX hack, any other cleaner solution after the driver core
26954 * is converted to allow more than 20 bytes as the device name? */
26955 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
26956- hdev->vendor, hdev->product, atomic_inc_return(&id));
26957+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
26958
26959 hid_debug_register(hdev, dev_name(&hdev->dev));
26960 ret = device_add(&hdev->dev);
26961diff -urNp linux-3.0.7/drivers/hid/usbhid/hiddev.c linux-3.0.7/drivers/hid/usbhid/hiddev.c
26962--- linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
26963+++ linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
26964@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
26965 break;
26966
26967 case HIDIOCAPPLICATION:
26968- if (arg < 0 || arg >= hid->maxapplication)
26969+ if (arg >= hid->maxapplication)
26970 break;
26971
26972 for (i = 0; i < hid->maxcollection; i++)
26973diff -urNp linux-3.0.7/drivers/hwmon/acpi_power_meter.c linux-3.0.7/drivers/hwmon/acpi_power_meter.c
26974--- linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
26975+++ linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
26976@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
26977 return res;
26978
26979 temp /= 1000;
26980- if (temp < 0)
26981- return -EINVAL;
26982
26983 mutex_lock(&resource->lock);
26984 resource->trip[attr->index - 7] = temp;
26985diff -urNp linux-3.0.7/drivers/hwmon/sht15.c linux-3.0.7/drivers/hwmon/sht15.c
26986--- linux-3.0.7/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
26987+++ linux-3.0.7/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
26988@@ -166,7 +166,7 @@ struct sht15_data {
26989 int supply_uV;
26990 bool supply_uV_valid;
26991 struct work_struct update_supply_work;
26992- atomic_t interrupt_handled;
26993+ atomic_unchecked_t interrupt_handled;
26994 };
26995
26996 /**
26997@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
26998 return ret;
26999
27000 gpio_direction_input(data->pdata->gpio_data);
27001- atomic_set(&data->interrupt_handled, 0);
27002+ atomic_set_unchecked(&data->interrupt_handled, 0);
27003
27004 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27005 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27006 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27007 /* Only relevant if the interrupt hasn't occurred. */
27008- if (!atomic_read(&data->interrupt_handled))
27009+ if (!atomic_read_unchecked(&data->interrupt_handled))
27010 schedule_work(&data->read_work);
27011 }
27012 ret = wait_event_timeout(data->wait_queue,
27013@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27014
27015 /* First disable the interrupt */
27016 disable_irq_nosync(irq);
27017- atomic_inc(&data->interrupt_handled);
27018+ atomic_inc_unchecked(&data->interrupt_handled);
27019 /* Then schedule a reading work struct */
27020 if (data->state != SHT15_READING_NOTHING)
27021 schedule_work(&data->read_work);
27022@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27023 * If not, then start the interrupt again - care here as could
27024 * have gone low in meantime so verify it hasn't!
27025 */
27026- atomic_set(&data->interrupt_handled, 0);
27027+ atomic_set_unchecked(&data->interrupt_handled, 0);
27028 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27029 /* If still not occurred or another handler has been scheduled */
27030 if (gpio_get_value(data->pdata->gpio_data)
27031- || atomic_read(&data->interrupt_handled))
27032+ || atomic_read_unchecked(&data->interrupt_handled))
27033 return;
27034 }
27035
27036diff -urNp linux-3.0.7/drivers/hwmon/w83791d.c linux-3.0.7/drivers/hwmon/w83791d.c
27037--- linux-3.0.7/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
27038+++ linux-3.0.7/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
27039@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
27040 struct i2c_board_info *info);
27041 static int w83791d_remove(struct i2c_client *client);
27042
27043-static int w83791d_read(struct i2c_client *client, u8 register);
27044-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
27045+static int w83791d_read(struct i2c_client *client, u8 reg);
27046+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
27047 static struct w83791d_data *w83791d_update_device(struct device *dev);
27048
27049 #ifdef DEBUG
27050diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c
27051--- linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
27052+++ linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
27053@@ -43,7 +43,7 @@
27054 extern struct i2c_adapter amd756_smbus;
27055
27056 static struct i2c_adapter *s4882_adapter;
27057-static struct i2c_algorithm *s4882_algo;
27058+static i2c_algorithm_no_const *s4882_algo;
27059
27060 /* Wrapper access functions for multiplexed SMBus */
27061 static DEFINE_MUTEX(amd756_lock);
27062diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c
27063--- linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
27064+++ linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
27065@@ -41,7 +41,7 @@
27066 extern struct i2c_adapter *nforce2_smbus;
27067
27068 static struct i2c_adapter *s4985_adapter;
27069-static struct i2c_algorithm *s4985_algo;
27070+static i2c_algorithm_no_const *s4985_algo;
27071
27072 /* Wrapper access functions for multiplexed SMBus */
27073 static DEFINE_MUTEX(nforce2_lock);
27074diff -urNp linux-3.0.7/drivers/i2c/i2c-mux.c linux-3.0.7/drivers/i2c/i2c-mux.c
27075--- linux-3.0.7/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
27076+++ linux-3.0.7/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
27077@@ -28,7 +28,7 @@
27078 /* multiplexer per channel data */
27079 struct i2c_mux_priv {
27080 struct i2c_adapter adap;
27081- struct i2c_algorithm algo;
27082+ i2c_algorithm_no_const algo;
27083
27084 struct i2c_adapter *parent;
27085 void *mux_dev; /* the mux chip/device */
27086diff -urNp linux-3.0.7/drivers/ide/aec62xx.c linux-3.0.7/drivers/ide/aec62xx.c
27087--- linux-3.0.7/drivers/ide/aec62xx.c 2011-07-21 22:17:23.000000000 -0400
27088+++ linux-3.0.7/drivers/ide/aec62xx.c 2011-10-11 10:44:33.000000000 -0400
27089@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27090 .cable_detect = atp86x_cable_detect,
27091 };
27092
27093-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27094+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27095 { /* 0: AEC6210 */
27096 .name = DRV_NAME,
27097 .init_chipset = init_chipset_aec62xx,
27098diff -urNp linux-3.0.7/drivers/ide/alim15x3.c linux-3.0.7/drivers/ide/alim15x3.c
27099--- linux-3.0.7/drivers/ide/alim15x3.c 2011-07-21 22:17:23.000000000 -0400
27100+++ linux-3.0.7/drivers/ide/alim15x3.c 2011-10-11 10:44:33.000000000 -0400
27101@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27102 .dma_sff_read_status = ide_dma_sff_read_status,
27103 };
27104
27105-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27106+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27107 .name = DRV_NAME,
27108 .init_chipset = init_chipset_ali15x3,
27109 .init_hwif = init_hwif_ali15x3,
27110diff -urNp linux-3.0.7/drivers/ide/amd74xx.c linux-3.0.7/drivers/ide/amd74xx.c
27111--- linux-3.0.7/drivers/ide/amd74xx.c 2011-07-21 22:17:23.000000000 -0400
27112+++ linux-3.0.7/drivers/ide/amd74xx.c 2011-10-11 10:44:33.000000000 -0400
27113@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27114 .udma_mask = udma, \
27115 }
27116
27117-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27118+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27119 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27120 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27121 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27122diff -urNp linux-3.0.7/drivers/ide/atiixp.c linux-3.0.7/drivers/ide/atiixp.c
27123--- linux-3.0.7/drivers/ide/atiixp.c 2011-07-21 22:17:23.000000000 -0400
27124+++ linux-3.0.7/drivers/ide/atiixp.c 2011-10-11 10:44:33.000000000 -0400
27125@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27126 .cable_detect = atiixp_cable_detect,
27127 };
27128
27129-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27130+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27131 { /* 0: IXP200/300/400/700 */
27132 .name = DRV_NAME,
27133 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27134diff -urNp linux-3.0.7/drivers/ide/cmd64x.c linux-3.0.7/drivers/ide/cmd64x.c
27135--- linux-3.0.7/drivers/ide/cmd64x.c 2011-07-21 22:17:23.000000000 -0400
27136+++ linux-3.0.7/drivers/ide/cmd64x.c 2011-10-11 10:44:33.000000000 -0400
27137@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27138 .dma_sff_read_status = ide_dma_sff_read_status,
27139 };
27140
27141-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27142+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27143 { /* 0: CMD643 */
27144 .name = DRV_NAME,
27145 .init_chipset = init_chipset_cmd64x,
27146diff -urNp linux-3.0.7/drivers/ide/cs5520.c linux-3.0.7/drivers/ide/cs5520.c
27147--- linux-3.0.7/drivers/ide/cs5520.c 2011-07-21 22:17:23.000000000 -0400
27148+++ linux-3.0.7/drivers/ide/cs5520.c 2011-10-11 10:44:33.000000000 -0400
27149@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27150 .set_dma_mode = cs5520_set_dma_mode,
27151 };
27152
27153-static const struct ide_port_info cyrix_chipset __devinitdata = {
27154+static const struct ide_port_info cyrix_chipset __devinitconst = {
27155 .name = DRV_NAME,
27156 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27157 .port_ops = &cs5520_port_ops,
27158diff -urNp linux-3.0.7/drivers/ide/cs5530.c linux-3.0.7/drivers/ide/cs5530.c
27159--- linux-3.0.7/drivers/ide/cs5530.c 2011-07-21 22:17:23.000000000 -0400
27160+++ linux-3.0.7/drivers/ide/cs5530.c 2011-10-11 10:44:33.000000000 -0400
27161@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27162 .udma_filter = cs5530_udma_filter,
27163 };
27164
27165-static const struct ide_port_info cs5530_chipset __devinitdata = {
27166+static const struct ide_port_info cs5530_chipset __devinitconst = {
27167 .name = DRV_NAME,
27168 .init_chipset = init_chipset_cs5530,
27169 .init_hwif = init_hwif_cs5530,
27170diff -urNp linux-3.0.7/drivers/ide/cs5535.c linux-3.0.7/drivers/ide/cs5535.c
27171--- linux-3.0.7/drivers/ide/cs5535.c 2011-07-21 22:17:23.000000000 -0400
27172+++ linux-3.0.7/drivers/ide/cs5535.c 2011-10-11 10:44:33.000000000 -0400
27173@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27174 .cable_detect = cs5535_cable_detect,
27175 };
27176
27177-static const struct ide_port_info cs5535_chipset __devinitdata = {
27178+static const struct ide_port_info cs5535_chipset __devinitconst = {
27179 .name = DRV_NAME,
27180 .port_ops = &cs5535_port_ops,
27181 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27182diff -urNp linux-3.0.7/drivers/ide/cy82c693.c linux-3.0.7/drivers/ide/cy82c693.c
27183--- linux-3.0.7/drivers/ide/cy82c693.c 2011-07-21 22:17:23.000000000 -0400
27184+++ linux-3.0.7/drivers/ide/cy82c693.c 2011-10-11 10:44:33.000000000 -0400
27185@@ -161,7 +161,7 @@ static const struct ide_port_ops cy82c69
27186 .set_dma_mode = cy82c693_set_dma_mode,
27187 };
27188
27189-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27190+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27191 .name = DRV_NAME,
27192 .init_iops = init_iops_cy82c693,
27193 .port_ops = &cy82c693_port_ops,
27194diff -urNp linux-3.0.7/drivers/ide/hpt366.c linux-3.0.7/drivers/ide/hpt366.c
27195--- linux-3.0.7/drivers/ide/hpt366.c 2011-07-21 22:17:23.000000000 -0400
27196+++ linux-3.0.7/drivers/ide/hpt366.c 2011-10-11 10:44:33.000000000 -0400
27197@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27198 }
27199 };
27200
27201-static const struct hpt_info hpt36x __devinitdata = {
27202+static const struct hpt_info hpt36x __devinitconst = {
27203 .chip_name = "HPT36x",
27204 .chip_type = HPT36x,
27205 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27206@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27207 .timings = &hpt36x_timings
27208 };
27209
27210-static const struct hpt_info hpt370 __devinitdata = {
27211+static const struct hpt_info hpt370 __devinitconst = {
27212 .chip_name = "HPT370",
27213 .chip_type = HPT370,
27214 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27215@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27216 .timings = &hpt37x_timings
27217 };
27218
27219-static const struct hpt_info hpt370a __devinitdata = {
27220+static const struct hpt_info hpt370a __devinitconst = {
27221 .chip_name = "HPT370A",
27222 .chip_type = HPT370A,
27223 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27224@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27225 .timings = &hpt37x_timings
27226 };
27227
27228-static const struct hpt_info hpt374 __devinitdata = {
27229+static const struct hpt_info hpt374 __devinitconst = {
27230 .chip_name = "HPT374",
27231 .chip_type = HPT374,
27232 .udma_mask = ATA_UDMA5,
27233@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27234 .timings = &hpt37x_timings
27235 };
27236
27237-static const struct hpt_info hpt372 __devinitdata = {
27238+static const struct hpt_info hpt372 __devinitconst = {
27239 .chip_name = "HPT372",
27240 .chip_type = HPT372,
27241 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27242@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27243 .timings = &hpt37x_timings
27244 };
27245
27246-static const struct hpt_info hpt372a __devinitdata = {
27247+static const struct hpt_info hpt372a __devinitconst = {
27248 .chip_name = "HPT372A",
27249 .chip_type = HPT372A,
27250 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27251@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27252 .timings = &hpt37x_timings
27253 };
27254
27255-static const struct hpt_info hpt302 __devinitdata = {
27256+static const struct hpt_info hpt302 __devinitconst = {
27257 .chip_name = "HPT302",
27258 .chip_type = HPT302,
27259 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27260@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27261 .timings = &hpt37x_timings
27262 };
27263
27264-static const struct hpt_info hpt371 __devinitdata = {
27265+static const struct hpt_info hpt371 __devinitconst = {
27266 .chip_name = "HPT371",
27267 .chip_type = HPT371,
27268 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27269@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27270 .timings = &hpt37x_timings
27271 };
27272
27273-static const struct hpt_info hpt372n __devinitdata = {
27274+static const struct hpt_info hpt372n __devinitconst = {
27275 .chip_name = "HPT372N",
27276 .chip_type = HPT372N,
27277 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27278@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27279 .timings = &hpt37x_timings
27280 };
27281
27282-static const struct hpt_info hpt302n __devinitdata = {
27283+static const struct hpt_info hpt302n __devinitconst = {
27284 .chip_name = "HPT302N",
27285 .chip_type = HPT302N,
27286 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27287@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27288 .timings = &hpt37x_timings
27289 };
27290
27291-static const struct hpt_info hpt371n __devinitdata = {
27292+static const struct hpt_info hpt371n __devinitconst = {
27293 .chip_name = "HPT371N",
27294 .chip_type = HPT371N,
27295 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27296@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27297 .dma_sff_read_status = ide_dma_sff_read_status,
27298 };
27299
27300-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27301+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27302 { /* 0: HPT36x */
27303 .name = DRV_NAME,
27304 .init_chipset = init_chipset_hpt366,
27305diff -urNp linux-3.0.7/drivers/ide/ide-cd.c linux-3.0.7/drivers/ide/ide-cd.c
27306--- linux-3.0.7/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
27307+++ linux-3.0.7/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
27308@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27309 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27310 if ((unsigned long)buf & alignment
27311 || blk_rq_bytes(rq) & q->dma_pad_mask
27312- || object_is_on_stack(buf))
27313+ || object_starts_on_stack(buf))
27314 drive->dma = 0;
27315 }
27316 }
27317diff -urNp linux-3.0.7/drivers/ide/ide-floppy.c linux-3.0.7/drivers/ide/ide-floppy.c
27318--- linux-3.0.7/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
27319+++ linux-3.0.7/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
27320@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27321 u8 pc_buf[256], header_len, desc_cnt;
27322 int i, rc = 1, blocks, length;
27323
27324+ pax_track_stack();
27325+
27326 ide_debug_log(IDE_DBG_FUNC, "enter");
27327
27328 drive->bios_cyl = 0;
27329diff -urNp linux-3.0.7/drivers/ide/ide-pci-generic.c linux-3.0.7/drivers/ide/ide-pci-generic.c
27330--- linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-07-21 22:17:23.000000000 -0400
27331+++ linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-10-11 10:44:33.000000000 -0400
27332@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27333 .udma_mask = ATA_UDMA6, \
27334 }
27335
27336-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27337+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27338 /* 0: Unknown */
27339 DECLARE_GENERIC_PCI_DEV(0),
27340
27341diff -urNp linux-3.0.7/drivers/ide/it8172.c linux-3.0.7/drivers/ide/it8172.c
27342--- linux-3.0.7/drivers/ide/it8172.c 2011-07-21 22:17:23.000000000 -0400
27343+++ linux-3.0.7/drivers/ide/it8172.c 2011-10-11 10:44:33.000000000 -0400
27344@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27345 .set_dma_mode = it8172_set_dma_mode,
27346 };
27347
27348-static const struct ide_port_info it8172_port_info __devinitdata = {
27349+static const struct ide_port_info it8172_port_info __devinitconst = {
27350 .name = DRV_NAME,
27351 .port_ops = &it8172_port_ops,
27352 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27353diff -urNp linux-3.0.7/drivers/ide/it8213.c linux-3.0.7/drivers/ide/it8213.c
27354--- linux-3.0.7/drivers/ide/it8213.c 2011-07-21 22:17:23.000000000 -0400
27355+++ linux-3.0.7/drivers/ide/it8213.c 2011-10-11 10:44:33.000000000 -0400
27356@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27357 .cable_detect = it8213_cable_detect,
27358 };
27359
27360-static const struct ide_port_info it8213_chipset __devinitdata = {
27361+static const struct ide_port_info it8213_chipset __devinitconst = {
27362 .name = DRV_NAME,
27363 .enablebits = { {0x41, 0x80, 0x80} },
27364 .port_ops = &it8213_port_ops,
27365diff -urNp linux-3.0.7/drivers/ide/it821x.c linux-3.0.7/drivers/ide/it821x.c
27366--- linux-3.0.7/drivers/ide/it821x.c 2011-07-21 22:17:23.000000000 -0400
27367+++ linux-3.0.7/drivers/ide/it821x.c 2011-10-11 10:44:33.000000000 -0400
27368@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27369 .cable_detect = it821x_cable_detect,
27370 };
27371
27372-static const struct ide_port_info it821x_chipset __devinitdata = {
27373+static const struct ide_port_info it821x_chipset __devinitconst = {
27374 .name = DRV_NAME,
27375 .init_chipset = init_chipset_it821x,
27376 .init_hwif = init_hwif_it821x,
27377diff -urNp linux-3.0.7/drivers/ide/jmicron.c linux-3.0.7/drivers/ide/jmicron.c
27378--- linux-3.0.7/drivers/ide/jmicron.c 2011-07-21 22:17:23.000000000 -0400
27379+++ linux-3.0.7/drivers/ide/jmicron.c 2011-10-11 10:44:33.000000000 -0400
27380@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27381 .cable_detect = jmicron_cable_detect,
27382 };
27383
27384-static const struct ide_port_info jmicron_chipset __devinitdata = {
27385+static const struct ide_port_info jmicron_chipset __devinitconst = {
27386 .name = DRV_NAME,
27387 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27388 .port_ops = &jmicron_port_ops,
27389diff -urNp linux-3.0.7/drivers/ide/ns87415.c linux-3.0.7/drivers/ide/ns87415.c
27390--- linux-3.0.7/drivers/ide/ns87415.c 2011-07-21 22:17:23.000000000 -0400
27391+++ linux-3.0.7/drivers/ide/ns87415.c 2011-10-11 10:44:33.000000000 -0400
27392@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27393 .dma_sff_read_status = superio_dma_sff_read_status,
27394 };
27395
27396-static const struct ide_port_info ns87415_chipset __devinitdata = {
27397+static const struct ide_port_info ns87415_chipset __devinitconst = {
27398 .name = DRV_NAME,
27399 .init_hwif = init_hwif_ns87415,
27400 .tp_ops = &ns87415_tp_ops,
27401diff -urNp linux-3.0.7/drivers/ide/opti621.c linux-3.0.7/drivers/ide/opti621.c
27402--- linux-3.0.7/drivers/ide/opti621.c 2011-07-21 22:17:23.000000000 -0400
27403+++ linux-3.0.7/drivers/ide/opti621.c 2011-10-11 10:44:33.000000000 -0400
27404@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27405 .set_pio_mode = opti621_set_pio_mode,
27406 };
27407
27408-static const struct ide_port_info opti621_chipset __devinitdata = {
27409+static const struct ide_port_info opti621_chipset __devinitconst = {
27410 .name = DRV_NAME,
27411 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27412 .port_ops = &opti621_port_ops,
27413diff -urNp linux-3.0.7/drivers/ide/pdc202xx_new.c linux-3.0.7/drivers/ide/pdc202xx_new.c
27414--- linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-07-21 22:17:23.000000000 -0400
27415+++ linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-10-11 10:44:33.000000000 -0400
27416@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27417 .udma_mask = udma, \
27418 }
27419
27420-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27421+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27422 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27423 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27424 };
27425diff -urNp linux-3.0.7/drivers/ide/pdc202xx_old.c linux-3.0.7/drivers/ide/pdc202xx_old.c
27426--- linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-07-21 22:17:23.000000000 -0400
27427+++ linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-10-11 10:44:33.000000000 -0400
27428@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27429 .max_sectors = sectors, \
27430 }
27431
27432-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27433+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27434 { /* 0: PDC20246 */
27435 .name = DRV_NAME,
27436 .init_chipset = init_chipset_pdc202xx,
27437diff -urNp linux-3.0.7/drivers/ide/piix.c linux-3.0.7/drivers/ide/piix.c
27438--- linux-3.0.7/drivers/ide/piix.c 2011-07-21 22:17:23.000000000 -0400
27439+++ linux-3.0.7/drivers/ide/piix.c 2011-10-11 10:44:33.000000000 -0400
27440@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27441 .udma_mask = udma, \
27442 }
27443
27444-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27445+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27446 /* 0: MPIIX */
27447 { /*
27448 * MPIIX actually has only a single IDE channel mapped to
27449diff -urNp linux-3.0.7/drivers/ide/rz1000.c linux-3.0.7/drivers/ide/rz1000.c
27450--- linux-3.0.7/drivers/ide/rz1000.c 2011-07-21 22:17:23.000000000 -0400
27451+++ linux-3.0.7/drivers/ide/rz1000.c 2011-10-11 10:44:33.000000000 -0400
27452@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27453 }
27454 }
27455
27456-static const struct ide_port_info rz1000_chipset __devinitdata = {
27457+static const struct ide_port_info rz1000_chipset __devinitconst = {
27458 .name = DRV_NAME,
27459 .host_flags = IDE_HFLAG_NO_DMA,
27460 };
27461diff -urNp linux-3.0.7/drivers/ide/sc1200.c linux-3.0.7/drivers/ide/sc1200.c
27462--- linux-3.0.7/drivers/ide/sc1200.c 2011-07-21 22:17:23.000000000 -0400
27463+++ linux-3.0.7/drivers/ide/sc1200.c 2011-10-11 10:44:33.000000000 -0400
27464@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27465 .dma_sff_read_status = ide_dma_sff_read_status,
27466 };
27467
27468-static const struct ide_port_info sc1200_chipset __devinitdata = {
27469+static const struct ide_port_info sc1200_chipset __devinitconst = {
27470 .name = DRV_NAME,
27471 .port_ops = &sc1200_port_ops,
27472 .dma_ops = &sc1200_dma_ops,
27473diff -urNp linux-3.0.7/drivers/ide/scc_pata.c linux-3.0.7/drivers/ide/scc_pata.c
27474--- linux-3.0.7/drivers/ide/scc_pata.c 2011-07-21 22:17:23.000000000 -0400
27475+++ linux-3.0.7/drivers/ide/scc_pata.c 2011-10-11 10:44:33.000000000 -0400
27476@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27477 .dma_sff_read_status = scc_dma_sff_read_status,
27478 };
27479
27480-static const struct ide_port_info scc_chipset __devinitdata = {
27481+static const struct ide_port_info scc_chipset __devinitconst = {
27482 .name = "sccIDE",
27483 .init_iops = init_iops_scc,
27484 .init_dma = scc_init_dma,
27485diff -urNp linux-3.0.7/drivers/ide/serverworks.c linux-3.0.7/drivers/ide/serverworks.c
27486--- linux-3.0.7/drivers/ide/serverworks.c 2011-07-21 22:17:23.000000000 -0400
27487+++ linux-3.0.7/drivers/ide/serverworks.c 2011-10-11 10:44:33.000000000 -0400
27488@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27489 .cable_detect = svwks_cable_detect,
27490 };
27491
27492-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
27493+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
27494 { /* 0: OSB4 */
27495 .name = DRV_NAME,
27496 .init_chipset = init_chipset_svwks,
27497diff -urNp linux-3.0.7/drivers/ide/setup-pci.c linux-3.0.7/drivers/ide/setup-pci.c
27498--- linux-3.0.7/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
27499+++ linux-3.0.7/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
27500@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
27501 int ret, i, n_ports = dev2 ? 4 : 2;
27502 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
27503
27504+ pax_track_stack();
27505+
27506 for (i = 0; i < n_ports / 2; i++) {
27507 ret = ide_setup_pci_controller(pdev[i], d, !i);
27508 if (ret < 0)
27509diff -urNp linux-3.0.7/drivers/ide/siimage.c linux-3.0.7/drivers/ide/siimage.c
27510--- linux-3.0.7/drivers/ide/siimage.c 2011-07-21 22:17:23.000000000 -0400
27511+++ linux-3.0.7/drivers/ide/siimage.c 2011-10-11 10:44:33.000000000 -0400
27512@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
27513 .udma_mask = ATA_UDMA6, \
27514 }
27515
27516-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
27517+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
27518 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
27519 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
27520 };
27521diff -urNp linux-3.0.7/drivers/ide/sis5513.c linux-3.0.7/drivers/ide/sis5513.c
27522--- linux-3.0.7/drivers/ide/sis5513.c 2011-07-21 22:17:23.000000000 -0400
27523+++ linux-3.0.7/drivers/ide/sis5513.c 2011-10-11 10:44:33.000000000 -0400
27524@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
27525 .cable_detect = sis_cable_detect,
27526 };
27527
27528-static const struct ide_port_info sis5513_chipset __devinitdata = {
27529+static const struct ide_port_info sis5513_chipset __devinitconst = {
27530 .name = DRV_NAME,
27531 .init_chipset = init_chipset_sis5513,
27532 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
27533diff -urNp linux-3.0.7/drivers/ide/sl82c105.c linux-3.0.7/drivers/ide/sl82c105.c
27534--- linux-3.0.7/drivers/ide/sl82c105.c 2011-07-21 22:17:23.000000000 -0400
27535+++ linux-3.0.7/drivers/ide/sl82c105.c 2011-10-11 10:44:33.000000000 -0400
27536@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
27537 .dma_sff_read_status = ide_dma_sff_read_status,
27538 };
27539
27540-static const struct ide_port_info sl82c105_chipset __devinitdata = {
27541+static const struct ide_port_info sl82c105_chipset __devinitconst = {
27542 .name = DRV_NAME,
27543 .init_chipset = init_chipset_sl82c105,
27544 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
27545diff -urNp linux-3.0.7/drivers/ide/slc90e66.c linux-3.0.7/drivers/ide/slc90e66.c
27546--- linux-3.0.7/drivers/ide/slc90e66.c 2011-07-21 22:17:23.000000000 -0400
27547+++ linux-3.0.7/drivers/ide/slc90e66.c 2011-10-11 10:44:33.000000000 -0400
27548@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
27549 .cable_detect = slc90e66_cable_detect,
27550 };
27551
27552-static const struct ide_port_info slc90e66_chipset __devinitdata = {
27553+static const struct ide_port_info slc90e66_chipset __devinitconst = {
27554 .name = DRV_NAME,
27555 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
27556 .port_ops = &slc90e66_port_ops,
27557diff -urNp linux-3.0.7/drivers/ide/tc86c001.c linux-3.0.7/drivers/ide/tc86c001.c
27558--- linux-3.0.7/drivers/ide/tc86c001.c 2011-07-21 22:17:23.000000000 -0400
27559+++ linux-3.0.7/drivers/ide/tc86c001.c 2011-10-11 10:44:33.000000000 -0400
27560@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
27561 .dma_sff_read_status = ide_dma_sff_read_status,
27562 };
27563
27564-static const struct ide_port_info tc86c001_chipset __devinitdata = {
27565+static const struct ide_port_info tc86c001_chipset __devinitconst = {
27566 .name = DRV_NAME,
27567 .init_hwif = init_hwif_tc86c001,
27568 .port_ops = &tc86c001_port_ops,
27569diff -urNp linux-3.0.7/drivers/ide/triflex.c linux-3.0.7/drivers/ide/triflex.c
27570--- linux-3.0.7/drivers/ide/triflex.c 2011-07-21 22:17:23.000000000 -0400
27571+++ linux-3.0.7/drivers/ide/triflex.c 2011-10-11 10:44:33.000000000 -0400
27572@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
27573 .set_dma_mode = triflex_set_mode,
27574 };
27575
27576-static const struct ide_port_info triflex_device __devinitdata = {
27577+static const struct ide_port_info triflex_device __devinitconst = {
27578 .name = DRV_NAME,
27579 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
27580 .port_ops = &triflex_port_ops,
27581diff -urNp linux-3.0.7/drivers/ide/trm290.c linux-3.0.7/drivers/ide/trm290.c
27582--- linux-3.0.7/drivers/ide/trm290.c 2011-07-21 22:17:23.000000000 -0400
27583+++ linux-3.0.7/drivers/ide/trm290.c 2011-10-11 10:44:33.000000000 -0400
27584@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
27585 .dma_check = trm290_dma_check,
27586 };
27587
27588-static const struct ide_port_info trm290_chipset __devinitdata = {
27589+static const struct ide_port_info trm290_chipset __devinitconst = {
27590 .name = DRV_NAME,
27591 .init_hwif = init_hwif_trm290,
27592 .tp_ops = &trm290_tp_ops,
27593diff -urNp linux-3.0.7/drivers/ide/via82cxxx.c linux-3.0.7/drivers/ide/via82cxxx.c
27594--- linux-3.0.7/drivers/ide/via82cxxx.c 2011-07-21 22:17:23.000000000 -0400
27595+++ linux-3.0.7/drivers/ide/via82cxxx.c 2011-10-11 10:44:33.000000000 -0400
27596@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
27597 .cable_detect = via82cxxx_cable_detect,
27598 };
27599
27600-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
27601+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
27602 .name = DRV_NAME,
27603 .init_chipset = init_chipset_via82cxxx,
27604 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
27605diff -urNp linux-3.0.7/drivers/infiniband/core/cm.c linux-3.0.7/drivers/infiniband/core/cm.c
27606--- linux-3.0.7/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
27607+++ linux-3.0.7/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
27608@@ -113,7 +113,7 @@ static char const counter_group_names[CM
27609
27610 struct cm_counter_group {
27611 struct kobject obj;
27612- atomic_long_t counter[CM_ATTR_COUNT];
27613+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
27614 };
27615
27616 struct cm_counter_attribute {
27617@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
27618 struct ib_mad_send_buf *msg = NULL;
27619 int ret;
27620
27621- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27622+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27623 counter[CM_REQ_COUNTER]);
27624
27625 /* Quick state check to discard duplicate REQs. */
27626@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
27627 if (!cm_id_priv)
27628 return;
27629
27630- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27631+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27632 counter[CM_REP_COUNTER]);
27633 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
27634 if (ret)
27635@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
27636 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
27637 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
27638 spin_unlock_irq(&cm_id_priv->lock);
27639- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27640+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27641 counter[CM_RTU_COUNTER]);
27642 goto out;
27643 }
27644@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
27645 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
27646 dreq_msg->local_comm_id);
27647 if (!cm_id_priv) {
27648- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27649+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27650 counter[CM_DREQ_COUNTER]);
27651 cm_issue_drep(work->port, work->mad_recv_wc);
27652 return -EINVAL;
27653@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
27654 case IB_CM_MRA_REP_RCVD:
27655 break;
27656 case IB_CM_TIMEWAIT:
27657- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27658+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27659 counter[CM_DREQ_COUNTER]);
27660 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27661 goto unlock;
27662@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
27663 cm_free_msg(msg);
27664 goto deref;
27665 case IB_CM_DREQ_RCVD:
27666- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27667+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27668 counter[CM_DREQ_COUNTER]);
27669 goto unlock;
27670 default:
27671@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
27672 ib_modify_mad(cm_id_priv->av.port->mad_agent,
27673 cm_id_priv->msg, timeout)) {
27674 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
27675- atomic_long_inc(&work->port->
27676+ atomic_long_inc_unchecked(&work->port->
27677 counter_group[CM_RECV_DUPLICATES].
27678 counter[CM_MRA_COUNTER]);
27679 goto out;
27680@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
27681 break;
27682 case IB_CM_MRA_REQ_RCVD:
27683 case IB_CM_MRA_REP_RCVD:
27684- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27685+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27686 counter[CM_MRA_COUNTER]);
27687 /* fall through */
27688 default:
27689@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
27690 case IB_CM_LAP_IDLE:
27691 break;
27692 case IB_CM_MRA_LAP_SENT:
27693- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27694+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27695 counter[CM_LAP_COUNTER]);
27696 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27697 goto unlock;
27698@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
27699 cm_free_msg(msg);
27700 goto deref;
27701 case IB_CM_LAP_RCVD:
27702- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27703+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27704 counter[CM_LAP_COUNTER]);
27705 goto unlock;
27706 default:
27707@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
27708 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
27709 if (cur_cm_id_priv) {
27710 spin_unlock_irq(&cm.lock);
27711- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27712+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27713 counter[CM_SIDR_REQ_COUNTER]);
27714 goto out; /* Duplicate message. */
27715 }
27716@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
27717 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
27718 msg->retries = 1;
27719
27720- atomic_long_add(1 + msg->retries,
27721+ atomic_long_add_unchecked(1 + msg->retries,
27722 &port->counter_group[CM_XMIT].counter[attr_index]);
27723 if (msg->retries)
27724- atomic_long_add(msg->retries,
27725+ atomic_long_add_unchecked(msg->retries,
27726 &port->counter_group[CM_XMIT_RETRIES].
27727 counter[attr_index]);
27728
27729@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
27730 }
27731
27732 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
27733- atomic_long_inc(&port->counter_group[CM_RECV].
27734+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
27735 counter[attr_id - CM_ATTR_ID_OFFSET]);
27736
27737 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
27738@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
27739 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
27740
27741 return sprintf(buf, "%ld\n",
27742- atomic_long_read(&group->counter[cm_attr->index]));
27743+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
27744 }
27745
27746 static const struct sysfs_ops cm_counter_ops = {
27747diff -urNp linux-3.0.7/drivers/infiniband/core/fmr_pool.c linux-3.0.7/drivers/infiniband/core/fmr_pool.c
27748--- linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
27749+++ linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
27750@@ -97,8 +97,8 @@ struct ib_fmr_pool {
27751
27752 struct task_struct *thread;
27753
27754- atomic_t req_ser;
27755- atomic_t flush_ser;
27756+ atomic_unchecked_t req_ser;
27757+ atomic_unchecked_t flush_ser;
27758
27759 wait_queue_head_t force_wait;
27760 };
27761@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
27762 struct ib_fmr_pool *pool = pool_ptr;
27763
27764 do {
27765- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
27766+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
27767 ib_fmr_batch_release(pool);
27768
27769- atomic_inc(&pool->flush_ser);
27770+ atomic_inc_unchecked(&pool->flush_ser);
27771 wake_up_interruptible(&pool->force_wait);
27772
27773 if (pool->flush_function)
27774@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
27775 }
27776
27777 set_current_state(TASK_INTERRUPTIBLE);
27778- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
27779+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
27780 !kthread_should_stop())
27781 schedule();
27782 __set_current_state(TASK_RUNNING);
27783@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
27784 pool->dirty_watermark = params->dirty_watermark;
27785 pool->dirty_len = 0;
27786 spin_lock_init(&pool->pool_lock);
27787- atomic_set(&pool->req_ser, 0);
27788- atomic_set(&pool->flush_ser, 0);
27789+ atomic_set_unchecked(&pool->req_ser, 0);
27790+ atomic_set_unchecked(&pool->flush_ser, 0);
27791 init_waitqueue_head(&pool->force_wait);
27792
27793 pool->thread = kthread_run(ib_fmr_cleanup_thread,
27794@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
27795 }
27796 spin_unlock_irq(&pool->pool_lock);
27797
27798- serial = atomic_inc_return(&pool->req_ser);
27799+ serial = atomic_inc_return_unchecked(&pool->req_ser);
27800 wake_up_process(pool->thread);
27801
27802 if (wait_event_interruptible(pool->force_wait,
27803- atomic_read(&pool->flush_ser) - serial >= 0))
27804+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
27805 return -EINTR;
27806
27807 return 0;
27808@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
27809 } else {
27810 list_add_tail(&fmr->list, &pool->dirty_list);
27811 if (++pool->dirty_len >= pool->dirty_watermark) {
27812- atomic_inc(&pool->req_ser);
27813+ atomic_inc_unchecked(&pool->req_ser);
27814 wake_up_process(pool->thread);
27815 }
27816 }
27817diff -urNp linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c
27818--- linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
27819+++ linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
27820@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
27821 int err;
27822 struct fw_ri_tpte tpt;
27823 u32 stag_idx;
27824- static atomic_t key;
27825+ static atomic_unchecked_t key;
27826
27827 if (c4iw_fatal_error(rdev))
27828 return -EIO;
27829@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
27830 &rdev->resource.tpt_fifo_lock);
27831 if (!stag_idx)
27832 return -ENOMEM;
27833- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
27834+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
27835 }
27836 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
27837 __func__, stag_state, type, pdid, stag_idx);
27838diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c
27839--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
27840+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
27841@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
27842 struct infinipath_counters counters;
27843 struct ipath_devdata *dd;
27844
27845+ pax_track_stack();
27846+
27847 dd = file->f_path.dentry->d_inode->i_private;
27848 dd->ipath_f_read_counters(dd, &counters);
27849
27850diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c
27851--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
27852+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
27853@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27854 struct ib_atomic_eth *ateth;
27855 struct ipath_ack_entry *e;
27856 u64 vaddr;
27857- atomic64_t *maddr;
27858+ atomic64_unchecked_t *maddr;
27859 u64 sdata;
27860 u32 rkey;
27861 u8 next;
27862@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27863 IB_ACCESS_REMOTE_ATOMIC)))
27864 goto nack_acc_unlck;
27865 /* Perform atomic OP and save result. */
27866- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27867+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27868 sdata = be64_to_cpu(ateth->swap_data);
27869 e = &qp->s_ack_queue[qp->r_head_ack_queue];
27870 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
27871- (u64) atomic64_add_return(sdata, maddr) - sdata :
27872+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27873 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27874 be64_to_cpu(ateth->compare_data),
27875 sdata);
27876diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c
27877--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
27878+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
27879@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
27880 unsigned long flags;
27881 struct ib_wc wc;
27882 u64 sdata;
27883- atomic64_t *maddr;
27884+ atomic64_unchecked_t *maddr;
27885 enum ib_wc_status send_status;
27886
27887 /*
27888@@ -382,11 +382,11 @@ again:
27889 IB_ACCESS_REMOTE_ATOMIC)))
27890 goto acc_err;
27891 /* Perform atomic OP and save result. */
27892- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27893+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27894 sdata = wqe->wr.wr.atomic.compare_add;
27895 *(u64 *) sqp->s_sge.sge.vaddr =
27896 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
27897- (u64) atomic64_add_return(sdata, maddr) - sdata :
27898+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27899 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27900 sdata, wqe->wr.wr.atomic.swap);
27901 goto send_comp;
27902diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.c linux-3.0.7/drivers/infiniband/hw/nes/nes.c
27903--- linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
27904+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
27905@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
27906 LIST_HEAD(nes_adapter_list);
27907 static LIST_HEAD(nes_dev_list);
27908
27909-atomic_t qps_destroyed;
27910+atomic_unchecked_t qps_destroyed;
27911
27912 static unsigned int ee_flsh_adapter;
27913 static unsigned int sysfs_nonidx_addr;
27914@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
27915 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
27916 struct nes_adapter *nesadapter = nesdev->nesadapter;
27917
27918- atomic_inc(&qps_destroyed);
27919+ atomic_inc_unchecked(&qps_destroyed);
27920
27921 /* Free the control structures */
27922
27923diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c
27924--- linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
27925+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
27926@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
27927 u32 cm_packets_retrans;
27928 u32 cm_packets_created;
27929 u32 cm_packets_received;
27930-atomic_t cm_listens_created;
27931-atomic_t cm_listens_destroyed;
27932+atomic_unchecked_t cm_listens_created;
27933+atomic_unchecked_t cm_listens_destroyed;
27934 u32 cm_backlog_drops;
27935-atomic_t cm_loopbacks;
27936-atomic_t cm_nodes_created;
27937-atomic_t cm_nodes_destroyed;
27938-atomic_t cm_accel_dropped_pkts;
27939-atomic_t cm_resets_recvd;
27940+atomic_unchecked_t cm_loopbacks;
27941+atomic_unchecked_t cm_nodes_created;
27942+atomic_unchecked_t cm_nodes_destroyed;
27943+atomic_unchecked_t cm_accel_dropped_pkts;
27944+atomic_unchecked_t cm_resets_recvd;
27945
27946 static inline int mini_cm_accelerated(struct nes_cm_core *,
27947 struct nes_cm_node *);
27948@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
27949
27950 static struct nes_cm_core *g_cm_core;
27951
27952-atomic_t cm_connects;
27953-atomic_t cm_accepts;
27954-atomic_t cm_disconnects;
27955-atomic_t cm_closes;
27956-atomic_t cm_connecteds;
27957-atomic_t cm_connect_reqs;
27958-atomic_t cm_rejects;
27959+atomic_unchecked_t cm_connects;
27960+atomic_unchecked_t cm_accepts;
27961+atomic_unchecked_t cm_disconnects;
27962+atomic_unchecked_t cm_closes;
27963+atomic_unchecked_t cm_connecteds;
27964+atomic_unchecked_t cm_connect_reqs;
27965+atomic_unchecked_t cm_rejects;
27966
27967
27968 /**
27969@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
27970 kfree(listener);
27971 listener = NULL;
27972 ret = 0;
27973- atomic_inc(&cm_listens_destroyed);
27974+ atomic_inc_unchecked(&cm_listens_destroyed);
27975 } else {
27976 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
27977 }
27978@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
27979 cm_node->rem_mac);
27980
27981 add_hte_node(cm_core, cm_node);
27982- atomic_inc(&cm_nodes_created);
27983+ atomic_inc_unchecked(&cm_nodes_created);
27984
27985 return cm_node;
27986 }
27987@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
27988 }
27989
27990 atomic_dec(&cm_core->node_cnt);
27991- atomic_inc(&cm_nodes_destroyed);
27992+ atomic_inc_unchecked(&cm_nodes_destroyed);
27993 nesqp = cm_node->nesqp;
27994 if (nesqp) {
27995 nesqp->cm_node = NULL;
27996@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
27997
27998 static void drop_packet(struct sk_buff *skb)
27999 {
28000- atomic_inc(&cm_accel_dropped_pkts);
28001+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28002 dev_kfree_skb_any(skb);
28003 }
28004
28005@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28006 {
28007
28008 int reset = 0; /* whether to send reset in case of err.. */
28009- atomic_inc(&cm_resets_recvd);
28010+ atomic_inc_unchecked(&cm_resets_recvd);
28011 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28012 " refcnt=%d\n", cm_node, cm_node->state,
28013 atomic_read(&cm_node->ref_count));
28014@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28015 rem_ref_cm_node(cm_node->cm_core, cm_node);
28016 return NULL;
28017 }
28018- atomic_inc(&cm_loopbacks);
28019+ atomic_inc_unchecked(&cm_loopbacks);
28020 loopbackremotenode->loopbackpartner = cm_node;
28021 loopbackremotenode->tcp_cntxt.rcv_wscale =
28022 NES_CM_DEFAULT_RCV_WND_SCALE;
28023@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28024 add_ref_cm_node(cm_node);
28025 } else if (cm_node->state == NES_CM_STATE_TSA) {
28026 rem_ref_cm_node(cm_core, cm_node);
28027- atomic_inc(&cm_accel_dropped_pkts);
28028+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28029 dev_kfree_skb_any(skb);
28030 break;
28031 }
28032@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28033
28034 if ((cm_id) && (cm_id->event_handler)) {
28035 if (issue_disconn) {
28036- atomic_inc(&cm_disconnects);
28037+ atomic_inc_unchecked(&cm_disconnects);
28038 cm_event.event = IW_CM_EVENT_DISCONNECT;
28039 cm_event.status = disconn_status;
28040 cm_event.local_addr = cm_id->local_addr;
28041@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28042 }
28043
28044 if (issue_close) {
28045- atomic_inc(&cm_closes);
28046+ atomic_inc_unchecked(&cm_closes);
28047 nes_disconnect(nesqp, 1);
28048
28049 cm_id->provider_data = nesqp;
28050@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28051
28052 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28053 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28054- atomic_inc(&cm_accepts);
28055+ atomic_inc_unchecked(&cm_accepts);
28056
28057 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28058 netdev_refcnt_read(nesvnic->netdev));
28059@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28060
28061 struct nes_cm_core *cm_core;
28062
28063- atomic_inc(&cm_rejects);
28064+ atomic_inc_unchecked(&cm_rejects);
28065 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28066 loopback = cm_node->loopbackpartner;
28067 cm_core = cm_node->cm_core;
28068@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28069 ntohl(cm_id->local_addr.sin_addr.s_addr),
28070 ntohs(cm_id->local_addr.sin_port));
28071
28072- atomic_inc(&cm_connects);
28073+ atomic_inc_unchecked(&cm_connects);
28074 nesqp->active_conn = 1;
28075
28076 /* cache the cm_id in the qp */
28077@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28078 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28079 return err;
28080 }
28081- atomic_inc(&cm_listens_created);
28082+ atomic_inc_unchecked(&cm_listens_created);
28083 }
28084
28085 cm_id->add_ref(cm_id);
28086@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28087 if (nesqp->destroyed) {
28088 return;
28089 }
28090- atomic_inc(&cm_connecteds);
28091+ atomic_inc_unchecked(&cm_connecteds);
28092 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28093 " local port 0x%04X. jiffies = %lu.\n",
28094 nesqp->hwqp.qp_id,
28095@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28096
28097 cm_id->add_ref(cm_id);
28098 ret = cm_id->event_handler(cm_id, &cm_event);
28099- atomic_inc(&cm_closes);
28100+ atomic_inc_unchecked(&cm_closes);
28101 cm_event.event = IW_CM_EVENT_CLOSE;
28102 cm_event.status = 0;
28103 cm_event.provider_data = cm_id->provider_data;
28104@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28105 return;
28106 cm_id = cm_node->cm_id;
28107
28108- atomic_inc(&cm_connect_reqs);
28109+ atomic_inc_unchecked(&cm_connect_reqs);
28110 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28111 cm_node, cm_id, jiffies);
28112
28113@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28114 return;
28115 cm_id = cm_node->cm_id;
28116
28117- atomic_inc(&cm_connect_reqs);
28118+ atomic_inc_unchecked(&cm_connect_reqs);
28119 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28120 cm_node, cm_id, jiffies);
28121
28122diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.h linux-3.0.7/drivers/infiniband/hw/nes/nes.h
28123--- linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
28124+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
28125@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28126 extern unsigned int wqm_quanta;
28127 extern struct list_head nes_adapter_list;
28128
28129-extern atomic_t cm_connects;
28130-extern atomic_t cm_accepts;
28131-extern atomic_t cm_disconnects;
28132-extern atomic_t cm_closes;
28133-extern atomic_t cm_connecteds;
28134-extern atomic_t cm_connect_reqs;
28135-extern atomic_t cm_rejects;
28136-extern atomic_t mod_qp_timouts;
28137-extern atomic_t qps_created;
28138-extern atomic_t qps_destroyed;
28139-extern atomic_t sw_qps_destroyed;
28140+extern atomic_unchecked_t cm_connects;
28141+extern atomic_unchecked_t cm_accepts;
28142+extern atomic_unchecked_t cm_disconnects;
28143+extern atomic_unchecked_t cm_closes;
28144+extern atomic_unchecked_t cm_connecteds;
28145+extern atomic_unchecked_t cm_connect_reqs;
28146+extern atomic_unchecked_t cm_rejects;
28147+extern atomic_unchecked_t mod_qp_timouts;
28148+extern atomic_unchecked_t qps_created;
28149+extern atomic_unchecked_t qps_destroyed;
28150+extern atomic_unchecked_t sw_qps_destroyed;
28151 extern u32 mh_detected;
28152 extern u32 mh_pauses_sent;
28153 extern u32 cm_packets_sent;
28154@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28155 extern u32 cm_packets_received;
28156 extern u32 cm_packets_dropped;
28157 extern u32 cm_packets_retrans;
28158-extern atomic_t cm_listens_created;
28159-extern atomic_t cm_listens_destroyed;
28160+extern atomic_unchecked_t cm_listens_created;
28161+extern atomic_unchecked_t cm_listens_destroyed;
28162 extern u32 cm_backlog_drops;
28163-extern atomic_t cm_loopbacks;
28164-extern atomic_t cm_nodes_created;
28165-extern atomic_t cm_nodes_destroyed;
28166-extern atomic_t cm_accel_dropped_pkts;
28167-extern atomic_t cm_resets_recvd;
28168+extern atomic_unchecked_t cm_loopbacks;
28169+extern atomic_unchecked_t cm_nodes_created;
28170+extern atomic_unchecked_t cm_nodes_destroyed;
28171+extern atomic_unchecked_t cm_accel_dropped_pkts;
28172+extern atomic_unchecked_t cm_resets_recvd;
28173
28174 extern u32 int_mod_timer_init;
28175 extern u32 int_mod_cq_depth_256;
28176diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c
28177--- linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
28178+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
28179@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28180 target_stat_values[++index] = mh_detected;
28181 target_stat_values[++index] = mh_pauses_sent;
28182 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28183- target_stat_values[++index] = atomic_read(&cm_connects);
28184- target_stat_values[++index] = atomic_read(&cm_accepts);
28185- target_stat_values[++index] = atomic_read(&cm_disconnects);
28186- target_stat_values[++index] = atomic_read(&cm_connecteds);
28187- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28188- target_stat_values[++index] = atomic_read(&cm_rejects);
28189- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28190- target_stat_values[++index] = atomic_read(&qps_created);
28191- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28192- target_stat_values[++index] = atomic_read(&qps_destroyed);
28193- target_stat_values[++index] = atomic_read(&cm_closes);
28194+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28195+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28196+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28197+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28198+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28199+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28200+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28201+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28202+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28203+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28204+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28205 target_stat_values[++index] = cm_packets_sent;
28206 target_stat_values[++index] = cm_packets_bounced;
28207 target_stat_values[++index] = cm_packets_created;
28208 target_stat_values[++index] = cm_packets_received;
28209 target_stat_values[++index] = cm_packets_dropped;
28210 target_stat_values[++index] = cm_packets_retrans;
28211- target_stat_values[++index] = atomic_read(&cm_listens_created);
28212- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28213+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28214+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28215 target_stat_values[++index] = cm_backlog_drops;
28216- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28217- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28218- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28219- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28220- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28221+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28222+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28223+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28224+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28225+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28226 target_stat_values[++index] = nesadapter->free_4kpbl;
28227 target_stat_values[++index] = nesadapter->free_256pbl;
28228 target_stat_values[++index] = int_mod_timer_init;
28229diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c
28230--- linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
28231+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
28232@@ -46,9 +46,9 @@
28233
28234 #include <rdma/ib_umem.h>
28235
28236-atomic_t mod_qp_timouts;
28237-atomic_t qps_created;
28238-atomic_t sw_qps_destroyed;
28239+atomic_unchecked_t mod_qp_timouts;
28240+atomic_unchecked_t qps_created;
28241+atomic_unchecked_t sw_qps_destroyed;
28242
28243 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28244
28245@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
28246 if (init_attr->create_flags)
28247 return ERR_PTR(-EINVAL);
28248
28249- atomic_inc(&qps_created);
28250+ atomic_inc_unchecked(&qps_created);
28251 switch (init_attr->qp_type) {
28252 case IB_QPT_RC:
28253 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28254@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
28255 struct iw_cm_event cm_event;
28256 int ret;
28257
28258- atomic_inc(&sw_qps_destroyed);
28259+ atomic_inc_unchecked(&sw_qps_destroyed);
28260 nesqp->destroyed = 1;
28261
28262 /* Blow away the connection if it exists. */
28263diff -urNp linux-3.0.7/drivers/infiniband/hw/qib/qib.h linux-3.0.7/drivers/infiniband/hw/qib/qib.h
28264--- linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
28265+++ linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
28266@@ -51,6 +51,7 @@
28267 #include <linux/completion.h>
28268 #include <linux/kref.h>
28269 #include <linux/sched.h>
28270+#include <linux/slab.h>
28271
28272 #include "qib_common.h"
28273 #include "qib_verbs.h"
28274diff -urNp linux-3.0.7/drivers/input/gameport/gameport.c linux-3.0.7/drivers/input/gameport/gameport.c
28275--- linux-3.0.7/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
28276+++ linux-3.0.7/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
28277@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28278 */
28279 static void gameport_init_port(struct gameport *gameport)
28280 {
28281- static atomic_t gameport_no = ATOMIC_INIT(0);
28282+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28283
28284 __module_get(THIS_MODULE);
28285
28286 mutex_init(&gameport->drv_mutex);
28287 device_initialize(&gameport->dev);
28288 dev_set_name(&gameport->dev, "gameport%lu",
28289- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28290+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28291 gameport->dev.bus = &gameport_bus;
28292 gameport->dev.release = gameport_release_port;
28293 if (gameport->parent)
28294diff -urNp linux-3.0.7/drivers/input/input.c linux-3.0.7/drivers/input/input.c
28295--- linux-3.0.7/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
28296+++ linux-3.0.7/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
28297@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28298 */
28299 int input_register_device(struct input_dev *dev)
28300 {
28301- static atomic_t input_no = ATOMIC_INIT(0);
28302+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28303 struct input_handler *handler;
28304 const char *path;
28305 int error;
28306@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28307 dev->setkeycode = input_default_setkeycode;
28308
28309 dev_set_name(&dev->dev, "input%ld",
28310- (unsigned long) atomic_inc_return(&input_no) - 1);
28311+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28312
28313 error = device_add(&dev->dev);
28314 if (error)
28315diff -urNp linux-3.0.7/drivers/input/joystick/sidewinder.c linux-3.0.7/drivers/input/joystick/sidewinder.c
28316--- linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
28317+++ linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
28318@@ -30,6 +30,7 @@
28319 #include <linux/kernel.h>
28320 #include <linux/module.h>
28321 #include <linux/slab.h>
28322+#include <linux/sched.h>
28323 #include <linux/init.h>
28324 #include <linux/input.h>
28325 #include <linux/gameport.h>
28326@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28327 unsigned char buf[SW_LENGTH];
28328 int i;
28329
28330+ pax_track_stack();
28331+
28332 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28333
28334 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28335diff -urNp linux-3.0.7/drivers/input/joystick/xpad.c linux-3.0.7/drivers/input/joystick/xpad.c
28336--- linux-3.0.7/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
28337+++ linux-3.0.7/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
28338@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
28339
28340 static int xpad_led_probe(struct usb_xpad *xpad)
28341 {
28342- static atomic_t led_seq = ATOMIC_INIT(0);
28343+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28344 long led_no;
28345 struct xpad_led *led;
28346 struct led_classdev *led_cdev;
28347@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
28348 if (!led)
28349 return -ENOMEM;
28350
28351- led_no = (long)atomic_inc_return(&led_seq) - 1;
28352+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28353
28354 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28355 led->xpad = xpad;
28356diff -urNp linux-3.0.7/drivers/input/mousedev.c linux-3.0.7/drivers/input/mousedev.c
28357--- linux-3.0.7/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
28358+++ linux-3.0.7/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
28359@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28360
28361 spin_unlock_irq(&client->packet_lock);
28362
28363- if (copy_to_user(buffer, data, count))
28364+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28365 return -EFAULT;
28366
28367 return count;
28368diff -urNp linux-3.0.7/drivers/input/serio/serio.c linux-3.0.7/drivers/input/serio/serio.c
28369--- linux-3.0.7/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
28370+++ linux-3.0.7/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
28371@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28372 */
28373 static void serio_init_port(struct serio *serio)
28374 {
28375- static atomic_t serio_no = ATOMIC_INIT(0);
28376+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28377
28378 __module_get(THIS_MODULE);
28379
28380@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28381 mutex_init(&serio->drv_mutex);
28382 device_initialize(&serio->dev);
28383 dev_set_name(&serio->dev, "serio%ld",
28384- (long)atomic_inc_return(&serio_no) - 1);
28385+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28386 serio->dev.bus = &serio_bus;
28387 serio->dev.release = serio_release_port;
28388 serio->dev.groups = serio_device_attr_groups;
28389diff -urNp linux-3.0.7/drivers/isdn/capi/capi.c linux-3.0.7/drivers/isdn/capi/capi.c
28390--- linux-3.0.7/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
28391+++ linux-3.0.7/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
28392@@ -83,8 +83,8 @@ struct capiminor {
28393
28394 struct capi20_appl *ap;
28395 u32 ncci;
28396- atomic_t datahandle;
28397- atomic_t msgid;
28398+ atomic_unchecked_t datahandle;
28399+ atomic_unchecked_t msgid;
28400
28401 struct tty_port port;
28402 int ttyinstop;
28403@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28404 capimsg_setu16(s, 2, mp->ap->applid);
28405 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28406 capimsg_setu8 (s, 5, CAPI_RESP);
28407- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28408+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28409 capimsg_setu32(s, 8, mp->ncci);
28410 capimsg_setu16(s, 12, datahandle);
28411 }
28412@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28413 mp->outbytes -= len;
28414 spin_unlock_bh(&mp->outlock);
28415
28416- datahandle = atomic_inc_return(&mp->datahandle);
28417+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28418 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28419 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28420 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28421 capimsg_setu16(skb->data, 2, mp->ap->applid);
28422 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28423 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28424- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28425+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28426 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28427 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28428 capimsg_setu16(skb->data, 16, len); /* Data length */
28429diff -urNp linux-3.0.7/drivers/isdn/gigaset/common.c linux-3.0.7/drivers/isdn/gigaset/common.c
28430--- linux-3.0.7/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
28431+++ linux-3.0.7/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
28432@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28433 cs->commands_pending = 0;
28434 cs->cur_at_seq = 0;
28435 cs->gotfwver = -1;
28436- cs->open_count = 0;
28437+ local_set(&cs->open_count, 0);
28438 cs->dev = NULL;
28439 cs->tty = NULL;
28440 cs->tty_dev = NULL;
28441diff -urNp linux-3.0.7/drivers/isdn/gigaset/gigaset.h linux-3.0.7/drivers/isdn/gigaset/gigaset.h
28442--- linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
28443+++ linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
28444@@ -35,6 +35,7 @@
28445 #include <linux/tty_driver.h>
28446 #include <linux/list.h>
28447 #include <asm/atomic.h>
28448+#include <asm/local.h>
28449
28450 #define GIG_VERSION {0, 5, 0, 0}
28451 #define GIG_COMPAT {0, 4, 0, 0}
28452@@ -433,7 +434,7 @@ struct cardstate {
28453 spinlock_t cmdlock;
28454 unsigned curlen, cmdbytes;
28455
28456- unsigned open_count;
28457+ local_t open_count;
28458 struct tty_struct *tty;
28459 struct tasklet_struct if_wake_tasklet;
28460 unsigned control_state;
28461diff -urNp linux-3.0.7/drivers/isdn/gigaset/interface.c linux-3.0.7/drivers/isdn/gigaset/interface.c
28462--- linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
28463+++ linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
28464@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28465 }
28466 tty->driver_data = cs;
28467
28468- ++cs->open_count;
28469-
28470- if (cs->open_count == 1) {
28471+ if (local_inc_return(&cs->open_count) == 1) {
28472 spin_lock_irqsave(&cs->lock, flags);
28473 cs->tty = tty;
28474 spin_unlock_irqrestore(&cs->lock, flags);
28475@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28476
28477 if (!cs->connected)
28478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28479- else if (!cs->open_count)
28480+ else if (!local_read(&cs->open_count))
28481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28482 else {
28483- if (!--cs->open_count) {
28484+ if (!local_dec_return(&cs->open_count)) {
28485 spin_lock_irqsave(&cs->lock, flags);
28486 cs->tty = NULL;
28487 spin_unlock_irqrestore(&cs->lock, flags);
28488@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28489 if (!cs->connected) {
28490 gig_dbg(DEBUG_IF, "not connected");
28491 retval = -ENODEV;
28492- } else if (!cs->open_count)
28493+ } else if (!local_read(&cs->open_count))
28494 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28495 else {
28496 retval = 0;
28497@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
28498 retval = -ENODEV;
28499 goto done;
28500 }
28501- if (!cs->open_count) {
28502+ if (!local_read(&cs->open_count)) {
28503 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28504 retval = -ENODEV;
28505 goto done;
28506@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
28507 if (!cs->connected) {
28508 gig_dbg(DEBUG_IF, "not connected");
28509 retval = -ENODEV;
28510- } else if (!cs->open_count)
28511+ } else if (!local_read(&cs->open_count))
28512 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28513 else if (cs->mstate != MS_LOCKED) {
28514 dev_warn(cs->dev, "can't write to unlocked device\n");
28515@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
28516
28517 if (!cs->connected)
28518 gig_dbg(DEBUG_IF, "not connected");
28519- else if (!cs->open_count)
28520+ else if (!local_read(&cs->open_count))
28521 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28522 else if (cs->mstate != MS_LOCKED)
28523 dev_warn(cs->dev, "can't write to unlocked device\n");
28524@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
28525
28526 if (!cs->connected)
28527 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28528- else if (!cs->open_count)
28529+ else if (!local_read(&cs->open_count))
28530 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28531 else
28532 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28533@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
28534
28535 if (!cs->connected)
28536 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28537- else if (!cs->open_count)
28538+ else if (!local_read(&cs->open_count))
28539 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28540 else
28541 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28542@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
28543 goto out;
28544 }
28545
28546- if (!cs->open_count) {
28547+ if (!local_read(&cs->open_count)) {
28548 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28549 goto out;
28550 }
28551diff -urNp linux-3.0.7/drivers/isdn/hardware/avm/b1.c linux-3.0.7/drivers/isdn/hardware/avm/b1.c
28552--- linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
28553+++ linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
28554@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
28555 }
28556 if (left) {
28557 if (t4file->user) {
28558- if (copy_from_user(buf, dp, left))
28559+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28560 return -EFAULT;
28561 } else {
28562 memcpy(buf, dp, left);
28563@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
28564 }
28565 if (left) {
28566 if (config->user) {
28567- if (copy_from_user(buf, dp, left))
28568+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28569 return -EFAULT;
28570 } else {
28571 memcpy(buf, dp, left);
28572diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c
28573--- linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
28574+++ linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
28575@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
28576 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
28577 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
28578
28579+ pax_track_stack();
28580
28581 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
28582 {
28583diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c
28584--- linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
28585+++ linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
28586@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
28587 IDI_SYNC_REQ req;
28588 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28589
28590+ pax_track_stack();
28591+
28592 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28593
28594 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28595diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c
28596--- linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
28597+++ linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
28598@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
28599 IDI_SYNC_REQ req;
28600 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28601
28602+ pax_track_stack();
28603+
28604 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28605
28606 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28607diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c
28608--- linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
28609+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
28610@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
28611 IDI_SYNC_REQ req;
28612 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28613
28614+ pax_track_stack();
28615+
28616 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28617
28618 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28619diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h
28620--- linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
28621+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
28622@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
28623 } diva_didd_add_adapter_t;
28624 typedef struct _diva_didd_remove_adapter {
28625 IDI_CALL p_request;
28626-} diva_didd_remove_adapter_t;
28627+} __no_const diva_didd_remove_adapter_t;
28628 typedef struct _diva_didd_read_adapter_array {
28629 void * buffer;
28630 dword length;
28631diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c
28632--- linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
28633+++ linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
28634@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
28635 IDI_SYNC_REQ req;
28636 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28637
28638+ pax_track_stack();
28639+
28640 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28641
28642 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28643diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/message.c linux-3.0.7/drivers/isdn/hardware/eicon/message.c
28644--- linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
28645+++ linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
28646@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
28647 dword d;
28648 word w;
28649
28650+ pax_track_stack();
28651+
28652 a = plci->adapter;
28653 Id = ((word)plci->Id<<8)|a->Id;
28654 PUT_WORD(&SS_Ind[4],0x0000);
28655@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
28656 word j, n, w;
28657 dword d;
28658
28659+ pax_track_stack();
28660+
28661
28662 for(i=0;i<8;i++) bp_parms[i].length = 0;
28663 for(i=0;i<2;i++) global_config[i].length = 0;
28664@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
28665 const byte llc3[] = {4,3,2,2,6,6,0};
28666 const byte header[] = {0,2,3,3,0,0,0};
28667
28668+ pax_track_stack();
28669+
28670 for(i=0;i<8;i++) bp_parms[i].length = 0;
28671 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
28672 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
28673@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
28674 word appl_number_group_type[MAX_APPL];
28675 PLCI *auxplci;
28676
28677+ pax_track_stack();
28678+
28679 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
28680
28681 if(!a->group_optimization_enabled)
28682diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c
28683--- linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
28684+++ linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
28685@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
28686 IDI_SYNC_REQ req;
28687 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28688
28689+ pax_track_stack();
28690+
28691 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28692
28693 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28694diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h
28695--- linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
28696+++ linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
28697@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
28698 typedef struct _diva_os_idi_adapter_interface {
28699 diva_init_card_proc_t cleanup_adapter_proc;
28700 diva_cmd_card_proc_t cmd_proc;
28701-} diva_os_idi_adapter_interface_t;
28702+} __no_const diva_os_idi_adapter_interface_t;
28703
28704 typedef struct _diva_os_xdi_adapter {
28705 struct list_head link;
28706diff -urNp linux-3.0.7/drivers/isdn/i4l/isdn_common.c linux-3.0.7/drivers/isdn/i4l/isdn_common.c
28707--- linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
28708+++ linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
28709@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
28710 } iocpar;
28711 void __user *argp = (void __user *)arg;
28712
28713+ pax_track_stack();
28714+
28715 #define name iocpar.name
28716 #define bname iocpar.bname
28717 #define iocts iocpar.iocts
28718diff -urNp linux-3.0.7/drivers/isdn/icn/icn.c linux-3.0.7/drivers/isdn/icn/icn.c
28719--- linux-3.0.7/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
28720+++ linux-3.0.7/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
28721@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
28722 if (count > len)
28723 count = len;
28724 if (user) {
28725- if (copy_from_user(msg, buf, count))
28726+ if (count > sizeof msg || copy_from_user(msg, buf, count))
28727 return -EFAULT;
28728 } else
28729 memcpy(msg, buf, count);
28730diff -urNp linux-3.0.7/drivers/lguest/core.c linux-3.0.7/drivers/lguest/core.c
28731--- linux-3.0.7/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
28732+++ linux-3.0.7/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
28733@@ -92,9 +92,17 @@ static __init int map_switcher(void)
28734 * it's worked so far. The end address needs +1 because __get_vm_area
28735 * allocates an extra guard page, so we need space for that.
28736 */
28737+
28738+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28739+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28740+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
28741+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28742+#else
28743 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28744 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
28745 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28746+#endif
28747+
28748 if (!switcher_vma) {
28749 err = -ENOMEM;
28750 printk("lguest: could not map switcher pages high\n");
28751@@ -119,7 +127,7 @@ static __init int map_switcher(void)
28752 * Now the Switcher is mapped at the right address, we can't fail!
28753 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
28754 */
28755- memcpy(switcher_vma->addr, start_switcher_text,
28756+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
28757 end_switcher_text - start_switcher_text);
28758
28759 printk(KERN_INFO "lguest: mapped switcher at %p\n",
28760diff -urNp linux-3.0.7/drivers/lguest/x86/core.c linux-3.0.7/drivers/lguest/x86/core.c
28761--- linux-3.0.7/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
28762+++ linux-3.0.7/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
28763@@ -59,7 +59,7 @@ static struct {
28764 /* Offset from where switcher.S was compiled to where we've copied it */
28765 static unsigned long switcher_offset(void)
28766 {
28767- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
28768+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
28769 }
28770
28771 /* This cpu's struct lguest_pages. */
28772@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
28773 * These copies are pretty cheap, so we do them unconditionally: */
28774 /* Save the current Host top-level page directory.
28775 */
28776+
28777+#ifdef CONFIG_PAX_PER_CPU_PGD
28778+ pages->state.host_cr3 = read_cr3();
28779+#else
28780 pages->state.host_cr3 = __pa(current->mm->pgd);
28781+#endif
28782+
28783 /*
28784 * Set up the Guest's page tables to see this CPU's pages (and no
28785 * other CPU's pages).
28786@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
28787 * compiled-in switcher code and the high-mapped copy we just made.
28788 */
28789 for (i = 0; i < IDT_ENTRIES; i++)
28790- default_idt_entries[i] += switcher_offset();
28791+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
28792
28793 /*
28794 * Set up the Switcher's per-cpu areas.
28795@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
28796 * it will be undisturbed when we switch. To change %cs and jump we
28797 * need this structure to feed to Intel's "lcall" instruction.
28798 */
28799- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
28800+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
28801 lguest_entry.segment = LGUEST_CS;
28802
28803 /*
28804diff -urNp linux-3.0.7/drivers/lguest/x86/switcher_32.S linux-3.0.7/drivers/lguest/x86/switcher_32.S
28805--- linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
28806+++ linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
28807@@ -87,6 +87,7 @@
28808 #include <asm/page.h>
28809 #include <asm/segment.h>
28810 #include <asm/lguest.h>
28811+#include <asm/processor-flags.h>
28812
28813 // We mark the start of the code to copy
28814 // It's placed in .text tho it's never run here
28815@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
28816 // Changes type when we load it: damn Intel!
28817 // For after we switch over our page tables
28818 // That entry will be read-only: we'd crash.
28819+
28820+#ifdef CONFIG_PAX_KERNEXEC
28821+ mov %cr0, %edx
28822+ xor $X86_CR0_WP, %edx
28823+ mov %edx, %cr0
28824+#endif
28825+
28826 movl $(GDT_ENTRY_TSS*8), %edx
28827 ltr %dx
28828
28829@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
28830 // Let's clear it again for our return.
28831 // The GDT descriptor of the Host
28832 // Points to the table after two "size" bytes
28833- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
28834+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
28835 // Clear "used" from type field (byte 5, bit 2)
28836- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
28837+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
28838+
28839+#ifdef CONFIG_PAX_KERNEXEC
28840+ mov %cr0, %eax
28841+ xor $X86_CR0_WP, %eax
28842+ mov %eax, %cr0
28843+#endif
28844
28845 // Once our page table's switched, the Guest is live!
28846 // The Host fades as we run this final step.
28847@@ -295,13 +309,12 @@ deliver_to_host:
28848 // I consulted gcc, and it gave
28849 // These instructions, which I gladly credit:
28850 leal (%edx,%ebx,8), %eax
28851- movzwl (%eax),%edx
28852- movl 4(%eax), %eax
28853- xorw %ax, %ax
28854- orl %eax, %edx
28855+ movl 4(%eax), %edx
28856+ movw (%eax), %dx
28857 // Now the address of the handler's in %edx
28858 // We call it now: its "iret" drops us home.
28859- jmp *%edx
28860+ ljmp $__KERNEL_CS, $1f
28861+1: jmp *%edx
28862
28863 // Every interrupt can come to us here
28864 // But we must truly tell each apart.
28865diff -urNp linux-3.0.7/drivers/macintosh/macio_asic.c linux-3.0.7/drivers/macintosh/macio_asic.c
28866--- linux-3.0.7/drivers/macintosh/macio_asic.c 2011-07-21 22:17:23.000000000 -0400
28867+++ linux-3.0.7/drivers/macintosh/macio_asic.c 2011-10-11 10:44:33.000000000 -0400
28868@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
28869 * MacIO is matched against any Apple ID, it's probe() function
28870 * will then decide wether it applies or not
28871 */
28872-static const struct pci_device_id __devinitdata pci_ids [] = { {
28873+static const struct pci_device_id __devinitconst pci_ids [] = { {
28874 .vendor = PCI_VENDOR_ID_APPLE,
28875 .device = PCI_ANY_ID,
28876 .subvendor = PCI_ANY_ID,
28877diff -urNp linux-3.0.7/drivers/md/dm.c linux-3.0.7/drivers/md/dm.c
28878--- linux-3.0.7/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
28879+++ linux-3.0.7/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
28880@@ -164,9 +164,9 @@ struct mapped_device {
28881 /*
28882 * Event handling.
28883 */
28884- atomic_t event_nr;
28885+ atomic_unchecked_t event_nr;
28886 wait_queue_head_t eventq;
28887- atomic_t uevent_seq;
28888+ atomic_unchecked_t uevent_seq;
28889 struct list_head uevent_list;
28890 spinlock_t uevent_lock; /* Protect access to uevent_list */
28891
28892@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
28893 rwlock_init(&md->map_lock);
28894 atomic_set(&md->holders, 1);
28895 atomic_set(&md->open_count, 0);
28896- atomic_set(&md->event_nr, 0);
28897- atomic_set(&md->uevent_seq, 0);
28898+ atomic_set_unchecked(&md->event_nr, 0);
28899+ atomic_set_unchecked(&md->uevent_seq, 0);
28900 INIT_LIST_HEAD(&md->uevent_list);
28901 spin_lock_init(&md->uevent_lock);
28902
28903@@ -1977,7 +1977,7 @@ static void event_callback(void *context
28904
28905 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
28906
28907- atomic_inc(&md->event_nr);
28908+ atomic_inc_unchecked(&md->event_nr);
28909 wake_up(&md->eventq);
28910 }
28911
28912@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
28913
28914 uint32_t dm_next_uevent_seq(struct mapped_device *md)
28915 {
28916- return atomic_add_return(1, &md->uevent_seq);
28917+ return atomic_add_return_unchecked(1, &md->uevent_seq);
28918 }
28919
28920 uint32_t dm_get_event_nr(struct mapped_device *md)
28921 {
28922- return atomic_read(&md->event_nr);
28923+ return atomic_read_unchecked(&md->event_nr);
28924 }
28925
28926 int dm_wait_event(struct mapped_device *md, int event_nr)
28927 {
28928 return wait_event_interruptible(md->eventq,
28929- (event_nr != atomic_read(&md->event_nr)));
28930+ (event_nr != atomic_read_unchecked(&md->event_nr)));
28931 }
28932
28933 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28934diff -urNp linux-3.0.7/drivers/md/dm-ioctl.c linux-3.0.7/drivers/md/dm-ioctl.c
28935--- linux-3.0.7/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
28936+++ linux-3.0.7/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
28937@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
28938 cmd == DM_LIST_VERSIONS_CMD)
28939 return 0;
28940
28941- if ((cmd == DM_DEV_CREATE_CMD)) {
28942+ if (cmd == DM_DEV_CREATE_CMD) {
28943 if (!*param->name) {
28944 DMWARN("name not supplied when creating device");
28945 return -EINVAL;
28946diff -urNp linux-3.0.7/drivers/md/dm-raid1.c linux-3.0.7/drivers/md/dm-raid1.c
28947--- linux-3.0.7/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
28948+++ linux-3.0.7/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
28949@@ -40,7 +40,7 @@ enum dm_raid1_error {
28950
28951 struct mirror {
28952 struct mirror_set *ms;
28953- atomic_t error_count;
28954+ atomic_unchecked_t error_count;
28955 unsigned long error_type;
28956 struct dm_dev *dev;
28957 sector_t offset;
28958@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
28959 struct mirror *m;
28960
28961 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
28962- if (!atomic_read(&m->error_count))
28963+ if (!atomic_read_unchecked(&m->error_count))
28964 return m;
28965
28966 return NULL;
28967@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
28968 * simple way to tell if a device has encountered
28969 * errors.
28970 */
28971- atomic_inc(&m->error_count);
28972+ atomic_inc_unchecked(&m->error_count);
28973
28974 if (test_and_set_bit(error_type, &m->error_type))
28975 return;
28976@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
28977 struct mirror *m = get_default_mirror(ms);
28978
28979 do {
28980- if (likely(!atomic_read(&m->error_count)))
28981+ if (likely(!atomic_read_unchecked(&m->error_count)))
28982 return m;
28983
28984 if (m-- == ms->mirror)
28985@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
28986 {
28987 struct mirror *default_mirror = get_default_mirror(m->ms);
28988
28989- return !atomic_read(&default_mirror->error_count);
28990+ return !atomic_read_unchecked(&default_mirror->error_count);
28991 }
28992
28993 static int mirror_available(struct mirror_set *ms, struct bio *bio)
28994@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
28995 */
28996 if (likely(region_in_sync(ms, region, 1)))
28997 m = choose_mirror(ms, bio->bi_sector);
28998- else if (m && atomic_read(&m->error_count))
28999+ else if (m && atomic_read_unchecked(&m->error_count))
29000 m = NULL;
29001
29002 if (likely(m))
29003@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29004 }
29005
29006 ms->mirror[mirror].ms = ms;
29007- atomic_set(&(ms->mirror[mirror].error_count), 0);
29008+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29009 ms->mirror[mirror].error_type = 0;
29010 ms->mirror[mirror].offset = offset;
29011
29012@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29013 */
29014 static char device_status_char(struct mirror *m)
29015 {
29016- if (!atomic_read(&(m->error_count)))
29017+ if (!atomic_read_unchecked(&(m->error_count)))
29018 return 'A';
29019
29020 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29021diff -urNp linux-3.0.7/drivers/md/dm-stripe.c linux-3.0.7/drivers/md/dm-stripe.c
29022--- linux-3.0.7/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
29023+++ linux-3.0.7/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
29024@@ -20,7 +20,7 @@ struct stripe {
29025 struct dm_dev *dev;
29026 sector_t physical_start;
29027
29028- atomic_t error_count;
29029+ atomic_unchecked_t error_count;
29030 };
29031
29032 struct stripe_c {
29033@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29034 kfree(sc);
29035 return r;
29036 }
29037- atomic_set(&(sc->stripe[i].error_count), 0);
29038+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29039 }
29040
29041 ti->private = sc;
29042@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29043 DMEMIT("%d ", sc->stripes);
29044 for (i = 0; i < sc->stripes; i++) {
29045 DMEMIT("%s ", sc->stripe[i].dev->name);
29046- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29047+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29048 'D' : 'A';
29049 }
29050 buffer[i] = '\0';
29051@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29052 */
29053 for (i = 0; i < sc->stripes; i++)
29054 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29055- atomic_inc(&(sc->stripe[i].error_count));
29056- if (atomic_read(&(sc->stripe[i].error_count)) <
29057+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29058+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29059 DM_IO_ERROR_THRESHOLD)
29060 schedule_work(&sc->trigger_event);
29061 }
29062diff -urNp linux-3.0.7/drivers/md/dm-table.c linux-3.0.7/drivers/md/dm-table.c
29063--- linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:09.000000000 -0400
29064+++ linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:19.000000000 -0400
29065@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
29066 if (!dev_size)
29067 return 0;
29068
29069- if ((start >= dev_size) || (start + len > dev_size)) {
29070+ if ((start >= dev_size) || (len > dev_size - start)) {
29071 DMWARN("%s: %s too small for target: "
29072 "start=%llu, len=%llu, dev_size=%llu",
29073 dm_device_name(ti->table->md), bdevname(bdev, b),
29074diff -urNp linux-3.0.7/drivers/md/md.c linux-3.0.7/drivers/md/md.c
29075--- linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:09.000000000 -0400
29076+++ linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:19.000000000 -0400
29077@@ -231,10 +231,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
29078 * start build, activate spare
29079 */
29080 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29081-static atomic_t md_event_count;
29082+static atomic_unchecked_t md_event_count;
29083 void md_new_event(mddev_t *mddev)
29084 {
29085- atomic_inc(&md_event_count);
29086+ atomic_inc_unchecked(&md_event_count);
29087 wake_up(&md_event_waiters);
29088 }
29089 EXPORT_SYMBOL_GPL(md_new_event);
29090@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29091 */
29092 static void md_new_event_inintr(mddev_t *mddev)
29093 {
29094- atomic_inc(&md_event_count);
29095+ atomic_inc_unchecked(&md_event_count);
29096 wake_up(&md_event_waiters);
29097 }
29098
29099@@ -1475,7 +1475,7 @@ static int super_1_load(mdk_rdev_t *rdev
29100
29101 rdev->preferred_minor = 0xffff;
29102 rdev->data_offset = le64_to_cpu(sb->data_offset);
29103- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29104+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29105
29106 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29107 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29108@@ -1653,7 +1653,7 @@ static void super_1_sync(mddev_t *mddev,
29109 else
29110 sb->resync_offset = cpu_to_le64(0);
29111
29112- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29113+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29114
29115 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29116 sb->size = cpu_to_le64(mddev->dev_sectors);
29117@@ -2446,7 +2446,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29118 static ssize_t
29119 errors_show(mdk_rdev_t *rdev, char *page)
29120 {
29121- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29122+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29123 }
29124
29125 static ssize_t
29126@@ -2455,7 +2455,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29127 char *e;
29128 unsigned long n = simple_strtoul(buf, &e, 10);
29129 if (*buf && (*e == 0 || *e == '\n')) {
29130- atomic_set(&rdev->corrected_errors, n);
29131+ atomic_set_unchecked(&rdev->corrected_errors, n);
29132 return len;
29133 }
29134 return -EINVAL;
29135@@ -2811,8 +2811,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
29136 rdev->last_read_error.tv_sec = 0;
29137 rdev->last_read_error.tv_nsec = 0;
29138 atomic_set(&rdev->nr_pending, 0);
29139- atomic_set(&rdev->read_errors, 0);
29140- atomic_set(&rdev->corrected_errors, 0);
29141+ atomic_set_unchecked(&rdev->read_errors, 0);
29142+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29143
29144 INIT_LIST_HEAD(&rdev->same_set);
29145 init_waitqueue_head(&rdev->blocked_wait);
29146@@ -6440,7 +6440,7 @@ static int md_seq_show(struct seq_file *
29147
29148 spin_unlock(&pers_lock);
29149 seq_printf(seq, "\n");
29150- mi->event = atomic_read(&md_event_count);
29151+ mi->event = atomic_read_unchecked(&md_event_count);
29152 return 0;
29153 }
29154 if (v == (void*)2) {
29155@@ -6529,7 +6529,7 @@ static int md_seq_show(struct seq_file *
29156 chunk_kb ? "KB" : "B");
29157 if (bitmap->file) {
29158 seq_printf(seq, ", file: ");
29159- seq_path(seq, &bitmap->file->f_path, " \t\n");
29160+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29161 }
29162
29163 seq_printf(seq, "\n");
29164@@ -6563,7 +6563,7 @@ static int md_seq_open(struct inode *ino
29165 else {
29166 struct seq_file *p = file->private_data;
29167 p->private = mi;
29168- mi->event = atomic_read(&md_event_count);
29169+ mi->event = atomic_read_unchecked(&md_event_count);
29170 }
29171 return error;
29172 }
29173@@ -6579,7 +6579,7 @@ static unsigned int mdstat_poll(struct f
29174 /* always allow read */
29175 mask = POLLIN | POLLRDNORM;
29176
29177- if (mi->event != atomic_read(&md_event_count))
29178+ if (mi->event != atomic_read_unchecked(&md_event_count))
29179 mask |= POLLERR | POLLPRI;
29180 return mask;
29181 }
29182@@ -6623,7 +6623,7 @@ static int is_mddev_idle(mddev_t *mddev,
29183 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29184 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29185 (int)part_stat_read(&disk->part0, sectors[1]) -
29186- atomic_read(&disk->sync_io);
29187+ atomic_read_unchecked(&disk->sync_io);
29188 /* sync IO will cause sync_io to increase before the disk_stats
29189 * as sync_io is counted when a request starts, and
29190 * disk_stats is counted when it completes.
29191diff -urNp linux-3.0.7/drivers/md/md.h linux-3.0.7/drivers/md/md.h
29192--- linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:09.000000000 -0400
29193+++ linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:19.000000000 -0400
29194@@ -97,13 +97,13 @@ struct mdk_rdev_s
29195 * only maintained for arrays that
29196 * support hot removal
29197 */
29198- atomic_t read_errors; /* number of consecutive read errors that
29199+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29200 * we have tried to ignore.
29201 */
29202 struct timespec last_read_error; /* monotonic time since our
29203 * last read error
29204 */
29205- atomic_t corrected_errors; /* number of corrected read errors,
29206+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29207 * for reporting to userspace and storing
29208 * in superblock.
29209 */
29210@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
29211
29212 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29213 {
29214- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29215+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29216 }
29217
29218 struct mdk_personality
29219diff -urNp linux-3.0.7/drivers/md/raid10.c linux-3.0.7/drivers/md/raid10.c
29220--- linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:09.000000000 -0400
29221+++ linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:19.000000000 -0400
29222@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
29223 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
29224 set_bit(R10BIO_Uptodate, &r10_bio->state);
29225 else {
29226- atomic_add(r10_bio->sectors,
29227+ atomic_add_unchecked(r10_bio->sectors,
29228 &conf->mirrors[d].rdev->corrected_errors);
29229 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
29230 md_error(r10_bio->mddev,
29231@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
29232 {
29233 struct timespec cur_time_mon;
29234 unsigned long hours_since_last;
29235- unsigned int read_errors = atomic_read(&rdev->read_errors);
29236+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29237
29238 ktime_get_ts(&cur_time_mon);
29239
29240@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
29241 * overflowing the shift of read_errors by hours_since_last.
29242 */
29243 if (hours_since_last >= 8 * sizeof(read_errors))
29244- atomic_set(&rdev->read_errors, 0);
29245+ atomic_set_unchecked(&rdev->read_errors, 0);
29246 else
29247- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29248+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29249 }
29250
29251 /*
29252@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
29253 return;
29254
29255 check_decay_read_errors(mddev, rdev);
29256- atomic_inc(&rdev->read_errors);
29257- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29258+ atomic_inc_unchecked(&rdev->read_errors);
29259+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29260 char b[BDEVNAME_SIZE];
29261 bdevname(rdev->bdev, b);
29262
29263@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
29264 "md/raid10:%s: %s: Raid device exceeded "
29265 "read_error threshold [cur %d:max %d]\n",
29266 mdname(mddev), b,
29267- atomic_read(&rdev->read_errors), max_read_errors);
29268+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29269 printk(KERN_NOTICE
29270 "md/raid10:%s: %s: Failing raid device\n",
29271 mdname(mddev), b);
29272@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
29273 test_bit(In_sync, &rdev->flags)) {
29274 atomic_inc(&rdev->nr_pending);
29275 rcu_read_unlock();
29276- atomic_add(s, &rdev->corrected_errors);
29277+ atomic_add_unchecked(s, &rdev->corrected_errors);
29278 if (sync_page_io(rdev,
29279 r10_bio->devs[sl].addr +
29280 sect,
29281diff -urNp linux-3.0.7/drivers/md/raid1.c linux-3.0.7/drivers/md/raid1.c
29282--- linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:09.000000000 -0400
29283+++ linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:19.000000000 -0400
29284@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
29285 rdev_dec_pending(rdev, mddev);
29286 md_error(mddev, rdev);
29287 } else
29288- atomic_add(s, &rdev->corrected_errors);
29289+ atomic_add_unchecked(s, &rdev->corrected_errors);
29290 }
29291 d = start;
29292 while (d != r1_bio->read_disk) {
29293@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
29294 /* Well, this device is dead */
29295 md_error(mddev, rdev);
29296 else {
29297- atomic_add(s, &rdev->corrected_errors);
29298+ atomic_add_unchecked(s, &rdev->corrected_errors);
29299 printk(KERN_INFO
29300 "md/raid1:%s: read error corrected "
29301 "(%d sectors at %llu on %s)\n",
29302diff -urNp linux-3.0.7/drivers/md/raid5.c linux-3.0.7/drivers/md/raid5.c
29303--- linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:09.000000000 -0400
29304+++ linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:19.000000000 -0400
29305@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
29306 bi->bi_next = NULL;
29307 if ((rw & WRITE) &&
29308 test_bit(R5_ReWrite, &sh->dev[i].flags))
29309- atomic_add(STRIPE_SECTORS,
29310+ atomic_add_unchecked(STRIPE_SECTORS,
29311 &rdev->corrected_errors);
29312 generic_make_request(bi);
29313 } else {
29314@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
29315 clear_bit(R5_ReadError, &sh->dev[i].flags);
29316 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29317 }
29318- if (atomic_read(&conf->disks[i].rdev->read_errors))
29319- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29320+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29321+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29322 } else {
29323 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29324 int retry = 0;
29325 rdev = conf->disks[i].rdev;
29326
29327 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29328- atomic_inc(&rdev->read_errors);
29329+ atomic_inc_unchecked(&rdev->read_errors);
29330 if (conf->mddev->degraded >= conf->max_degraded)
29331 printk_rl(KERN_WARNING
29332 "md/raid:%s: read error not correctable "
29333@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
29334 (unsigned long long)(sh->sector
29335 + rdev->data_offset),
29336 bdn);
29337- else if (atomic_read(&rdev->read_errors)
29338+ else if (atomic_read_unchecked(&rdev->read_errors)
29339 > conf->max_nr_stripes)
29340 printk(KERN_WARNING
29341 "md/raid:%s: Too many read errors, failing device %s.\n",
29342@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
29343 sector_t r_sector;
29344 struct stripe_head sh2;
29345
29346+ pax_track_stack();
29347
29348 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29349 stripe = new_sector;
29350diff -urNp linux-3.0.7/drivers/media/common/saa7146_hlp.c linux-3.0.7/drivers/media/common/saa7146_hlp.c
29351--- linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
29352+++ linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
29353@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29354
29355 int x[32], y[32], w[32], h[32];
29356
29357+ pax_track_stack();
29358+
29359 /* clear out memory */
29360 memset(&line_list[0], 0x00, sizeof(u32)*32);
29361 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29362diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29363--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
29364+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
29365@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29366 u8 buf[HOST_LINK_BUF_SIZE];
29367 int i;
29368
29369+ pax_track_stack();
29370+
29371 dprintk("%s\n", __func__);
29372
29373 /* check if we have space for a link buf in the rx_buffer */
29374@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29375 unsigned long timeout;
29376 int written;
29377
29378+ pax_track_stack();
29379+
29380 dprintk("%s\n", __func__);
29381
29382 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29383diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h
29384--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
29385+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
29386@@ -68,12 +68,12 @@ struct dvb_demux_feed {
29387 union {
29388 struct dmx_ts_feed ts;
29389 struct dmx_section_feed sec;
29390- } feed;
29391+ } __no_const feed;
29392
29393 union {
29394 dmx_ts_cb ts;
29395 dmx_section_cb sec;
29396- } cb;
29397+ } __no_const cb;
29398
29399 struct dvb_demux *demux;
29400 void *priv;
29401diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c
29402--- linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
29403+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
29404@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29405 const struct dvb_device *template, void *priv, int type)
29406 {
29407 struct dvb_device *dvbdev;
29408- struct file_operations *dvbdevfops;
29409+ file_operations_no_const *dvbdevfops;
29410 struct device *clsdev;
29411 int minor;
29412 int id;
29413diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c
29414--- linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
29415+++ linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
29416@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29417 struct dib0700_adapter_state {
29418 int (*set_param_save) (struct dvb_frontend *,
29419 struct dvb_frontend_parameters *);
29420-};
29421+} __no_const;
29422
29423 static int dib7070_set_param_override(struct dvb_frontend *fe,
29424 struct dvb_frontend_parameters *fep)
29425diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c
29426--- linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
29427+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
29428@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
29429 if (!buf)
29430 return -ENOMEM;
29431
29432+ pax_track_stack();
29433+
29434 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29435 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29436 hx.addr, hx.len, hx.chk);
29437diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h
29438--- linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
29439+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
29440@@ -97,7 +97,7 @@
29441 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
29442
29443 struct dibusb_state {
29444- struct dib_fe_xfer_ops ops;
29445+ dib_fe_xfer_ops_no_const ops;
29446 int mt2060_present;
29447 u8 tuner_addr;
29448 };
29449diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c
29450--- linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
29451+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
29452@@ -95,7 +95,7 @@ struct su3000_state {
29453
29454 struct s6x0_state {
29455 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29456-};
29457+} __no_const;
29458
29459 /* debug */
29460 static int dvb_usb_dw2102_debug;
29461diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c
29462--- linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
29463+++ linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
29464@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29465 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29466 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29467
29468+ pax_track_stack();
29469
29470 data[0] = 0x8a;
29471 len_in = 1;
29472@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29473 int ret = 0, len_in;
29474 u8 data[512] = {0};
29475
29476+ pax_track_stack();
29477+
29478 data[0] = 0x0a;
29479 len_in = 1;
29480 info("FRM Firmware Cold Reset");
29481diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000.h linux-3.0.7/drivers/media/dvb/frontends/dib3000.h
29482--- linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
29483+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
29484@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
29485 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
29486 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
29487 };
29488+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
29489
29490 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
29491 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29492- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
29493+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
29494 #else
29495 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29496 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29497diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c
29498--- linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
29499+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
29500@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
29501 static struct dvb_frontend_ops dib3000mb_ops;
29502
29503 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29504- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29505+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
29506 {
29507 struct dib3000_state* state = NULL;
29508
29509diff -urNp linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c
29510--- linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
29511+++ linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
29512@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
29513 int ret = -1;
29514 int sync;
29515
29516+ pax_track_stack();
29517+
29518 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
29519
29520 fcp = 3000;
29521diff -urNp linux-3.0.7/drivers/media/dvb/frontends/or51211.c linux-3.0.7/drivers/media/dvb/frontends/or51211.c
29522--- linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
29523+++ linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
29524@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
29525 u8 tudata[585];
29526 int i;
29527
29528+ pax_track_stack();
29529+
29530 dprintk("Firmware is %zd bytes\n",fw->size);
29531
29532 /* Get eprom data */
29533diff -urNp linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c
29534--- linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-07-21 22:17:23.000000000 -0400
29535+++ linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-10-11 10:44:33.000000000 -0400
29536@@ -379,7 +379,7 @@ static struct ngene_info ngene_info_m780
29537
29538 /****************************************************************************/
29539
29540-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
29541+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
29542 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
29543 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
29544 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
29545diff -urNp linux-3.0.7/drivers/media/video/cx18/cx18-driver.c linux-3.0.7/drivers/media/video/cx18/cx18-driver.c
29546--- linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
29547+++ linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
29548@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
29549 struct i2c_client c;
29550 u8 eedata[256];
29551
29552+ pax_track_stack();
29553+
29554 memset(&c, 0, sizeof(c));
29555 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
29556 c.adapter = &cx->i2c_adap[0];
29557diff -urNp linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c
29558--- linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
29559+++ linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
29560@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
29561 bool handle = false;
29562 struct ir_raw_event ir_core_event[64];
29563
29564+ pax_track_stack();
29565+
29566 do {
29567 num = 0;
29568 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
29569diff -urNp linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c
29570--- linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-07-21 22:17:23.000000000 -0400
29571+++ linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-10-11 10:44:33.000000000 -0400
29572@@ -764,7 +764,7 @@ static struct snd_kcontrol_new snd_cx88_
29573 * Only boards with eeprom and byte 1 at eeprom=1 have it
29574 */
29575
29576-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
29577+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
29578 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29579 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29580 {0, }
29581diff -urNp linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
29582--- linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
29583+++ linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
29584@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
29585 u8 *eeprom;
29586 struct tveeprom tvdata;
29587
29588+ pax_track_stack();
29589+
29590 memset(&tvdata,0,sizeof(tvdata));
29591
29592 eeprom = pvr2_eeprom_fetch(hdw);
29593diff -urNp linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c
29594--- linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
29595+++ linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
29596@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
29597 unsigned char localPAT[256];
29598 unsigned char localPMT[256];
29599
29600+ pax_track_stack();
29601+
29602 /* Set video format - must be done first as it resets other settings */
29603 set_reg8(client, 0x41, h->video_format);
29604
29605diff -urNp linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c
29606--- linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
29607+++ linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
29608@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
29609 u8 tmp[512];
29610 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29611
29612+ pax_track_stack();
29613+
29614 /* While any outstand message on the bus exists... */
29615 do {
29616
29617@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
29618 u8 tmp[512];
29619 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29620
29621+ pax_track_stack();
29622+
29623 while (loop) {
29624
29625 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
29626diff -urNp linux-3.0.7/drivers/media/video/timblogiw.c linux-3.0.7/drivers/media/video/timblogiw.c
29627--- linux-3.0.7/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
29628+++ linux-3.0.7/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
29629@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
29630
29631 /* Platform device functions */
29632
29633-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
29634+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
29635 .vidioc_querycap = timblogiw_querycap,
29636 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
29637 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
29638diff -urNp linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c
29639--- linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
29640+++ linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
29641@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
29642 unsigned char rv, gv, bv;
29643 static unsigned char *Y, *U, *V;
29644
29645+ pax_track_stack();
29646+
29647 frame = usbvision->cur_frame;
29648 image_size = frame->frmwidth * frame->frmheight;
29649 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
29650diff -urNp linux-3.0.7/drivers/media/video/videobuf-dma-sg.c linux-3.0.7/drivers/media/video/videobuf-dma-sg.c
29651--- linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
29652+++ linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
29653@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
29654 {
29655 struct videobuf_queue q;
29656
29657+ pax_track_stack();
29658+
29659 /* Required to make generic handler to call __videobuf_alloc */
29660 q.int_ops = &sg_ops;
29661
29662diff -urNp linux-3.0.7/drivers/message/fusion/mptbase.c linux-3.0.7/drivers/message/fusion/mptbase.c
29663--- linux-3.0.7/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
29664+++ linux-3.0.7/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
29665@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
29666 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
29667 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
29668
29669+#ifdef CONFIG_GRKERNSEC_HIDESYM
29670+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
29671+#else
29672 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
29673 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
29674+#endif
29675+
29676 /*
29677 * Rounding UP to nearest 4-kB boundary here...
29678 */
29679diff -urNp linux-3.0.7/drivers/message/fusion/mptsas.c linux-3.0.7/drivers/message/fusion/mptsas.c
29680--- linux-3.0.7/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
29681+++ linux-3.0.7/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
29682@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
29683 return 0;
29684 }
29685
29686+static inline void
29687+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29688+{
29689+ if (phy_info->port_details) {
29690+ phy_info->port_details->rphy = rphy;
29691+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29692+ ioc->name, rphy));
29693+ }
29694+
29695+ if (rphy) {
29696+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29697+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29698+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29699+ ioc->name, rphy, rphy->dev.release));
29700+ }
29701+}
29702+
29703 /* no mutex */
29704 static void
29705 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
29706@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
29707 return NULL;
29708 }
29709
29710-static inline void
29711-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29712-{
29713- if (phy_info->port_details) {
29714- phy_info->port_details->rphy = rphy;
29715- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29716- ioc->name, rphy));
29717- }
29718-
29719- if (rphy) {
29720- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29721- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29722- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29723- ioc->name, rphy, rphy->dev.release));
29724- }
29725-}
29726-
29727 static inline struct sas_port *
29728 mptsas_get_port(struct mptsas_phyinfo *phy_info)
29729 {
29730diff -urNp linux-3.0.7/drivers/message/fusion/mptscsih.c linux-3.0.7/drivers/message/fusion/mptscsih.c
29731--- linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
29732+++ linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
29733@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
29734
29735 h = shost_priv(SChost);
29736
29737- if (h) {
29738- if (h->info_kbuf == NULL)
29739- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29740- return h->info_kbuf;
29741- h->info_kbuf[0] = '\0';
29742+ if (!h)
29743+ return NULL;
29744
29745- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29746- h->info_kbuf[size-1] = '\0';
29747- }
29748+ if (h->info_kbuf == NULL)
29749+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29750+ return h->info_kbuf;
29751+ h->info_kbuf[0] = '\0';
29752+
29753+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29754+ h->info_kbuf[size-1] = '\0';
29755
29756 return h->info_kbuf;
29757 }
29758diff -urNp linux-3.0.7/drivers/message/i2o/i2o_config.c linux-3.0.7/drivers/message/i2o/i2o_config.c
29759--- linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
29760+++ linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
29761@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
29762 struct i2o_message *msg;
29763 unsigned int iop;
29764
29765+ pax_track_stack();
29766+
29767 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
29768 return -EFAULT;
29769
29770diff -urNp linux-3.0.7/drivers/message/i2o/i2o_proc.c linux-3.0.7/drivers/message/i2o/i2o_proc.c
29771--- linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
29772+++ linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
29773@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
29774 "Array Controller Device"
29775 };
29776
29777-static char *chtostr(u8 * chars, int n)
29778-{
29779- char tmp[256];
29780- tmp[0] = 0;
29781- return strncat(tmp, (char *)chars, n);
29782-}
29783-
29784 static int i2o_report_query_status(struct seq_file *seq, int block_status,
29785 char *group)
29786 {
29787@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
29788
29789 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
29790 seq_printf(seq, "%-#8x", ddm_table.module_id);
29791- seq_printf(seq, "%-29s",
29792- chtostr(ddm_table.module_name_version, 28));
29793+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
29794 seq_printf(seq, "%9d ", ddm_table.data_size);
29795 seq_printf(seq, "%8d", ddm_table.code_size);
29796
29797@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
29798
29799 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
29800 seq_printf(seq, "%-#8x", dst->module_id);
29801- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
29802- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
29803+ seq_printf(seq, "%-.28s", dst->module_name_version);
29804+ seq_printf(seq, "%-.8s", dst->date);
29805 seq_printf(seq, "%8d ", dst->module_size);
29806 seq_printf(seq, "%8d ", dst->mpb_size);
29807 seq_printf(seq, "0x%04x", dst->module_flags);
29808@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
29809 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
29810 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
29811 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
29812- seq_printf(seq, "Vendor info : %s\n",
29813- chtostr((u8 *) (work32 + 2), 16));
29814- seq_printf(seq, "Product info : %s\n",
29815- chtostr((u8 *) (work32 + 6), 16));
29816- seq_printf(seq, "Description : %s\n",
29817- chtostr((u8 *) (work32 + 10), 16));
29818- seq_printf(seq, "Product rev. : %s\n",
29819- chtostr((u8 *) (work32 + 14), 8));
29820+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
29821+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
29822+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
29823+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
29824
29825 seq_printf(seq, "Serial number : ");
29826 print_serial_number(seq, (u8 *) (work32 + 16),
29827@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
29828 }
29829
29830 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
29831- seq_printf(seq, "Module name : %s\n",
29832- chtostr(result.module_name, 24));
29833- seq_printf(seq, "Module revision : %s\n",
29834- chtostr(result.module_rev, 8));
29835+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
29836+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
29837
29838 seq_printf(seq, "Serial number : ");
29839 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
29840@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
29841 return 0;
29842 }
29843
29844- seq_printf(seq, "Device name : %s\n",
29845- chtostr(result.device_name, 64));
29846- seq_printf(seq, "Service name : %s\n",
29847- chtostr(result.service_name, 64));
29848- seq_printf(seq, "Physical name : %s\n",
29849- chtostr(result.physical_location, 64));
29850- seq_printf(seq, "Instance number : %s\n",
29851- chtostr(result.instance_number, 4));
29852+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
29853+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
29854+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
29855+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
29856
29857 return 0;
29858 }
29859diff -urNp linux-3.0.7/drivers/message/i2o/iop.c linux-3.0.7/drivers/message/i2o/iop.c
29860--- linux-3.0.7/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
29861+++ linux-3.0.7/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
29862@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
29863
29864 spin_lock_irqsave(&c->context_list_lock, flags);
29865
29866- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
29867- atomic_inc(&c->context_list_counter);
29868+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
29869+ atomic_inc_unchecked(&c->context_list_counter);
29870
29871- entry->context = atomic_read(&c->context_list_counter);
29872+ entry->context = atomic_read_unchecked(&c->context_list_counter);
29873
29874 list_add(&entry->list, &c->context_list);
29875
29876@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
29877
29878 #if BITS_PER_LONG == 64
29879 spin_lock_init(&c->context_list_lock);
29880- atomic_set(&c->context_list_counter, 0);
29881+ atomic_set_unchecked(&c->context_list_counter, 0);
29882 INIT_LIST_HEAD(&c->context_list);
29883 #endif
29884
29885diff -urNp linux-3.0.7/drivers/mfd/ab3100-core.c linux-3.0.7/drivers/mfd/ab3100-core.c
29886--- linux-3.0.7/drivers/mfd/ab3100-core.c 2011-07-21 22:17:23.000000000 -0400
29887+++ linux-3.0.7/drivers/mfd/ab3100-core.c 2011-10-11 10:44:33.000000000 -0400
29888@@ -809,7 +809,7 @@ struct ab_family_id {
29889 char *name;
29890 };
29891
29892-static const struct ab_family_id ids[] __devinitdata = {
29893+static const struct ab_family_id ids[] __devinitconst = {
29894 /* AB3100 */
29895 {
29896 .id = 0xc0,
29897diff -urNp linux-3.0.7/drivers/mfd/abx500-core.c linux-3.0.7/drivers/mfd/abx500-core.c
29898--- linux-3.0.7/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
29899+++ linux-3.0.7/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
29900@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
29901
29902 struct abx500_device_entry {
29903 struct list_head list;
29904- struct abx500_ops ops;
29905+ abx500_ops_no_const ops;
29906 struct device *dev;
29907 };
29908
29909diff -urNp linux-3.0.7/drivers/mfd/janz-cmodio.c linux-3.0.7/drivers/mfd/janz-cmodio.c
29910--- linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
29911+++ linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
29912@@ -13,6 +13,7 @@
29913
29914 #include <linux/kernel.h>
29915 #include <linux/module.h>
29916+#include <linux/slab.h>
29917 #include <linux/init.h>
29918 #include <linux/pci.h>
29919 #include <linux/interrupt.h>
29920diff -urNp linux-3.0.7/drivers/mfd/wm8350-i2c.c linux-3.0.7/drivers/mfd/wm8350-i2c.c
29921--- linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
29922+++ linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
29923@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
29924 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
29925 int ret;
29926
29927+ pax_track_stack();
29928+
29929 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
29930 return -EINVAL;
29931
29932diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c
29933--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:09.000000000 -0400
29934+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:19.000000000 -0400
29935@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
29936 * the lid is closed. This leads to interrupts as soon as a little move
29937 * is done.
29938 */
29939- atomic_inc(&lis3_dev.count);
29940+ atomic_inc_unchecked(&lis3_dev.count);
29941
29942 wake_up_interruptible(&lis3_dev.misc_wait);
29943 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29944@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
29945 if (lis3_dev.pm_dev)
29946 pm_runtime_get_sync(lis3_dev.pm_dev);
29947
29948- atomic_set(&lis3_dev.count, 0);
29949+ atomic_set_unchecked(&lis3_dev.count, 0);
29950 return 0;
29951 }
29952
29953@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
29954 add_wait_queue(&lis3_dev.misc_wait, &wait);
29955 while (true) {
29956 set_current_state(TASK_INTERRUPTIBLE);
29957- data = atomic_xchg(&lis3_dev.count, 0);
29958+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29959 if (data)
29960 break;
29961
29962@@ -585,7 +585,7 @@ out:
29963 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29964 {
29965 poll_wait(file, &lis3_dev.misc_wait, wait);
29966- if (atomic_read(&lis3_dev.count))
29967+ if (atomic_read_unchecked(&lis3_dev.count))
29968 return POLLIN | POLLRDNORM;
29969 return 0;
29970 }
29971diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h
29972--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
29973+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
29974@@ -265,7 +265,7 @@ struct lis3lv02d {
29975 struct input_polled_dev *idev; /* input device */
29976 struct platform_device *pdev; /* platform device */
29977 struct regulator_bulk_data regulators[2];
29978- atomic_t count; /* interrupt count after last read */
29979+ atomic_unchecked_t count; /* interrupt count after last read */
29980 union axis_conversion ac; /* hw -> logical axis */
29981 int mapped_btns[3];
29982
29983diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c
29984--- linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
29985+++ linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
29986@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
29987 unsigned long nsec;
29988
29989 nsec = CLKS2NSEC(clks);
29990- atomic_long_inc(&mcs_op_statistics[op].count);
29991- atomic_long_add(nsec, &mcs_op_statistics[op].total);
29992+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
29993+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
29994 if (mcs_op_statistics[op].max < nsec)
29995 mcs_op_statistics[op].max = nsec;
29996 }
29997diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c
29998--- linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
29999+++ linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
30000@@ -32,9 +32,9 @@
30001
30002 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30003
30004-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30005+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30006 {
30007- unsigned long val = atomic_long_read(v);
30008+ unsigned long val = atomic_long_read_unchecked(v);
30009
30010 seq_printf(s, "%16lu %s\n", val, id);
30011 }
30012@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30013
30014 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30015 for (op = 0; op < mcsop_last; op++) {
30016- count = atomic_long_read(&mcs_op_statistics[op].count);
30017- total = atomic_long_read(&mcs_op_statistics[op].total);
30018+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30019+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30020 max = mcs_op_statistics[op].max;
30021 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30022 count ? total / count : 0, max);
30023diff -urNp linux-3.0.7/drivers/misc/sgi-gru/grutables.h linux-3.0.7/drivers/misc/sgi-gru/grutables.h
30024--- linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
30025+++ linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
30026@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30027 * GRU statistics.
30028 */
30029 struct gru_stats_s {
30030- atomic_long_t vdata_alloc;
30031- atomic_long_t vdata_free;
30032- atomic_long_t gts_alloc;
30033- atomic_long_t gts_free;
30034- atomic_long_t gms_alloc;
30035- atomic_long_t gms_free;
30036- atomic_long_t gts_double_allocate;
30037- atomic_long_t assign_context;
30038- atomic_long_t assign_context_failed;
30039- atomic_long_t free_context;
30040- atomic_long_t load_user_context;
30041- atomic_long_t load_kernel_context;
30042- atomic_long_t lock_kernel_context;
30043- atomic_long_t unlock_kernel_context;
30044- atomic_long_t steal_user_context;
30045- atomic_long_t steal_kernel_context;
30046- atomic_long_t steal_context_failed;
30047- atomic_long_t nopfn;
30048- atomic_long_t asid_new;
30049- atomic_long_t asid_next;
30050- atomic_long_t asid_wrap;
30051- atomic_long_t asid_reuse;
30052- atomic_long_t intr;
30053- atomic_long_t intr_cbr;
30054- atomic_long_t intr_tfh;
30055- atomic_long_t intr_spurious;
30056- atomic_long_t intr_mm_lock_failed;
30057- atomic_long_t call_os;
30058- atomic_long_t call_os_wait_queue;
30059- atomic_long_t user_flush_tlb;
30060- atomic_long_t user_unload_context;
30061- atomic_long_t user_exception;
30062- atomic_long_t set_context_option;
30063- atomic_long_t check_context_retarget_intr;
30064- atomic_long_t check_context_unload;
30065- atomic_long_t tlb_dropin;
30066- atomic_long_t tlb_preload_page;
30067- atomic_long_t tlb_dropin_fail_no_asid;
30068- atomic_long_t tlb_dropin_fail_upm;
30069- atomic_long_t tlb_dropin_fail_invalid;
30070- atomic_long_t tlb_dropin_fail_range_active;
30071- atomic_long_t tlb_dropin_fail_idle;
30072- atomic_long_t tlb_dropin_fail_fmm;
30073- atomic_long_t tlb_dropin_fail_no_exception;
30074- atomic_long_t tfh_stale_on_fault;
30075- atomic_long_t mmu_invalidate_range;
30076- atomic_long_t mmu_invalidate_page;
30077- atomic_long_t flush_tlb;
30078- atomic_long_t flush_tlb_gru;
30079- atomic_long_t flush_tlb_gru_tgh;
30080- atomic_long_t flush_tlb_gru_zero_asid;
30081-
30082- atomic_long_t copy_gpa;
30083- atomic_long_t read_gpa;
30084-
30085- atomic_long_t mesq_receive;
30086- atomic_long_t mesq_receive_none;
30087- atomic_long_t mesq_send;
30088- atomic_long_t mesq_send_failed;
30089- atomic_long_t mesq_noop;
30090- atomic_long_t mesq_send_unexpected_error;
30091- atomic_long_t mesq_send_lb_overflow;
30092- atomic_long_t mesq_send_qlimit_reached;
30093- atomic_long_t mesq_send_amo_nacked;
30094- atomic_long_t mesq_send_put_nacked;
30095- atomic_long_t mesq_page_overflow;
30096- atomic_long_t mesq_qf_locked;
30097- atomic_long_t mesq_qf_noop_not_full;
30098- atomic_long_t mesq_qf_switch_head_failed;
30099- atomic_long_t mesq_qf_unexpected_error;
30100- atomic_long_t mesq_noop_unexpected_error;
30101- atomic_long_t mesq_noop_lb_overflow;
30102- atomic_long_t mesq_noop_qlimit_reached;
30103- atomic_long_t mesq_noop_amo_nacked;
30104- atomic_long_t mesq_noop_put_nacked;
30105- atomic_long_t mesq_noop_page_overflow;
30106+ atomic_long_unchecked_t vdata_alloc;
30107+ atomic_long_unchecked_t vdata_free;
30108+ atomic_long_unchecked_t gts_alloc;
30109+ atomic_long_unchecked_t gts_free;
30110+ atomic_long_unchecked_t gms_alloc;
30111+ atomic_long_unchecked_t gms_free;
30112+ atomic_long_unchecked_t gts_double_allocate;
30113+ atomic_long_unchecked_t assign_context;
30114+ atomic_long_unchecked_t assign_context_failed;
30115+ atomic_long_unchecked_t free_context;
30116+ atomic_long_unchecked_t load_user_context;
30117+ atomic_long_unchecked_t load_kernel_context;
30118+ atomic_long_unchecked_t lock_kernel_context;
30119+ atomic_long_unchecked_t unlock_kernel_context;
30120+ atomic_long_unchecked_t steal_user_context;
30121+ atomic_long_unchecked_t steal_kernel_context;
30122+ atomic_long_unchecked_t steal_context_failed;
30123+ atomic_long_unchecked_t nopfn;
30124+ atomic_long_unchecked_t asid_new;
30125+ atomic_long_unchecked_t asid_next;
30126+ atomic_long_unchecked_t asid_wrap;
30127+ atomic_long_unchecked_t asid_reuse;
30128+ atomic_long_unchecked_t intr;
30129+ atomic_long_unchecked_t intr_cbr;
30130+ atomic_long_unchecked_t intr_tfh;
30131+ atomic_long_unchecked_t intr_spurious;
30132+ atomic_long_unchecked_t intr_mm_lock_failed;
30133+ atomic_long_unchecked_t call_os;
30134+ atomic_long_unchecked_t call_os_wait_queue;
30135+ atomic_long_unchecked_t user_flush_tlb;
30136+ atomic_long_unchecked_t user_unload_context;
30137+ atomic_long_unchecked_t user_exception;
30138+ atomic_long_unchecked_t set_context_option;
30139+ atomic_long_unchecked_t check_context_retarget_intr;
30140+ atomic_long_unchecked_t check_context_unload;
30141+ atomic_long_unchecked_t tlb_dropin;
30142+ atomic_long_unchecked_t tlb_preload_page;
30143+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30144+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30145+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30146+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30147+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30148+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30149+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30150+ atomic_long_unchecked_t tfh_stale_on_fault;
30151+ atomic_long_unchecked_t mmu_invalidate_range;
30152+ atomic_long_unchecked_t mmu_invalidate_page;
30153+ atomic_long_unchecked_t flush_tlb;
30154+ atomic_long_unchecked_t flush_tlb_gru;
30155+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30156+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30157+
30158+ atomic_long_unchecked_t copy_gpa;
30159+ atomic_long_unchecked_t read_gpa;
30160+
30161+ atomic_long_unchecked_t mesq_receive;
30162+ atomic_long_unchecked_t mesq_receive_none;
30163+ atomic_long_unchecked_t mesq_send;
30164+ atomic_long_unchecked_t mesq_send_failed;
30165+ atomic_long_unchecked_t mesq_noop;
30166+ atomic_long_unchecked_t mesq_send_unexpected_error;
30167+ atomic_long_unchecked_t mesq_send_lb_overflow;
30168+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30169+ atomic_long_unchecked_t mesq_send_amo_nacked;
30170+ atomic_long_unchecked_t mesq_send_put_nacked;
30171+ atomic_long_unchecked_t mesq_page_overflow;
30172+ atomic_long_unchecked_t mesq_qf_locked;
30173+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30174+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30175+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30176+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30177+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30178+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30179+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30180+ atomic_long_unchecked_t mesq_noop_put_nacked;
30181+ atomic_long_unchecked_t mesq_noop_page_overflow;
30182
30183 };
30184
30185@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30186 tghop_invalidate, mcsop_last};
30187
30188 struct mcs_op_statistic {
30189- atomic_long_t count;
30190- atomic_long_t total;
30191+ atomic_long_unchecked_t count;
30192+ atomic_long_unchecked_t total;
30193 unsigned long max;
30194 };
30195
30196@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30197
30198 #define STAT(id) do { \
30199 if (gru_options & OPT_STATS) \
30200- atomic_long_inc(&gru_stats.id); \
30201+ atomic_long_inc_unchecked(&gru_stats.id); \
30202 } while (0)
30203
30204 #ifdef CONFIG_SGI_GRU_DEBUG
30205diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc.h linux-3.0.7/drivers/misc/sgi-xp/xpc.h
30206--- linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-07-21 22:17:23.000000000 -0400
30207+++ linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-10-11 10:44:33.000000000 -0400
30208@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30209 void (*received_payload) (struct xpc_channel *, void *);
30210 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30211 };
30212+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30213
30214 /* struct xpc_partition act_state values (for XPC HB) */
30215
30216@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30217 /* found in xpc_main.c */
30218 extern struct device *xpc_part;
30219 extern struct device *xpc_chan;
30220-extern struct xpc_arch_operations xpc_arch_ops;
30221+extern xpc_arch_operations_no_const xpc_arch_ops;
30222 extern int xpc_disengage_timelimit;
30223 extern int xpc_disengage_timedout;
30224 extern int xpc_activate_IRQ_rcvd;
30225diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c
30226--- linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-07-21 22:17:23.000000000 -0400
30227+++ linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-10-11 10:44:33.000000000 -0400
30228@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30229 .notifier_call = xpc_system_die,
30230 };
30231
30232-struct xpc_arch_operations xpc_arch_ops;
30233+xpc_arch_operations_no_const xpc_arch_ops;
30234
30235 /*
30236 * Timer function to enforce the timelimit on the partition disengage.
30237diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xp.h linux-3.0.7/drivers/misc/sgi-xp/xp.h
30238--- linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
30239+++ linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
30240@@ -289,7 +289,7 @@ struct xpc_interface {
30241 xpc_notify_func, void *);
30242 void (*received) (short, int, void *);
30243 enum xp_retval (*partid_to_nasids) (short, void *);
30244-};
30245+} __no_const;
30246
30247 extern struct xpc_interface xpc_interface;
30248
30249diff -urNp linux-3.0.7/drivers/mmc/host/sdhci-pci.c linux-3.0.7/drivers/mmc/host/sdhci-pci.c
30250--- linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-07-21 22:17:23.000000000 -0400
30251+++ linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-10-11 10:44:33.000000000 -0400
30252@@ -524,7 +524,7 @@ static const struct sdhci_pci_fixes sdhc
30253 .probe = via_probe,
30254 };
30255
30256-static const struct pci_device_id pci_ids[] __devinitdata = {
30257+static const struct pci_device_id pci_ids[] __devinitconst = {
30258 {
30259 .vendor = PCI_VENDOR_ID_RICOH,
30260 .device = PCI_DEVICE_ID_RICOH_R5C822,
30261diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c
30262--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
30263+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
30264@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30265 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30266 unsigned long timeo = jiffies + HZ;
30267
30268+ pax_track_stack();
30269+
30270 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30271 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30272 goto sleep;
30273@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30274 unsigned long initial_adr;
30275 int initial_len = len;
30276
30277+ pax_track_stack();
30278+
30279 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30280 adr += chip->start;
30281 initial_adr = adr;
30282@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30283 int retries = 3;
30284 int ret;
30285
30286+ pax_track_stack();
30287+
30288 adr += chip->start;
30289
30290 retry:
30291diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c
30292--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
30293+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
30294@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30295 unsigned long cmd_addr;
30296 struct cfi_private *cfi = map->fldrv_priv;
30297
30298+ pax_track_stack();
30299+
30300 adr += chip->start;
30301
30302 /* Ensure cmd read/writes are aligned. */
30303@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30304 DECLARE_WAITQUEUE(wait, current);
30305 int wbufsize, z;
30306
30307+ pax_track_stack();
30308+
30309 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30310 if (adr & (map_bankwidth(map)-1))
30311 return -EINVAL;
30312@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30313 DECLARE_WAITQUEUE(wait, current);
30314 int ret = 0;
30315
30316+ pax_track_stack();
30317+
30318 adr += chip->start;
30319
30320 /* Let's determine this according to the interleave only once */
30321@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30322 unsigned long timeo = jiffies + HZ;
30323 DECLARE_WAITQUEUE(wait, current);
30324
30325+ pax_track_stack();
30326+
30327 adr += chip->start;
30328
30329 /* Let's determine this according to the interleave only once */
30330@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30331 unsigned long timeo = jiffies + HZ;
30332 DECLARE_WAITQUEUE(wait, current);
30333
30334+ pax_track_stack();
30335+
30336 adr += chip->start;
30337
30338 /* Let's determine this according to the interleave only once */
30339diff -urNp linux-3.0.7/drivers/mtd/devices/doc2000.c linux-3.0.7/drivers/mtd/devices/doc2000.c
30340--- linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
30341+++ linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
30342@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30343
30344 /* The ECC will not be calculated correctly if less than 512 is written */
30345 /* DBB-
30346- if (len != 0x200 && eccbuf)
30347+ if (len != 0x200)
30348 printk(KERN_WARNING
30349 "ECC needs a full sector write (adr: %lx size %lx)\n",
30350 (long) to, (long) len);
30351diff -urNp linux-3.0.7/drivers/mtd/devices/doc2001.c linux-3.0.7/drivers/mtd/devices/doc2001.c
30352--- linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
30353+++ linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
30354@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30355 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30356
30357 /* Don't allow read past end of device */
30358- if (from >= this->totlen)
30359+ if (from >= this->totlen || !len)
30360 return -EINVAL;
30361
30362 /* Don't allow a single read to cross a 512-byte block boundary */
30363diff -urNp linux-3.0.7/drivers/mtd/ftl.c linux-3.0.7/drivers/mtd/ftl.c
30364--- linux-3.0.7/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
30365+++ linux-3.0.7/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
30366@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30367 loff_t offset;
30368 uint16_t srcunitswap = cpu_to_le16(srcunit);
30369
30370+ pax_track_stack();
30371+
30372 eun = &part->EUNInfo[srcunit];
30373 xfer = &part->XferInfo[xferunit];
30374 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30375diff -urNp linux-3.0.7/drivers/mtd/inftlcore.c linux-3.0.7/drivers/mtd/inftlcore.c
30376--- linux-3.0.7/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
30377+++ linux-3.0.7/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
30378@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30379 struct inftl_oob oob;
30380 size_t retlen;
30381
30382+ pax_track_stack();
30383+
30384 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30385 "pending=%d)\n", inftl, thisVUC, pendingblock);
30386
30387diff -urNp linux-3.0.7/drivers/mtd/inftlmount.c linux-3.0.7/drivers/mtd/inftlmount.c
30388--- linux-3.0.7/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
30389+++ linux-3.0.7/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
30390@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30391 struct INFTLPartition *ip;
30392 size_t retlen;
30393
30394+ pax_track_stack();
30395+
30396 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30397
30398 /*
30399diff -urNp linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c
30400--- linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
30401+++ linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
30402@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30403 {
30404 map_word pfow_val[4];
30405
30406+ pax_track_stack();
30407+
30408 /* Check identification string */
30409 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30410 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30411diff -urNp linux-3.0.7/drivers/mtd/mtdchar.c linux-3.0.7/drivers/mtd/mtdchar.c
30412--- linux-3.0.7/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
30413+++ linux-3.0.7/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
30414@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
30415 u_long size;
30416 struct mtd_info_user info;
30417
30418+ pax_track_stack();
30419+
30420 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30421
30422 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30423diff -urNp linux-3.0.7/drivers/mtd/nand/denali.c linux-3.0.7/drivers/mtd/nand/denali.c
30424--- linux-3.0.7/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
30425+++ linux-3.0.7/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
30426@@ -26,6 +26,7 @@
30427 #include <linux/pci.h>
30428 #include <linux/mtd/mtd.h>
30429 #include <linux/module.h>
30430+#include <linux/slab.h>
30431
30432 #include "denali.h"
30433
30434diff -urNp linux-3.0.7/drivers/mtd/nftlcore.c linux-3.0.7/drivers/mtd/nftlcore.c
30435--- linux-3.0.7/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
30436+++ linux-3.0.7/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
30437@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30438 int inplace = 1;
30439 size_t retlen;
30440
30441+ pax_track_stack();
30442+
30443 memset(BlockMap, 0xff, sizeof(BlockMap));
30444 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30445
30446diff -urNp linux-3.0.7/drivers/mtd/nftlmount.c linux-3.0.7/drivers/mtd/nftlmount.c
30447--- linux-3.0.7/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
30448+++ linux-3.0.7/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
30449@@ -24,6 +24,7 @@
30450 #include <asm/errno.h>
30451 #include <linux/delay.h>
30452 #include <linux/slab.h>
30453+#include <linux/sched.h>
30454 #include <linux/mtd/mtd.h>
30455 #include <linux/mtd/nand.h>
30456 #include <linux/mtd/nftl.h>
30457@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
30458 struct mtd_info *mtd = nftl->mbd.mtd;
30459 unsigned int i;
30460
30461+ pax_track_stack();
30462+
30463 /* Assume logical EraseSize == physical erasesize for starting the scan.
30464 We'll sort it out later if we find a MediaHeader which says otherwise */
30465 /* Actually, we won't. The new DiskOnChip driver has already scanned
30466diff -urNp linux-3.0.7/drivers/mtd/ubi/build.c linux-3.0.7/drivers/mtd/ubi/build.c
30467--- linux-3.0.7/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
30468+++ linux-3.0.7/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
30469@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
30470 static int __init bytes_str_to_int(const char *str)
30471 {
30472 char *endp;
30473- unsigned long result;
30474+ unsigned long result, scale = 1;
30475
30476 result = simple_strtoul(str, &endp, 0);
30477 if (str == endp || result >= INT_MAX) {
30478@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
30479
30480 switch (*endp) {
30481 case 'G':
30482- result *= 1024;
30483+ scale *= 1024;
30484 case 'M':
30485- result *= 1024;
30486+ scale *= 1024;
30487 case 'K':
30488- result *= 1024;
30489+ scale *= 1024;
30490 if (endp[1] == 'i' && endp[2] == 'B')
30491 endp += 2;
30492 case '\0':
30493@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
30494 return -EINVAL;
30495 }
30496
30497- return result;
30498+ if ((intoverflow_t)result*scale >= INT_MAX) {
30499+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
30500+ str);
30501+ return -EINVAL;
30502+ }
30503+
30504+ return result*scale;
30505 }
30506
30507 /**
30508diff -urNp linux-3.0.7/drivers/net/atlx/atl2.c linux-3.0.7/drivers/net/atlx/atl2.c
30509--- linux-3.0.7/drivers/net/atlx/atl2.c 2011-07-21 22:17:23.000000000 -0400
30510+++ linux-3.0.7/drivers/net/atlx/atl2.c 2011-10-11 10:44:33.000000000 -0400
30511@@ -2840,7 +2840,7 @@ static void atl2_force_ps(struct atl2_hw
30512 */
30513
30514 #define ATL2_PARAM(X, desc) \
30515- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30516+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30517 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
30518 MODULE_PARM_DESC(X, desc);
30519 #else
30520diff -urNp linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c
30521--- linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
30522+++ linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
30523@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
30524 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
30525 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
30526
30527-static struct bfa_ioc_hwif nw_hwif_ct;
30528+static struct bfa_ioc_hwif nw_hwif_ct = {
30529+ .ioc_pll_init = bfa_ioc_ct_pll_init,
30530+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
30531+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
30532+ .ioc_reg_init = bfa_ioc_ct_reg_init,
30533+ .ioc_map_port = bfa_ioc_ct_map_port,
30534+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
30535+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
30536+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
30537+ .ioc_sync_start = bfa_ioc_ct_sync_start,
30538+ .ioc_sync_join = bfa_ioc_ct_sync_join,
30539+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
30540+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
30541+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
30542+};
30543
30544 /**
30545 * Called from bfa_ioc_attach() to map asic specific calls.
30546@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
30547 void
30548 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
30549 {
30550- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
30551- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
30552- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
30553- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
30554- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
30555- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
30556- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
30557- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
30558- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
30559- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
30560- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
30561- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
30562- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
30563-
30564 ioc->ioc_hwif = &nw_hwif_ct;
30565 }
30566
30567diff -urNp linux-3.0.7/drivers/net/bna/bnad.c linux-3.0.7/drivers/net/bna/bnad.c
30568--- linux-3.0.7/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
30569+++ linux-3.0.7/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
30570@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30571 struct bna_intr_info *intr_info =
30572 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
30573 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
30574- struct bna_tx_event_cbfn tx_cbfn;
30575+ static struct bna_tx_event_cbfn tx_cbfn = {
30576+ /* Initialize the tx event handlers */
30577+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
30578+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
30579+ .tx_stall_cbfn = bnad_cb_tx_stall,
30580+ .tx_resume_cbfn = bnad_cb_tx_resume,
30581+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
30582+ };
30583 struct bna_tx *tx;
30584 unsigned long flags;
30585
30586@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30587 tx_config->txq_depth = bnad->txq_depth;
30588 tx_config->tx_type = BNA_TX_T_REGULAR;
30589
30590- /* Initialize the tx event handlers */
30591- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
30592- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
30593- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
30594- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
30595- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
30596-
30597 /* Get BNA's resource requirement for one tx object */
30598 spin_lock_irqsave(&bnad->bna_lock, flags);
30599 bna_tx_res_req(bnad->num_txq_per_tx,
30600@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
30601 struct bna_intr_info *intr_info =
30602 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
30603 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
30604- struct bna_rx_event_cbfn rx_cbfn;
30605+ static struct bna_rx_event_cbfn rx_cbfn = {
30606+ /* Initialize the Rx event handlers */
30607+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
30608+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
30609+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
30610+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
30611+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
30612+ .rx_post_cbfn = bnad_cb_rx_post
30613+ };
30614 struct bna_rx *rx;
30615 unsigned long flags;
30616
30617 /* Initialize the Rx object configuration */
30618 bnad_init_rx_config(bnad, rx_config);
30619
30620- /* Initialize the Rx event handlers */
30621- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
30622- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
30623- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
30624- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
30625- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
30626- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
30627-
30628 /* Get BNA's resource requirement for one Rx object */
30629 spin_lock_irqsave(&bnad->bna_lock, flags);
30630 bna_rx_res_req(rx_config, res_info);
30631diff -urNp linux-3.0.7/drivers/net/bnx2.c linux-3.0.7/drivers/net/bnx2.c
30632--- linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:54:54.000000000 -0400
30633+++ linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:55:27.000000000 -0400
30634@@ -5831,6 +5831,8 @@ bnx2_test_nvram(struct bnx2 *bp)
30635 int rc = 0;
30636 u32 magic, csum;
30637
30638+ pax_track_stack();
30639+
30640 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
30641 goto test_nvram_done;
30642
30643diff -urNp linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c
30644--- linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30645+++ linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
30646@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
30647 int i, rc;
30648 u32 magic, crc;
30649
30650+ pax_track_stack();
30651+
30652 if (BP_NOMCP(bp))
30653 return 0;
30654
30655diff -urNp linux-3.0.7/drivers/net/can/mscan/mscan.c linux-3.0.7/drivers/net/can/mscan/mscan.c
30656--- linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-07-21 22:17:23.000000000 -0400
30657+++ linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-10-17 02:51:46.000000000 -0400
30658@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(stru
30659 void __iomem *data = &regs->tx.dsr1_0;
30660 u16 *payload = (u16 *)frame->data;
30661
30662- /* It is safe to write into dsr[dlc+1] */
30663- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30664+ for (i = 0; i < frame->can_dlc / 2; i++) {
30665 out_be16(data, *payload++);
30666 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30667 }
30668+ /* write remaining byte if necessary */
30669+ if (frame->can_dlc & 1)
30670+ out_8(data, frame->data[frame->can_dlc - 1]);
30671 }
30672
30673 out_8(&regs->tx.dlr, frame->can_dlc);
30674@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct ne
30675 void __iomem *data = &regs->rx.dsr1_0;
30676 u16 *payload = (u16 *)frame->data;
30677
30678- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30679+ for (i = 0; i < frame->can_dlc / 2; i++) {
30680 *payload++ = in_be16(data);
30681 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30682 }
30683+ /* read remaining byte if necessary */
30684+ if (frame->can_dlc & 1)
30685+ frame->data[frame->can_dlc - 1] = in_8(data);
30686 }
30687
30688 out_8(&regs->canrflg, MSCAN_RXF);
30689diff -urNp linux-3.0.7/drivers/net/cxgb3/l2t.h linux-3.0.7/drivers/net/cxgb3/l2t.h
30690--- linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:54:54.000000000 -0400
30691+++ linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:55:27.000000000 -0400
30692@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
30693 */
30694 struct l2t_skb_cb {
30695 arp_failure_handler_func arp_failure_handler;
30696-};
30697+} __no_const;
30698
30699 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
30700
30701diff -urNp linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c
30702--- linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
30703+++ linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
30704@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
30705 unsigned int nchan = adap->params.nports;
30706 struct msix_entry entries[MAX_INGQ + 1];
30707
30708+ pax_track_stack();
30709+
30710 for (i = 0; i < ARRAY_SIZE(entries); ++i)
30711 entries[i].entry = i;
30712
30713diff -urNp linux-3.0.7/drivers/net/cxgb4/t4_hw.c linux-3.0.7/drivers/net/cxgb4/t4_hw.c
30714--- linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
30715+++ linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
30716@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
30717 u8 vpd[VPD_LEN], csum;
30718 unsigned int vpdr_len, kw_offset, id_len;
30719
30720+ pax_track_stack();
30721+
30722 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
30723 if (ret < 0)
30724 return ret;
30725diff -urNp linux-3.0.7/drivers/net/e1000e/82571.c linux-3.0.7/drivers/net/e1000e/82571.c
30726--- linux-3.0.7/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
30727+++ linux-3.0.7/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
30728@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
30729 {
30730 struct e1000_hw *hw = &adapter->hw;
30731 struct e1000_mac_info *mac = &hw->mac;
30732- struct e1000_mac_operations *func = &mac->ops;
30733+ e1000_mac_operations_no_const *func = &mac->ops;
30734 u32 swsm = 0;
30735 u32 swsm2 = 0;
30736 bool force_clear_smbi = false;
30737diff -urNp linux-3.0.7/drivers/net/e1000e/es2lan.c linux-3.0.7/drivers/net/e1000e/es2lan.c
30738--- linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
30739+++ linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
30740@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
30741 {
30742 struct e1000_hw *hw = &adapter->hw;
30743 struct e1000_mac_info *mac = &hw->mac;
30744- struct e1000_mac_operations *func = &mac->ops;
30745+ e1000_mac_operations_no_const *func = &mac->ops;
30746
30747 /* Set media type */
30748 switch (adapter->pdev->device) {
30749diff -urNp linux-3.0.7/drivers/net/e1000e/hw.h linux-3.0.7/drivers/net/e1000e/hw.h
30750--- linux-3.0.7/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
30751+++ linux-3.0.7/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
30752@@ -776,6 +776,7 @@ struct e1000_mac_operations {
30753 void (*write_vfta)(struct e1000_hw *, u32, u32);
30754 s32 (*read_mac_addr)(struct e1000_hw *);
30755 };
30756+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30757
30758 /* Function pointers for the PHY. */
30759 struct e1000_phy_operations {
30760@@ -799,6 +800,7 @@ struct e1000_phy_operations {
30761 void (*power_up)(struct e1000_hw *);
30762 void (*power_down)(struct e1000_hw *);
30763 };
30764+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30765
30766 /* Function pointers for the NVM. */
30767 struct e1000_nvm_operations {
30768@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
30769 s32 (*validate)(struct e1000_hw *);
30770 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
30771 };
30772+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30773
30774 struct e1000_mac_info {
30775- struct e1000_mac_operations ops;
30776+ e1000_mac_operations_no_const ops;
30777 u8 addr[ETH_ALEN];
30778 u8 perm_addr[ETH_ALEN];
30779
30780@@ -853,7 +856,7 @@ struct e1000_mac_info {
30781 };
30782
30783 struct e1000_phy_info {
30784- struct e1000_phy_operations ops;
30785+ e1000_phy_operations_no_const ops;
30786
30787 enum e1000_phy_type type;
30788
30789@@ -887,7 +890,7 @@ struct e1000_phy_info {
30790 };
30791
30792 struct e1000_nvm_info {
30793- struct e1000_nvm_operations ops;
30794+ e1000_nvm_operations_no_const ops;
30795
30796 enum e1000_nvm_type type;
30797 enum e1000_nvm_override override;
30798diff -urNp linux-3.0.7/drivers/net/fealnx.c linux-3.0.7/drivers/net/fealnx.c
30799--- linux-3.0.7/drivers/net/fealnx.c 2011-07-21 22:17:23.000000000 -0400
30800+++ linux-3.0.7/drivers/net/fealnx.c 2011-10-11 10:44:33.000000000 -0400
30801@@ -150,7 +150,7 @@ struct chip_info {
30802 int flags;
30803 };
30804
30805-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
30806+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
30807 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30808 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
30809 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30810diff -urNp linux-3.0.7/drivers/net/hamradio/6pack.c linux-3.0.7/drivers/net/hamradio/6pack.c
30811--- linux-3.0.7/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
30812+++ linux-3.0.7/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
30813@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
30814 unsigned char buf[512];
30815 int count1;
30816
30817+ pax_track_stack();
30818+
30819 if (!count)
30820 return;
30821
30822diff -urNp linux-3.0.7/drivers/net/igb/e1000_hw.h linux-3.0.7/drivers/net/igb/e1000_hw.h
30823--- linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
30824+++ linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
30825@@ -314,6 +314,7 @@ struct e1000_mac_operations {
30826 s32 (*read_mac_addr)(struct e1000_hw *);
30827 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
30828 };
30829+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30830
30831 struct e1000_phy_operations {
30832 s32 (*acquire)(struct e1000_hw *);
30833@@ -330,6 +331,7 @@ struct e1000_phy_operations {
30834 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
30835 s32 (*write_reg)(struct e1000_hw *, u32, u16);
30836 };
30837+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30838
30839 struct e1000_nvm_operations {
30840 s32 (*acquire)(struct e1000_hw *);
30841@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
30842 s32 (*update)(struct e1000_hw *);
30843 s32 (*validate)(struct e1000_hw *);
30844 };
30845+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30846
30847 struct e1000_info {
30848 s32 (*get_invariants)(struct e1000_hw *);
30849@@ -350,7 +353,7 @@ struct e1000_info {
30850 extern const struct e1000_info e1000_82575_info;
30851
30852 struct e1000_mac_info {
30853- struct e1000_mac_operations ops;
30854+ e1000_mac_operations_no_const ops;
30855
30856 u8 addr[6];
30857 u8 perm_addr[6];
30858@@ -388,7 +391,7 @@ struct e1000_mac_info {
30859 };
30860
30861 struct e1000_phy_info {
30862- struct e1000_phy_operations ops;
30863+ e1000_phy_operations_no_const ops;
30864
30865 enum e1000_phy_type type;
30866
30867@@ -423,7 +426,7 @@ struct e1000_phy_info {
30868 };
30869
30870 struct e1000_nvm_info {
30871- struct e1000_nvm_operations ops;
30872+ e1000_nvm_operations_no_const ops;
30873 enum e1000_nvm_type type;
30874 enum e1000_nvm_override override;
30875
30876@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
30877 s32 (*check_for_ack)(struct e1000_hw *, u16);
30878 s32 (*check_for_rst)(struct e1000_hw *, u16);
30879 };
30880+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30881
30882 struct e1000_mbx_stats {
30883 u32 msgs_tx;
30884@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
30885 };
30886
30887 struct e1000_mbx_info {
30888- struct e1000_mbx_operations ops;
30889+ e1000_mbx_operations_no_const ops;
30890 struct e1000_mbx_stats stats;
30891 u32 timeout;
30892 u32 usec_delay;
30893diff -urNp linux-3.0.7/drivers/net/igbvf/vf.h linux-3.0.7/drivers/net/igbvf/vf.h
30894--- linux-3.0.7/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
30895+++ linux-3.0.7/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
30896@@ -189,9 +189,10 @@ struct e1000_mac_operations {
30897 s32 (*read_mac_addr)(struct e1000_hw *);
30898 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
30899 };
30900+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30901
30902 struct e1000_mac_info {
30903- struct e1000_mac_operations ops;
30904+ e1000_mac_operations_no_const ops;
30905 u8 addr[6];
30906 u8 perm_addr[6];
30907
30908@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
30909 s32 (*check_for_ack)(struct e1000_hw *);
30910 s32 (*check_for_rst)(struct e1000_hw *);
30911 };
30912+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30913
30914 struct e1000_mbx_stats {
30915 u32 msgs_tx;
30916@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
30917 };
30918
30919 struct e1000_mbx_info {
30920- struct e1000_mbx_operations ops;
30921+ e1000_mbx_operations_no_const ops;
30922 struct e1000_mbx_stats stats;
30923 u32 timeout;
30924 u32 usec_delay;
30925diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_main.c linux-3.0.7/drivers/net/ixgb/ixgb_main.c
30926--- linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
30927+++ linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
30928@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
30929 u32 rctl;
30930 int i;
30931
30932+ pax_track_stack();
30933+
30934 /* Check for Promiscuous and All Multicast modes */
30935
30936 rctl = IXGB_READ_REG(hw, RCTL);
30937diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_param.c linux-3.0.7/drivers/net/ixgb/ixgb_param.c
30938--- linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
30939+++ linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
30940@@ -261,6 +261,9 @@ void __devinit
30941 ixgb_check_options(struct ixgb_adapter *adapter)
30942 {
30943 int bd = adapter->bd_number;
30944+
30945+ pax_track_stack();
30946+
30947 if (bd >= IXGB_MAX_NIC) {
30948 pr_notice("Warning: no configuration for board #%i\n", bd);
30949 pr_notice("Using defaults for all values\n");
30950diff -urNp linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h
30951--- linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
30952+++ linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
30953@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
30954 s32 (*update_checksum)(struct ixgbe_hw *);
30955 u16 (*calc_checksum)(struct ixgbe_hw *);
30956 };
30957+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
30958
30959 struct ixgbe_mac_operations {
30960 s32 (*init_hw)(struct ixgbe_hw *);
30961@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
30962 /* Flow Control */
30963 s32 (*fc_enable)(struct ixgbe_hw *, s32);
30964 };
30965+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
30966
30967 struct ixgbe_phy_operations {
30968 s32 (*identify)(struct ixgbe_hw *);
30969@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
30970 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
30971 s32 (*check_overtemp)(struct ixgbe_hw *);
30972 };
30973+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
30974
30975 struct ixgbe_eeprom_info {
30976- struct ixgbe_eeprom_operations ops;
30977+ ixgbe_eeprom_operations_no_const ops;
30978 enum ixgbe_eeprom_type type;
30979 u32 semaphore_delay;
30980 u16 word_size;
30981@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
30982
30983 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
30984 struct ixgbe_mac_info {
30985- struct ixgbe_mac_operations ops;
30986+ ixgbe_mac_operations_no_const ops;
30987 enum ixgbe_mac_type type;
30988 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30989 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30990@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
30991 };
30992
30993 struct ixgbe_phy_info {
30994- struct ixgbe_phy_operations ops;
30995+ ixgbe_phy_operations_no_const ops;
30996 struct mdio_if_info mdio;
30997 enum ixgbe_phy_type type;
30998 u32 id;
30999@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
31000 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31001 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31002 };
31003+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31004
31005 struct ixgbe_mbx_stats {
31006 u32 msgs_tx;
31007@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
31008 };
31009
31010 struct ixgbe_mbx_info {
31011- struct ixgbe_mbx_operations ops;
31012+ ixgbe_mbx_operations_no_const ops;
31013 struct ixgbe_mbx_stats stats;
31014 u32 timeout;
31015 u32 usec_delay;
31016diff -urNp linux-3.0.7/drivers/net/ixgbevf/vf.h linux-3.0.7/drivers/net/ixgbevf/vf.h
31017--- linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
31018+++ linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
31019@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31020 s32 (*clear_vfta)(struct ixgbe_hw *);
31021 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31022 };
31023+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31024
31025 enum ixgbe_mac_type {
31026 ixgbe_mac_unknown = 0,
31027@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31028 };
31029
31030 struct ixgbe_mac_info {
31031- struct ixgbe_mac_operations ops;
31032+ ixgbe_mac_operations_no_const ops;
31033 u8 addr[6];
31034 u8 perm_addr[6];
31035
31036@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31037 s32 (*check_for_ack)(struct ixgbe_hw *);
31038 s32 (*check_for_rst)(struct ixgbe_hw *);
31039 };
31040+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31041
31042 struct ixgbe_mbx_stats {
31043 u32 msgs_tx;
31044@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31045 };
31046
31047 struct ixgbe_mbx_info {
31048- struct ixgbe_mbx_operations ops;
31049+ ixgbe_mbx_operations_no_const ops;
31050 struct ixgbe_mbx_stats stats;
31051 u32 timeout;
31052 u32 udelay;
31053diff -urNp linux-3.0.7/drivers/net/ksz884x.c linux-3.0.7/drivers/net/ksz884x.c
31054--- linux-3.0.7/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
31055+++ linux-3.0.7/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
31056@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
31057 int rc;
31058 u64 counter[TOTAL_PORT_COUNTER_NUM];
31059
31060+ pax_track_stack();
31061+
31062 mutex_lock(&hw_priv->lock);
31063 n = SWITCH_PORT_NUM;
31064 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31065diff -urNp linux-3.0.7/drivers/net/mlx4/main.c linux-3.0.7/drivers/net/mlx4/main.c
31066--- linux-3.0.7/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
31067+++ linux-3.0.7/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
31068@@ -40,6 +40,7 @@
31069 #include <linux/dma-mapping.h>
31070 #include <linux/slab.h>
31071 #include <linux/io-mapping.h>
31072+#include <linux/sched.h>
31073
31074 #include <linux/mlx4/device.h>
31075 #include <linux/mlx4/doorbell.h>
31076@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
31077 u64 icm_size;
31078 int err;
31079
31080+ pax_track_stack();
31081+
31082 err = mlx4_QUERY_FW(dev);
31083 if (err) {
31084 if (err == -EACCES)
31085diff -urNp linux-3.0.7/drivers/net/niu.c linux-3.0.7/drivers/net/niu.c
31086--- linux-3.0.7/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
31087+++ linux-3.0.7/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
31088@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
31089 int i, num_irqs, err;
31090 u8 first_ldg;
31091
31092+ pax_track_stack();
31093+
31094 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31095 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31096 ldg_num_map[i] = first_ldg + i;
31097diff -urNp linux-3.0.7/drivers/net/pcnet32.c linux-3.0.7/drivers/net/pcnet32.c
31098--- linux-3.0.7/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
31099+++ linux-3.0.7/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
31100@@ -82,7 +82,7 @@ static int cards_found;
31101 /*
31102 * VLB I/O addresses
31103 */
31104-static unsigned int pcnet32_portlist[] __initdata =
31105+static unsigned int pcnet32_portlist[] __devinitdata =
31106 { 0x300, 0x320, 0x340, 0x360, 0 };
31107
31108 static int pcnet32_debug;
31109@@ -270,7 +270,7 @@ struct pcnet32_private {
31110 struct sk_buff **rx_skbuff;
31111 dma_addr_t *tx_dma_addr;
31112 dma_addr_t *rx_dma_addr;
31113- struct pcnet32_access a;
31114+ struct pcnet32_access *a;
31115 spinlock_t lock; /* Guard lock */
31116 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31117 unsigned int rx_ring_size; /* current rx ring size */
31118@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31119 u16 val;
31120
31121 netif_wake_queue(dev);
31122- val = lp->a.read_csr(ioaddr, CSR3);
31123+ val = lp->a->read_csr(ioaddr, CSR3);
31124 val &= 0x00ff;
31125- lp->a.write_csr(ioaddr, CSR3, val);
31126+ lp->a->write_csr(ioaddr, CSR3, val);
31127 napi_enable(&lp->napi);
31128 }
31129
31130@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31131 r = mii_link_ok(&lp->mii_if);
31132 } else if (lp->chip_version >= PCNET32_79C970A) {
31133 ulong ioaddr = dev->base_addr; /* card base I/O address */
31134- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31135+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31136 } else { /* can not detect link on really old chips */
31137 r = 1;
31138 }
31139@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31140 pcnet32_netif_stop(dev);
31141
31142 spin_lock_irqsave(&lp->lock, flags);
31143- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31144+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31145
31146 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31147
31148@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31149 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31150 {
31151 struct pcnet32_private *lp = netdev_priv(dev);
31152- struct pcnet32_access *a = &lp->a; /* access to registers */
31153+ struct pcnet32_access *a = lp->a; /* access to registers */
31154 ulong ioaddr = dev->base_addr; /* card base I/O address */
31155 struct sk_buff *skb; /* sk buff */
31156 int x, i; /* counters */
31157@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31158 pcnet32_netif_stop(dev);
31159
31160 spin_lock_irqsave(&lp->lock, flags);
31161- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31162+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31163
31164 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31165
31166 /* Reset the PCNET32 */
31167- lp->a.reset(ioaddr);
31168- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31169+ lp->a->reset(ioaddr);
31170+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31171
31172 /* switch pcnet32 to 32bit mode */
31173- lp->a.write_bcr(ioaddr, 20, 2);
31174+ lp->a->write_bcr(ioaddr, 20, 2);
31175
31176 /* purge & init rings but don't actually restart */
31177 pcnet32_restart(dev, 0x0000);
31178
31179- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31180+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31181
31182 /* Initialize Transmit buffers. */
31183 size = data_len + 15;
31184@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31185
31186 /* set int loopback in CSR15 */
31187 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31188- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31189+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31190
31191 teststatus = cpu_to_le16(0x8000);
31192- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31193+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31194
31195 /* Check status of descriptors */
31196 for (x = 0; x < numbuffs; x++) {
31197@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31198 }
31199 }
31200
31201- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31202+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31203 wmb();
31204 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31205 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31206@@ -1015,7 +1015,7 @@ clean_up:
31207 pcnet32_restart(dev, CSR0_NORMAL);
31208 } else {
31209 pcnet32_purge_rx_ring(dev);
31210- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31211+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31212 }
31213 spin_unlock_irqrestore(&lp->lock, flags);
31214
31215@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31216 enum ethtool_phys_id_state state)
31217 {
31218 struct pcnet32_private *lp = netdev_priv(dev);
31219- struct pcnet32_access *a = &lp->a;
31220+ struct pcnet32_access *a = lp->a;
31221 ulong ioaddr = dev->base_addr;
31222 unsigned long flags;
31223 int i;
31224@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31225 {
31226 int csr5;
31227 struct pcnet32_private *lp = netdev_priv(dev);
31228- struct pcnet32_access *a = &lp->a;
31229+ struct pcnet32_access *a = lp->a;
31230 ulong ioaddr = dev->base_addr;
31231 int ticks;
31232
31233@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31234 spin_lock_irqsave(&lp->lock, flags);
31235 if (pcnet32_tx(dev)) {
31236 /* reset the chip to clear the error condition, then restart */
31237- lp->a.reset(ioaddr);
31238- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31239+ lp->a->reset(ioaddr);
31240+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31241 pcnet32_restart(dev, CSR0_START);
31242 netif_wake_queue(dev);
31243 }
31244@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31245 __napi_complete(napi);
31246
31247 /* clear interrupt masks */
31248- val = lp->a.read_csr(ioaddr, CSR3);
31249+ val = lp->a->read_csr(ioaddr, CSR3);
31250 val &= 0x00ff;
31251- lp->a.write_csr(ioaddr, CSR3, val);
31252+ lp->a->write_csr(ioaddr, CSR3, val);
31253
31254 /* Set interrupt enable. */
31255- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31256+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31257
31258 spin_unlock_irqrestore(&lp->lock, flags);
31259 }
31260@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31261 int i, csr0;
31262 u16 *buff = ptr;
31263 struct pcnet32_private *lp = netdev_priv(dev);
31264- struct pcnet32_access *a = &lp->a;
31265+ struct pcnet32_access *a = lp->a;
31266 ulong ioaddr = dev->base_addr;
31267 unsigned long flags;
31268
31269@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31270 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31271 if (lp->phymask & (1 << j)) {
31272 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31273- lp->a.write_bcr(ioaddr, 33,
31274+ lp->a->write_bcr(ioaddr, 33,
31275 (j << 5) | i);
31276- *buff++ = lp->a.read_bcr(ioaddr, 34);
31277+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31278 }
31279 }
31280 }
31281@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31282 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31283 lp->options |= PCNET32_PORT_FD;
31284
31285- lp->a = *a;
31286+ lp->a = a;
31287
31288 /* prior to register_netdev, dev->name is not yet correct */
31289 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31290@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31291 if (lp->mii) {
31292 /* lp->phycount and lp->phymask are set to 0 by memset above */
31293
31294- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31295+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31296 /* scan for PHYs */
31297 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31298 unsigned short id1, id2;
31299@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31300 pr_info("Found PHY %04x:%04x at address %d\n",
31301 id1, id2, i);
31302 }
31303- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31304+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31305 if (lp->phycount > 1)
31306 lp->options |= PCNET32_PORT_MII;
31307 }
31308@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31309 }
31310
31311 /* Reset the PCNET32 */
31312- lp->a.reset(ioaddr);
31313+ lp->a->reset(ioaddr);
31314
31315 /* switch pcnet32 to 32bit mode */
31316- lp->a.write_bcr(ioaddr, 20, 2);
31317+ lp->a->write_bcr(ioaddr, 20, 2);
31318
31319 netif_printk(lp, ifup, KERN_DEBUG, dev,
31320 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31321@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31322 (u32) (lp->init_dma_addr));
31323
31324 /* set/reset autoselect bit */
31325- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31326+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31327 if (lp->options & PCNET32_PORT_ASEL)
31328 val |= 2;
31329- lp->a.write_bcr(ioaddr, 2, val);
31330+ lp->a->write_bcr(ioaddr, 2, val);
31331
31332 /* handle full duplex setting */
31333 if (lp->mii_if.full_duplex) {
31334- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31335+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31336 if (lp->options & PCNET32_PORT_FD) {
31337 val |= 1;
31338 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31339@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31340 if (lp->chip_version == 0x2627)
31341 val |= 3;
31342 }
31343- lp->a.write_bcr(ioaddr, 9, val);
31344+ lp->a->write_bcr(ioaddr, 9, val);
31345 }
31346
31347 /* set/reset GPSI bit in test register */
31348- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31349+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31350 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31351 val |= 0x10;
31352- lp->a.write_csr(ioaddr, 124, val);
31353+ lp->a->write_csr(ioaddr, 124, val);
31354
31355 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31356 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31357@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31358 * duplex, and/or enable auto negotiation, and clear DANAS
31359 */
31360 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31361- lp->a.write_bcr(ioaddr, 32,
31362- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31363+ lp->a->write_bcr(ioaddr, 32,
31364+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31365 /* disable Auto Negotiation, set 10Mpbs, HD */
31366- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31367+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31368 if (lp->options & PCNET32_PORT_FD)
31369 val |= 0x10;
31370 if (lp->options & PCNET32_PORT_100)
31371 val |= 0x08;
31372- lp->a.write_bcr(ioaddr, 32, val);
31373+ lp->a->write_bcr(ioaddr, 32, val);
31374 } else {
31375 if (lp->options & PCNET32_PORT_ASEL) {
31376- lp->a.write_bcr(ioaddr, 32,
31377- lp->a.read_bcr(ioaddr,
31378+ lp->a->write_bcr(ioaddr, 32,
31379+ lp->a->read_bcr(ioaddr,
31380 32) | 0x0080);
31381 /* enable auto negotiate, setup, disable fd */
31382- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31383+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31384 val |= 0x20;
31385- lp->a.write_bcr(ioaddr, 32, val);
31386+ lp->a->write_bcr(ioaddr, 32, val);
31387 }
31388 }
31389 } else {
31390@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31391 * There is really no good other way to handle multiple PHYs
31392 * other than turning off all automatics
31393 */
31394- val = lp->a.read_bcr(ioaddr, 2);
31395- lp->a.write_bcr(ioaddr, 2, val & ~2);
31396- val = lp->a.read_bcr(ioaddr, 32);
31397- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31398+ val = lp->a->read_bcr(ioaddr, 2);
31399+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31400+ val = lp->a->read_bcr(ioaddr, 32);
31401+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31402
31403 if (!(lp->options & PCNET32_PORT_ASEL)) {
31404 /* setup ecmd */
31405@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31406 ethtool_cmd_speed_set(&ecmd,
31407 (lp->options & PCNET32_PORT_100) ?
31408 SPEED_100 : SPEED_10);
31409- bcr9 = lp->a.read_bcr(ioaddr, 9);
31410+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31411
31412 if (lp->options & PCNET32_PORT_FD) {
31413 ecmd.duplex = DUPLEX_FULL;
31414@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31415 ecmd.duplex = DUPLEX_HALF;
31416 bcr9 |= ~(1 << 0);
31417 }
31418- lp->a.write_bcr(ioaddr, 9, bcr9);
31419+ lp->a->write_bcr(ioaddr, 9, bcr9);
31420 }
31421
31422 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31423@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31424
31425 #ifdef DO_DXSUFLO
31426 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31427- val = lp->a.read_csr(ioaddr, CSR3);
31428+ val = lp->a->read_csr(ioaddr, CSR3);
31429 val |= 0x40;
31430- lp->a.write_csr(ioaddr, CSR3, val);
31431+ lp->a->write_csr(ioaddr, CSR3, val);
31432 }
31433 #endif
31434
31435@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31436 napi_enable(&lp->napi);
31437
31438 /* Re-initialize the PCNET32, and start it when done. */
31439- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31440- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31441+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31442+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31443
31444- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31445- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31446+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31447+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31448
31449 netif_start_queue(dev);
31450
31451@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31452
31453 i = 0;
31454 while (i++ < 100)
31455- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31456+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31457 break;
31458 /*
31459 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31460 * reports that doing so triggers a bug in the '974.
31461 */
31462- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31463+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31464
31465 netif_printk(lp, ifup, KERN_DEBUG, dev,
31466 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31467 i,
31468 (u32) (lp->init_dma_addr),
31469- lp->a.read_csr(ioaddr, CSR0));
31470+ lp->a->read_csr(ioaddr, CSR0));
31471
31472 spin_unlock_irqrestore(&lp->lock, flags);
31473
31474@@ -2218,7 +2218,7 @@ err_free_ring:
31475 * Switch back to 16bit mode to avoid problems with dumb
31476 * DOS packet driver after a warm reboot
31477 */
31478- lp->a.write_bcr(ioaddr, 20, 4);
31479+ lp->a->write_bcr(ioaddr, 20, 4);
31480
31481 err_free_irq:
31482 spin_unlock_irqrestore(&lp->lock, flags);
31483@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
31484
31485 /* wait for stop */
31486 for (i = 0; i < 100; i++)
31487- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
31488+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
31489 break;
31490
31491 if (i >= 100)
31492@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
31493 return;
31494
31495 /* ReInit Ring */
31496- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31497+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31498 i = 0;
31499 while (i++ < 1000)
31500- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31501+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31502 break;
31503
31504- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
31505+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
31506 }
31507
31508 static void pcnet32_tx_timeout(struct net_device *dev)
31509@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
31510 /* Transmitter timeout, serious problems. */
31511 if (pcnet32_debug & NETIF_MSG_DRV)
31512 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
31513- dev->name, lp->a.read_csr(ioaddr, CSR0));
31514- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31515+ dev->name, lp->a->read_csr(ioaddr, CSR0));
31516+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31517 dev->stats.tx_errors++;
31518 if (netif_msg_tx_err(lp)) {
31519 int i;
31520@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31521
31522 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
31523 "%s() called, csr0 %4.4x\n",
31524- __func__, lp->a.read_csr(ioaddr, CSR0));
31525+ __func__, lp->a->read_csr(ioaddr, CSR0));
31526
31527 /* Default status -- will not enable Successful-TxDone
31528 * interrupt when that option is available to us.
31529@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31530 dev->stats.tx_bytes += skb->len;
31531
31532 /* Trigger an immediate send poll. */
31533- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31534+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31535
31536 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
31537 lp->tx_full = 1;
31538@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
31539
31540 spin_lock(&lp->lock);
31541
31542- csr0 = lp->a.read_csr(ioaddr, CSR0);
31543+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31544 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
31545 if (csr0 == 0xffff)
31546 break; /* PCMCIA remove happened */
31547 /* Acknowledge all of the current interrupt sources ASAP. */
31548- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31549+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31550
31551 netif_printk(lp, intr, KERN_DEBUG, dev,
31552 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
31553- csr0, lp->a.read_csr(ioaddr, CSR0));
31554+ csr0, lp->a->read_csr(ioaddr, CSR0));
31555
31556 /* Log misc errors. */
31557 if (csr0 & 0x4000)
31558@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
31559 if (napi_schedule_prep(&lp->napi)) {
31560 u16 val;
31561 /* set interrupt masks */
31562- val = lp->a.read_csr(ioaddr, CSR3);
31563+ val = lp->a->read_csr(ioaddr, CSR3);
31564 val |= 0x5f00;
31565- lp->a.write_csr(ioaddr, CSR3, val);
31566+ lp->a->write_csr(ioaddr, CSR3, val);
31567
31568 __napi_schedule(&lp->napi);
31569 break;
31570 }
31571- csr0 = lp->a.read_csr(ioaddr, CSR0);
31572+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31573 }
31574
31575 netif_printk(lp, intr, KERN_DEBUG, dev,
31576 "exiting interrupt, csr0=%#4.4x\n",
31577- lp->a.read_csr(ioaddr, CSR0));
31578+ lp->a->read_csr(ioaddr, CSR0));
31579
31580 spin_unlock(&lp->lock);
31581
31582@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
31583
31584 spin_lock_irqsave(&lp->lock, flags);
31585
31586- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31587+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31588
31589 netif_printk(lp, ifdown, KERN_DEBUG, dev,
31590 "Shutting down ethercard, status was %2.2x\n",
31591- lp->a.read_csr(ioaddr, CSR0));
31592+ lp->a->read_csr(ioaddr, CSR0));
31593
31594 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
31595- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31596+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31597
31598 /*
31599 * Switch back to 16bit mode to avoid problems with dumb
31600 * DOS packet driver after a warm reboot
31601 */
31602- lp->a.write_bcr(ioaddr, 20, 4);
31603+ lp->a->write_bcr(ioaddr, 20, 4);
31604
31605 spin_unlock_irqrestore(&lp->lock, flags);
31606
31607@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
31608 unsigned long flags;
31609
31610 spin_lock_irqsave(&lp->lock, flags);
31611- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31612+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31613 spin_unlock_irqrestore(&lp->lock, flags);
31614
31615 return &dev->stats;
31616@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
31617 if (dev->flags & IFF_ALLMULTI) {
31618 ib->filter[0] = cpu_to_le32(~0U);
31619 ib->filter[1] = cpu_to_le32(~0U);
31620- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31621- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31622- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31623- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31624+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31625+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31626+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31627+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31628 return;
31629 }
31630 /* clear the multicast filter */
31631@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
31632 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
31633 }
31634 for (i = 0; i < 4; i++)
31635- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
31636+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
31637 le16_to_cpu(mcast_table[i]));
31638 }
31639
31640@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
31641
31642 spin_lock_irqsave(&lp->lock, flags);
31643 suspended = pcnet32_suspend(dev, &flags, 0);
31644- csr15 = lp->a.read_csr(ioaddr, CSR15);
31645+ csr15 = lp->a->read_csr(ioaddr, CSR15);
31646 if (dev->flags & IFF_PROMISC) {
31647 /* Log any net taps. */
31648 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
31649 lp->init_block->mode =
31650 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
31651 7);
31652- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
31653+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
31654 } else {
31655 lp->init_block->mode =
31656 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
31657- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31658+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31659 pcnet32_load_multicast(dev);
31660 }
31661
31662 if (suspended) {
31663 int csr5;
31664 /* clear SUSPEND (SPND) - CSR5 bit 0 */
31665- csr5 = lp->a.read_csr(ioaddr, CSR5);
31666- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31667+ csr5 = lp->a->read_csr(ioaddr, CSR5);
31668+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31669 } else {
31670- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31671+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31672 pcnet32_restart(dev, CSR0_NORMAL);
31673 netif_wake_queue(dev);
31674 }
31675@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
31676 if (!lp->mii)
31677 return 0;
31678
31679- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31680- val_out = lp->a.read_bcr(ioaddr, 34);
31681+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31682+ val_out = lp->a->read_bcr(ioaddr, 34);
31683
31684 return val_out;
31685 }
31686@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
31687 if (!lp->mii)
31688 return;
31689
31690- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31691- lp->a.write_bcr(ioaddr, 34, val);
31692+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31693+ lp->a->write_bcr(ioaddr, 34, val);
31694 }
31695
31696 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
31697@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
31698 curr_link = mii_link_ok(&lp->mii_if);
31699 } else {
31700 ulong ioaddr = dev->base_addr; /* card base I/O address */
31701- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31702+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31703 }
31704 if (!curr_link) {
31705 if (prev_link || verbose) {
31706@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
31707 (ecmd.duplex == DUPLEX_FULL)
31708 ? "full" : "half");
31709 }
31710- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
31711+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
31712 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
31713 if (lp->mii_if.full_duplex)
31714 bcr9 |= (1 << 0);
31715 else
31716 bcr9 &= ~(1 << 0);
31717- lp->a.write_bcr(dev->base_addr, 9, bcr9);
31718+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
31719 }
31720 } else {
31721 netif_info(lp, link, dev, "link up\n");
31722diff -urNp linux-3.0.7/drivers/net/ppp_generic.c linux-3.0.7/drivers/net/ppp_generic.c
31723--- linux-3.0.7/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
31724+++ linux-3.0.7/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
31725@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
31726 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
31727 struct ppp_stats stats;
31728 struct ppp_comp_stats cstats;
31729- char *vers;
31730
31731 switch (cmd) {
31732 case SIOCGPPPSTATS:
31733@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
31734 break;
31735
31736 case SIOCGPPPVER:
31737- vers = PPP_VERSION;
31738- if (copy_to_user(addr, vers, strlen(vers) + 1))
31739+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
31740 break;
31741 err = 0;
31742 break;
31743diff -urNp linux-3.0.7/drivers/net/r8169.c linux-3.0.7/drivers/net/r8169.c
31744--- linux-3.0.7/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
31745+++ linux-3.0.7/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
31746@@ -645,12 +645,12 @@ struct rtl8169_private {
31747 struct mdio_ops {
31748 void (*write)(void __iomem *, int, int);
31749 int (*read)(void __iomem *, int);
31750- } mdio_ops;
31751+ } __no_const mdio_ops;
31752
31753 struct pll_power_ops {
31754 void (*down)(struct rtl8169_private *);
31755 void (*up)(struct rtl8169_private *);
31756- } pll_power_ops;
31757+ } __no_const pll_power_ops;
31758
31759 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
31760 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
31761diff -urNp linux-3.0.7/drivers/net/sis190.c linux-3.0.7/drivers/net/sis190.c
31762--- linux-3.0.7/drivers/net/sis190.c 2011-09-02 18:11:21.000000000 -0400
31763+++ linux-3.0.7/drivers/net/sis190.c 2011-10-11 10:44:33.000000000 -0400
31764@@ -1623,7 +1623,7 @@ static int __devinit sis190_get_mac_addr
31765 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
31766 struct net_device *dev)
31767 {
31768- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
31769+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
31770 struct sis190_private *tp = netdev_priv(dev);
31771 struct pci_dev *isa_bridge;
31772 u8 reg, tmp8;
31773diff -urNp linux-3.0.7/drivers/net/sundance.c linux-3.0.7/drivers/net/sundance.c
31774--- linux-3.0.7/drivers/net/sundance.c 2011-07-21 22:17:23.000000000 -0400
31775+++ linux-3.0.7/drivers/net/sundance.c 2011-10-11 10:44:33.000000000 -0400
31776@@ -218,7 +218,7 @@ enum {
31777 struct pci_id_info {
31778 const char *name;
31779 };
31780-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31781+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31782 {"D-Link DFE-550TX FAST Ethernet Adapter"},
31783 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
31784 {"D-Link DFE-580TX 4 port Server Adapter"},
31785diff -urNp linux-3.0.7/drivers/net/tg3.h linux-3.0.7/drivers/net/tg3.h
31786--- linux-3.0.7/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
31787+++ linux-3.0.7/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
31788@@ -134,6 +134,7 @@
31789 #define CHIPREV_ID_5750_A0 0x4000
31790 #define CHIPREV_ID_5750_A1 0x4001
31791 #define CHIPREV_ID_5750_A3 0x4003
31792+#define CHIPREV_ID_5750_C1 0x4201
31793 #define CHIPREV_ID_5750_C2 0x4202
31794 #define CHIPREV_ID_5752_A0_HW 0x5000
31795 #define CHIPREV_ID_5752_A0 0x6000
31796diff -urNp linux-3.0.7/drivers/net/tokenring/abyss.c linux-3.0.7/drivers/net/tokenring/abyss.c
31797--- linux-3.0.7/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
31798+++ linux-3.0.7/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
31799@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
31800
31801 static int __init abyss_init (void)
31802 {
31803- abyss_netdev_ops = tms380tr_netdev_ops;
31804+ pax_open_kernel();
31805+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31806
31807- abyss_netdev_ops.ndo_open = abyss_open;
31808- abyss_netdev_ops.ndo_stop = abyss_close;
31809+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
31810+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
31811+ pax_close_kernel();
31812
31813 return pci_register_driver(&abyss_driver);
31814 }
31815diff -urNp linux-3.0.7/drivers/net/tokenring/madgemc.c linux-3.0.7/drivers/net/tokenring/madgemc.c
31816--- linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
31817+++ linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
31818@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
31819
31820 static int __init madgemc_init (void)
31821 {
31822- madgemc_netdev_ops = tms380tr_netdev_ops;
31823- madgemc_netdev_ops.ndo_open = madgemc_open;
31824- madgemc_netdev_ops.ndo_stop = madgemc_close;
31825+ pax_open_kernel();
31826+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31827+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
31828+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
31829+ pax_close_kernel();
31830
31831 return mca_register_driver (&madgemc_driver);
31832 }
31833diff -urNp linux-3.0.7/drivers/net/tokenring/proteon.c linux-3.0.7/drivers/net/tokenring/proteon.c
31834--- linux-3.0.7/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
31835+++ linux-3.0.7/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
31836@@ -353,9 +353,11 @@ static int __init proteon_init(void)
31837 struct platform_device *pdev;
31838 int i, num = 0, err = 0;
31839
31840- proteon_netdev_ops = tms380tr_netdev_ops;
31841- proteon_netdev_ops.ndo_open = proteon_open;
31842- proteon_netdev_ops.ndo_stop = tms380tr_close;
31843+ pax_open_kernel();
31844+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31845+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
31846+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
31847+ pax_close_kernel();
31848
31849 err = platform_driver_register(&proteon_driver);
31850 if (err)
31851diff -urNp linux-3.0.7/drivers/net/tokenring/skisa.c linux-3.0.7/drivers/net/tokenring/skisa.c
31852--- linux-3.0.7/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
31853+++ linux-3.0.7/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
31854@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
31855 struct platform_device *pdev;
31856 int i, num = 0, err = 0;
31857
31858- sk_isa_netdev_ops = tms380tr_netdev_ops;
31859- sk_isa_netdev_ops.ndo_open = sk_isa_open;
31860- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31861+ pax_open_kernel();
31862+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31863+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
31864+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31865+ pax_close_kernel();
31866
31867 err = platform_driver_register(&sk_isa_driver);
31868 if (err)
31869diff -urNp linux-3.0.7/drivers/net/tulip/de2104x.c linux-3.0.7/drivers/net/tulip/de2104x.c
31870--- linux-3.0.7/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
31871+++ linux-3.0.7/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
31872@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
31873 struct de_srom_info_leaf *il;
31874 void *bufp;
31875
31876+ pax_track_stack();
31877+
31878 /* download entire eeprom */
31879 for (i = 0; i < DE_EEPROM_WORDS; i++)
31880 ((__le16 *)ee_data)[i] =
31881diff -urNp linux-3.0.7/drivers/net/tulip/de4x5.c linux-3.0.7/drivers/net/tulip/de4x5.c
31882--- linux-3.0.7/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
31883+++ linux-3.0.7/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
31884@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
31885 for (i=0; i<ETH_ALEN; i++) {
31886 tmp.addr[i] = dev->dev_addr[i];
31887 }
31888- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31889+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31890 break;
31891
31892 case DE4X5_SET_HWADDR: /* Set the hardware address */
31893@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
31894 spin_lock_irqsave(&lp->lock, flags);
31895 memcpy(&statbuf, &lp->pktStats, ioc->len);
31896 spin_unlock_irqrestore(&lp->lock, flags);
31897- if (copy_to_user(ioc->data, &statbuf, ioc->len))
31898+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
31899 return -EFAULT;
31900 break;
31901 }
31902diff -urNp linux-3.0.7/drivers/net/tulip/eeprom.c linux-3.0.7/drivers/net/tulip/eeprom.c
31903--- linux-3.0.7/drivers/net/tulip/eeprom.c 2011-07-21 22:17:23.000000000 -0400
31904+++ linux-3.0.7/drivers/net/tulip/eeprom.c 2011-10-11 10:44:33.000000000 -0400
31905@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
31906 {NULL}};
31907
31908
31909-static const char *block_name[] __devinitdata = {
31910+static const char *block_name[] __devinitconst = {
31911 "21140 non-MII",
31912 "21140 MII PHY",
31913 "21142 Serial PHY",
31914diff -urNp linux-3.0.7/drivers/net/tulip/winbond-840.c linux-3.0.7/drivers/net/tulip/winbond-840.c
31915--- linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-07-21 22:17:23.000000000 -0400
31916+++ linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-10-11 10:44:33.000000000 -0400
31917@@ -236,7 +236,7 @@ struct pci_id_info {
31918 int drv_flags; /* Driver use, intended as capability flags. */
31919 };
31920
31921-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31922+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31923 { /* Sometime a Level-One switch card. */
31924 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
31925 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
31926diff -urNp linux-3.0.7/drivers/net/usb/hso.c linux-3.0.7/drivers/net/usb/hso.c
31927--- linux-3.0.7/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
31928+++ linux-3.0.7/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
31929@@ -71,7 +71,7 @@
31930 #include <asm/byteorder.h>
31931 #include <linux/serial_core.h>
31932 #include <linux/serial.h>
31933-
31934+#include <asm/local.h>
31935
31936 #define MOD_AUTHOR "Option Wireless"
31937 #define MOD_DESCRIPTION "USB High Speed Option driver"
31938@@ -257,7 +257,7 @@ struct hso_serial {
31939
31940 /* from usb_serial_port */
31941 struct tty_struct *tty;
31942- int open_count;
31943+ local_t open_count;
31944 spinlock_t serial_lock;
31945
31946 int (*write_data) (struct hso_serial *serial);
31947@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
31948 struct urb *urb;
31949
31950 urb = serial->rx_urb[0];
31951- if (serial->open_count > 0) {
31952+ if (local_read(&serial->open_count) > 0) {
31953 count = put_rxbuf_data(urb, serial);
31954 if (count == -1)
31955 return;
31956@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
31957 DUMP1(urb->transfer_buffer, urb->actual_length);
31958
31959 /* Anyone listening? */
31960- if (serial->open_count == 0)
31961+ if (local_read(&serial->open_count) == 0)
31962 return;
31963
31964 if (status == 0) {
31965@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
31966 spin_unlock_irq(&serial->serial_lock);
31967
31968 /* check for port already opened, if not set the termios */
31969- serial->open_count++;
31970- if (serial->open_count == 1) {
31971+ if (local_inc_return(&serial->open_count) == 1) {
31972 serial->rx_state = RX_IDLE;
31973 /* Force default termio settings */
31974 _hso_serial_set_termios(tty, NULL);
31975@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
31976 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
31977 if (result) {
31978 hso_stop_serial_device(serial->parent);
31979- serial->open_count--;
31980+ local_dec(&serial->open_count);
31981 kref_put(&serial->parent->ref, hso_serial_ref_free);
31982 }
31983 } else {
31984@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
31985
31986 /* reset the rts and dtr */
31987 /* do the actual close */
31988- serial->open_count--;
31989+ local_dec(&serial->open_count);
31990
31991- if (serial->open_count <= 0) {
31992- serial->open_count = 0;
31993+ if (local_read(&serial->open_count) <= 0) {
31994+ local_set(&serial->open_count, 0);
31995 spin_lock_irq(&serial->serial_lock);
31996 if (serial->tty == tty) {
31997 serial->tty->driver_data = NULL;
31998@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
31999
32000 /* the actual setup */
32001 spin_lock_irqsave(&serial->serial_lock, flags);
32002- if (serial->open_count)
32003+ if (local_read(&serial->open_count))
32004 _hso_serial_set_termios(tty, old);
32005 else
32006 tty->termios = old;
32007@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32008 D1("Pending read interrupt on port %d\n", i);
32009 spin_lock(&serial->serial_lock);
32010 if (serial->rx_state == RX_IDLE &&
32011- serial->open_count > 0) {
32012+ local_read(&serial->open_count) > 0) {
32013 /* Setup and send a ctrl req read on
32014 * port i */
32015 if (!serial->rx_urb_filled[0]) {
32016@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32017 /* Start all serial ports */
32018 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32019 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32020- if (dev2ser(serial_table[i])->open_count) {
32021+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32022 result =
32023 hso_start_serial_device(serial_table[i], GFP_NOIO);
32024 hso_kick_transmit(dev2ser(serial_table[i]));
32025diff -urNp linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c
32026--- linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
32027+++ linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
32028@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
32029 * Return with error code if any of the queue indices
32030 * is out of range
32031 */
32032- if (p->ring_index[i] < 0 ||
32033- p->ring_index[i] >= adapter->num_rx_queues)
32034+ if (p->ring_index[i] >= adapter->num_rx_queues)
32035 return -EINVAL;
32036 }
32037
32038diff -urNp linux-3.0.7/drivers/net/vxge/vxge-config.h linux-3.0.7/drivers/net/vxge/vxge-config.h
32039--- linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
32040+++ linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
32041@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
32042 void (*link_down)(struct __vxge_hw_device *devh);
32043 void (*crit_err)(struct __vxge_hw_device *devh,
32044 enum vxge_hw_event type, u64 ext_data);
32045-};
32046+} __no_const;
32047
32048 /*
32049 * struct __vxge_hw_blockpool_entry - Block private data structure
32050diff -urNp linux-3.0.7/drivers/net/vxge/vxge-main.c linux-3.0.7/drivers/net/vxge/vxge-main.c
32051--- linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
32052+++ linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
32053@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32054 struct sk_buff *completed[NR_SKB_COMPLETED];
32055 int more;
32056
32057+ pax_track_stack();
32058+
32059 do {
32060 more = 0;
32061 skb_ptr = completed;
32062@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
32063 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32064 int index;
32065
32066+ pax_track_stack();
32067+
32068 /*
32069 * Filling
32070 * - itable with bucket numbers
32071diff -urNp linux-3.0.7/drivers/net/vxge/vxge-traffic.h linux-3.0.7/drivers/net/vxge/vxge-traffic.h
32072--- linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
32073+++ linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
32074@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32075 struct vxge_hw_mempool_dma *dma_object,
32076 u32 index,
32077 u32 is_last);
32078-};
32079+} __no_const;
32080
32081 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32082 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32083diff -urNp linux-3.0.7/drivers/net/wan/cycx_x25.c linux-3.0.7/drivers/net/wan/cycx_x25.c
32084--- linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
32085+++ linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
32086@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
32087 unsigned char hex[1024],
32088 * phex = hex;
32089
32090+ pax_track_stack();
32091+
32092 if (len >= (sizeof(hex) / 2))
32093 len = (sizeof(hex) / 2) - 1;
32094
32095diff -urNp linux-3.0.7/drivers/net/wan/hdlc_x25.c linux-3.0.7/drivers/net/wan/hdlc_x25.c
32096--- linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
32097+++ linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
32098@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32099
32100 static int x25_open(struct net_device *dev)
32101 {
32102- struct lapb_register_struct cb;
32103+ static struct lapb_register_struct cb = {
32104+ .connect_confirmation = x25_connected,
32105+ .connect_indication = x25_connected,
32106+ .disconnect_confirmation = x25_disconnected,
32107+ .disconnect_indication = x25_disconnected,
32108+ .data_indication = x25_data_indication,
32109+ .data_transmit = x25_data_transmit
32110+ };
32111 int result;
32112
32113- cb.connect_confirmation = x25_connected;
32114- cb.connect_indication = x25_connected;
32115- cb.disconnect_confirmation = x25_disconnected;
32116- cb.disconnect_indication = x25_disconnected;
32117- cb.data_indication = x25_data_indication;
32118- cb.data_transmit = x25_data_transmit;
32119-
32120 result = lapb_register(dev, &cb);
32121 if (result != LAPB_OK)
32122 return result;
32123diff -urNp linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c
32124--- linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
32125+++ linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
32126@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32127 int do_autopm = 1;
32128 DECLARE_COMPLETION_ONSTACK(notif_completion);
32129
32130+ pax_track_stack();
32131+
32132 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32133 i2400m, ack, ack_size);
32134 BUG_ON(_ack == i2400m->bm_ack_buf);
32135diff -urNp linux-3.0.7/drivers/net/wireless/airo.c linux-3.0.7/drivers/net/wireless/airo.c
32136--- linux-3.0.7/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
32137+++ linux-3.0.7/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
32138@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32139 BSSListElement * loop_net;
32140 BSSListElement * tmp_net;
32141
32142+ pax_track_stack();
32143+
32144 /* Blow away current list of scan results */
32145 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32146 list_move_tail (&loop_net->list, &ai->network_free_list);
32147@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32148 WepKeyRid wkr;
32149 int rc;
32150
32151+ pax_track_stack();
32152+
32153 memset( &mySsid, 0, sizeof( mySsid ) );
32154 kfree (ai->flash);
32155 ai->flash = NULL;
32156@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32157 __le32 *vals = stats.vals;
32158 int len;
32159
32160+ pax_track_stack();
32161+
32162 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32163 return -ENOMEM;
32164 data = file->private_data;
32165@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32166 /* If doLoseSync is not 1, we won't do a Lose Sync */
32167 int doLoseSync = -1;
32168
32169+ pax_track_stack();
32170+
32171 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32172 return -ENOMEM;
32173 data = file->private_data;
32174@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32175 int i;
32176 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32177
32178+ pax_track_stack();
32179+
32180 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32181 if (!qual)
32182 return -ENOMEM;
32183@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32184 CapabilityRid cap_rid;
32185 __le32 *vals = stats_rid.vals;
32186
32187+ pax_track_stack();
32188+
32189 /* Get stats out of the card */
32190 clear_bit(JOB_WSTATS, &local->jobs);
32191 if (local->power.event) {
32192diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c
32193--- linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
32194+++ linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
32195@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
32196 unsigned int v;
32197 u64 tsf;
32198
32199+ pax_track_stack();
32200+
32201 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32202 len += snprintf(buf+len, sizeof(buf)-len,
32203 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32204@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
32205 unsigned int len = 0;
32206 unsigned int i;
32207
32208+ pax_track_stack();
32209+
32210 len += snprintf(buf+len, sizeof(buf)-len,
32211 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32212
32213@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
32214 unsigned int i;
32215 unsigned int v;
32216
32217+ pax_track_stack();
32218+
32219 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
32220 sc->ah->ah_ant_mode);
32221 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
32222@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
32223 unsigned int len = 0;
32224 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
32225
32226+ pax_track_stack();
32227+
32228 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
32229 sc->bssidmask);
32230 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
32231@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
32232 unsigned int len = 0;
32233 int i;
32234
32235+ pax_track_stack();
32236+
32237 len += snprintf(buf+len, sizeof(buf)-len,
32238 "RX\n---------------------\n");
32239 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
32240@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
32241 char buf[700];
32242 unsigned int len = 0;
32243
32244+ pax_track_stack();
32245+
32246 len += snprintf(buf+len, sizeof(buf)-len,
32247 "HW has PHY error counters:\t%s\n",
32248 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
32249@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32250 struct ath5k_buf *bf, *bf0;
32251 int i, n;
32252
32253+ pax_track_stack();
32254+
32255 len += snprintf(buf+len, sizeof(buf)-len,
32256 "available txbuffers: %d\n", sc->txbuf_len);
32257
32258diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32259--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
32260+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
32261@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32262 int i, im, j;
32263 int nmeasurement;
32264
32265+ pax_track_stack();
32266+
32267 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32268 if (ah->txchainmask & (1 << i))
32269 num_chains++;
32270diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32271--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
32272+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
32273@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
32274 int theta_low_bin = 0;
32275 int i;
32276
32277+ pax_track_stack();
32278+
32279 /* disregard any bin that contains <= 16 samples */
32280 thresh_accum_cnt = 16;
32281 scale_factor = 5;
32282diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c
32283--- linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
32284+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
32285@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
32286 char buf[512];
32287 unsigned int len = 0;
32288
32289+ pax_track_stack();
32290+
32291 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32292 len += snprintf(buf + len, sizeof(buf) - len,
32293 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32294@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
32295 u8 addr[ETH_ALEN];
32296 u32 tmp;
32297
32298+ pax_track_stack();
32299+
32300 len += snprintf(buf + len, sizeof(buf) - len,
32301 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32302 wiphy_name(sc->hw->wiphy),
32303diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32304--- linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
32305+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
32306@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32307 unsigned int len = 0;
32308 int ret = 0;
32309
32310+ pax_track_stack();
32311+
32312 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32313
32314 ath9k_htc_ps_wakeup(priv);
32315@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32316 unsigned int len = 0;
32317 int ret = 0;
32318
32319+ pax_track_stack();
32320+
32321 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32322
32323 ath9k_htc_ps_wakeup(priv);
32324@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32325 unsigned int len = 0;
32326 int ret = 0;
32327
32328+ pax_track_stack();
32329+
32330 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32331
32332 ath9k_htc_ps_wakeup(priv);
32333@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32334 char buf[512];
32335 unsigned int len = 0;
32336
32337+ pax_track_stack();
32338+
32339 len += snprintf(buf + len, sizeof(buf) - len,
32340 "%20s : %10u\n", "Buffers queued",
32341 priv->debug.tx_stats.buf_queued);
32342@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32343 char buf[512];
32344 unsigned int len = 0;
32345
32346+ pax_track_stack();
32347+
32348 spin_lock_bh(&priv->tx.tx_lock);
32349
32350 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32351@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32352 char buf[512];
32353 unsigned int len = 0;
32354
32355+ pax_track_stack();
32356+
32357 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32358 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32359
32360diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h
32361--- linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
32362+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
32363@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
32364
32365 /* ANI */
32366 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32367-};
32368+} __no_const;
32369
32370 /**
32371 * struct ath_hw_ops - callbacks used by hardware code and driver code
32372@@ -637,7 +637,7 @@ struct ath_hw_ops {
32373 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32374 struct ath_hw_antcomb_conf *antconf);
32375
32376-};
32377+} __no_const;
32378
32379 struct ath_nf_limits {
32380 s16 max;
32381@@ -650,7 +650,7 @@ struct ath_nf_limits {
32382 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32383
32384 struct ath_hw {
32385- struct ath_ops reg_ops;
32386+ ath_ops_no_const reg_ops;
32387
32388 struct ieee80211_hw *hw;
32389 struct ath_common common;
32390diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath.h linux-3.0.7/drivers/net/wireless/ath/ath.h
32391--- linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
32392+++ linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
32393@@ -121,6 +121,7 @@ struct ath_ops {
32394 void (*write_flush) (void *);
32395 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32396 };
32397+typedef struct ath_ops __no_const ath_ops_no_const;
32398
32399 struct ath_common;
32400 struct ath_bus_ops;
32401diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c
32402--- linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
32403+++ linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
32404@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
32405 int err;
32406 DECLARE_SSID_BUF(ssid);
32407
32408+ pax_track_stack();
32409+
32410 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32411
32412 if (ssid_len)
32413@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
32414 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32415 int err;
32416
32417+ pax_track_stack();
32418+
32419 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32420 idx, keylen, len);
32421
32422diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c
32423--- linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
32424+++ linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
32425@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32426 unsigned long flags;
32427 DECLARE_SSID_BUF(ssid);
32428
32429+ pax_track_stack();
32430+
32431 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32432 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32433 print_ssid(ssid, info_element->data, info_element->len),
32434diff -urNp linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c
32435--- linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:54:54.000000000 -0400
32436+++ linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:55:27.000000000 -0400
32437@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
32438 */
32439 if (iwl3945_mod_params.disable_hw_scan) {
32440 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32441- iwl3945_hw_ops.hw_scan = NULL;
32442+ pax_open_kernel();
32443+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32444+ pax_close_kernel();
32445 }
32446
32447 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32448diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32449--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
32450+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
32451@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
32452 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32453 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32454
32455+ pax_track_stack();
32456+
32457 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32458
32459 /* Treat uninitialized rate scaling data same as non-existing. */
32460@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
32461 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32462 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32463
32464+ pax_track_stack();
32465+
32466 /* Override starting rate (index 0) if needed for debug purposes */
32467 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32468
32469diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32470--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
32471+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
32472@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
32473 int pos = 0;
32474 const size_t bufsz = sizeof(buf);
32475
32476+ pax_track_stack();
32477+
32478 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32479 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32480 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32481@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32482 char buf[256 * NUM_IWL_RXON_CTX];
32483 const size_t bufsz = sizeof(buf);
32484
32485+ pax_track_stack();
32486+
32487 for_each_context(priv, ctx) {
32488 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32489 ctx->ctxid);
32490diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h
32491--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
32492+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
32493@@ -68,8 +68,8 @@ do {
32494 } while (0)
32495
32496 #else
32497-#define IWL_DEBUG(__priv, level, fmt, args...)
32498-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32499+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32500+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32501 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32502 const void *p, u32 len)
32503 {}
32504diff -urNp linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c
32505--- linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
32506+++ linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
32507@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32508 int buf_len = 512;
32509 size_t len = 0;
32510
32511+ pax_track_stack();
32512+
32513 if (*ppos != 0)
32514 return 0;
32515 if (count < sizeof(buf))
32516diff -urNp linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c
32517--- linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
32518+++ linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
32519@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
32520 return -EINVAL;
32521
32522 if (fake_hw_scan) {
32523- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32524- mac80211_hwsim_ops.sw_scan_start = NULL;
32525- mac80211_hwsim_ops.sw_scan_complete = NULL;
32526+ pax_open_kernel();
32527+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32528+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
32529+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
32530+ pax_close_kernel();
32531 }
32532
32533 spin_lock_init(&hwsim_radio_lock);
32534diff -urNp linux-3.0.7/drivers/net/wireless/rndis_wlan.c linux-3.0.7/drivers/net/wireless/rndis_wlan.c
32535--- linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
32536+++ linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
32537@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
32538
32539 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
32540
32541- if (rts_threshold < 0 || rts_threshold > 2347)
32542+ if (rts_threshold > 2347)
32543 rts_threshold = 2347;
32544
32545 tmp = cpu_to_le32(rts_threshold);
32546diff -urNp linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
32547--- linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
32548+++ linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
32549@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
32550 u8 rfpath;
32551 u8 num_total_rfpath = rtlphy->num_total_rfpath;
32552
32553+ pax_track_stack();
32554+
32555 precommoncmdcnt = 0;
32556 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
32557 MAX_PRECMD_CNT,
32558diff -urNp linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h
32559--- linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
32560+++ linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
32561@@ -266,7 +266,7 @@ struct wl1251_if_operations {
32562 void (*reset)(struct wl1251 *wl);
32563 void (*enable_irq)(struct wl1251 *wl);
32564 void (*disable_irq)(struct wl1251 *wl);
32565-};
32566+} __no_const;
32567
32568 struct wl1251 {
32569 struct ieee80211_hw *hw;
32570diff -urNp linux-3.0.7/drivers/net/wireless/wl12xx/spi.c linux-3.0.7/drivers/net/wireless/wl12xx/spi.c
32571--- linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
32572+++ linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
32573@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
32574 u32 chunk_len;
32575 int i;
32576
32577+ pax_track_stack();
32578+
32579 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
32580
32581 spi_message_init(&m);
32582diff -urNp linux-3.0.7/drivers/oprofile/buffer_sync.c linux-3.0.7/drivers/oprofile/buffer_sync.c
32583--- linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
32584+++ linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
32585@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
32586 if (cookie == NO_COOKIE)
32587 offset = pc;
32588 if (cookie == INVALID_COOKIE) {
32589- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32590+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32591 offset = pc;
32592 }
32593 if (cookie != last_cookie) {
32594@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
32595 /* add userspace sample */
32596
32597 if (!mm) {
32598- atomic_inc(&oprofile_stats.sample_lost_no_mm);
32599+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32600 return 0;
32601 }
32602
32603 cookie = lookup_dcookie(mm, s->eip, &offset);
32604
32605 if (cookie == INVALID_COOKIE) {
32606- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32607+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32608 return 0;
32609 }
32610
32611@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
32612 /* ignore backtraces if failed to add a sample */
32613 if (state == sb_bt_start) {
32614 state = sb_bt_ignore;
32615- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32616+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32617 }
32618 }
32619 release_mm(mm);
32620diff -urNp linux-3.0.7/drivers/oprofile/event_buffer.c linux-3.0.7/drivers/oprofile/event_buffer.c
32621--- linux-3.0.7/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
32622+++ linux-3.0.7/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
32623@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32624 }
32625
32626 if (buffer_pos == buffer_size) {
32627- atomic_inc(&oprofile_stats.event_lost_overflow);
32628+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32629 return;
32630 }
32631
32632diff -urNp linux-3.0.7/drivers/oprofile/oprof.c linux-3.0.7/drivers/oprofile/oprof.c
32633--- linux-3.0.7/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
32634+++ linux-3.0.7/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
32635@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32636 if (oprofile_ops.switch_events())
32637 return;
32638
32639- atomic_inc(&oprofile_stats.multiplex_counter);
32640+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32641 start_switch_worker();
32642 }
32643
32644diff -urNp linux-3.0.7/drivers/oprofile/oprofilefs.c linux-3.0.7/drivers/oprofile/oprofilefs.c
32645--- linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
32646+++ linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
32647@@ -186,7 +186,7 @@ static const struct file_operations atom
32648
32649
32650 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32651- char const *name, atomic_t *val)
32652+ char const *name, atomic_unchecked_t *val)
32653 {
32654 return __oprofilefs_create_file(sb, root, name,
32655 &atomic_ro_fops, 0444, val);
32656diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.c linux-3.0.7/drivers/oprofile/oprofile_stats.c
32657--- linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
32658+++ linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
32659@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32660 cpu_buf->sample_invalid_eip = 0;
32661 }
32662
32663- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32664- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32665- atomic_set(&oprofile_stats.event_lost_overflow, 0);
32666- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32667- atomic_set(&oprofile_stats.multiplex_counter, 0);
32668+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32669+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32670+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32671+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32672+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32673 }
32674
32675
32676diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.h linux-3.0.7/drivers/oprofile/oprofile_stats.h
32677--- linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
32678+++ linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
32679@@ -13,11 +13,11 @@
32680 #include <asm/atomic.h>
32681
32682 struct oprofile_stat_struct {
32683- atomic_t sample_lost_no_mm;
32684- atomic_t sample_lost_no_mapping;
32685- atomic_t bt_lost_no_mapping;
32686- atomic_t event_lost_overflow;
32687- atomic_t multiplex_counter;
32688+ atomic_unchecked_t sample_lost_no_mm;
32689+ atomic_unchecked_t sample_lost_no_mapping;
32690+ atomic_unchecked_t bt_lost_no_mapping;
32691+ atomic_unchecked_t event_lost_overflow;
32692+ atomic_unchecked_t multiplex_counter;
32693 };
32694
32695 extern struct oprofile_stat_struct oprofile_stats;
32696diff -urNp linux-3.0.7/drivers/parport/procfs.c linux-3.0.7/drivers/parport/procfs.c
32697--- linux-3.0.7/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
32698+++ linux-3.0.7/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
32699@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32700
32701 *ppos += len;
32702
32703- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32704+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32705 }
32706
32707 #ifdef CONFIG_PARPORT_1284
32708@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32709
32710 *ppos += len;
32711
32712- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32713+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32714 }
32715 #endif /* IEEE1284.3 support. */
32716
32717diff -urNp linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h
32718--- linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
32719+++ linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
32720@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
32721 int (*hardware_test) (struct slot* slot, u32 value);
32722 u8 (*get_power) (struct slot* slot);
32723 int (*set_power) (struct slot* slot, int value);
32724-};
32725+} __no_const;
32726
32727 struct cpci_hp_controller {
32728 unsigned int irq;
32729diff -urNp linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c
32730--- linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
32731+++ linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
32732@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32733
32734 void compaq_nvram_init (void __iomem *rom_start)
32735 {
32736+
32737+#ifndef CONFIG_PAX_KERNEXEC
32738 if (rom_start) {
32739 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32740 }
32741+#endif
32742+
32743 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32744
32745 /* initialize our int15 lock */
32746diff -urNp linux-3.0.7/drivers/pci/pcie/aspm.c linux-3.0.7/drivers/pci/pcie/aspm.c
32747--- linux-3.0.7/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
32748+++ linux-3.0.7/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
32749@@ -27,9 +27,9 @@
32750 #define MODULE_PARAM_PREFIX "pcie_aspm."
32751
32752 /* Note: those are not register definitions */
32753-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32754-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32755-#define ASPM_STATE_L1 (4) /* L1 state */
32756+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32757+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32758+#define ASPM_STATE_L1 (4U) /* L1 state */
32759 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32760 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32761
32762diff -urNp linux-3.0.7/drivers/pci/probe.c linux-3.0.7/drivers/pci/probe.c
32763--- linux-3.0.7/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
32764+++ linux-3.0.7/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
32765@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
32766 u32 l, sz, mask;
32767 u16 orig_cmd;
32768
32769- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
32770+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
32771
32772 if (!dev->mmio_always_on) {
32773 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
32774diff -urNp linux-3.0.7/drivers/pci/proc.c linux-3.0.7/drivers/pci/proc.c
32775--- linux-3.0.7/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
32776+++ linux-3.0.7/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
32777@@ -476,7 +476,16 @@ static const struct file_operations proc
32778 static int __init pci_proc_init(void)
32779 {
32780 struct pci_dev *dev = NULL;
32781+
32782+#ifdef CONFIG_GRKERNSEC_PROC_ADD
32783+#ifdef CONFIG_GRKERNSEC_PROC_USER
32784+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32785+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32786+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32787+#endif
32788+#else
32789 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32790+#endif
32791 proc_create("devices", 0, proc_bus_pci_dir,
32792 &proc_bus_pci_dev_operations);
32793 proc_initialized = 1;
32794diff -urNp linux-3.0.7/drivers/pci/xen-pcifront.c linux-3.0.7/drivers/pci/xen-pcifront.c
32795--- linux-3.0.7/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
32796+++ linux-3.0.7/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
32797@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
32798 struct pcifront_sd *sd = bus->sysdata;
32799 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32800
32801+ pax_track_stack();
32802+
32803 if (verbose_request)
32804 dev_info(&pdev->xdev->dev,
32805 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
32806@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
32807 struct pcifront_sd *sd = bus->sysdata;
32808 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32809
32810+ pax_track_stack();
32811+
32812 if (verbose_request)
32813 dev_info(&pdev->xdev->dev,
32814 "write dev=%04x:%02x:%02x.%01x - "
32815@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
32816 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32817 struct msi_desc *entry;
32818
32819+ pax_track_stack();
32820+
32821 if (nvec > SH_INFO_MAX_VEC) {
32822 dev_err(&dev->dev, "too much vector for pci frontend: %x."
32823 " Increase SH_INFO_MAX_VEC.\n", nvec);
32824@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
32825 struct pcifront_sd *sd = dev->bus->sysdata;
32826 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32827
32828+ pax_track_stack();
32829+
32830 err = do_pci_op(pdev, &op);
32831
32832 /* What should do for error ? */
32833@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
32834 struct pcifront_sd *sd = dev->bus->sysdata;
32835 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32836
32837+ pax_track_stack();
32838+
32839 err = do_pci_op(pdev, &op);
32840 if (likely(!err)) {
32841 vector[0] = op.value;
32842diff -urNp linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c
32843--- linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
32844+++ linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
32845@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
32846 return 0;
32847 }
32848
32849-void static hotkey_mask_warn_incomplete_mask(void)
32850+static void hotkey_mask_warn_incomplete_mask(void)
32851 {
32852 /* log only what the user can fix... */
32853 const u32 wantedmask = hotkey_driver_mask &
32854diff -urNp linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c
32855--- linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
32856+++ linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
32857@@ -59,7 +59,7 @@ do { \
32858 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
32859 } while(0)
32860
32861-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
32862+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
32863 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
32864
32865 /*
32866@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
32867
32868 cpu = get_cpu();
32869 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
32870+
32871+ pax_open_kernel();
32872 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
32873+ pax_close_kernel();
32874
32875 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
32876 spin_lock_irqsave(&pnp_bios_lock, flags);
32877@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
32878 :"memory");
32879 spin_unlock_irqrestore(&pnp_bios_lock, flags);
32880
32881+ pax_open_kernel();
32882 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
32883+ pax_close_kernel();
32884+
32885 put_cpu();
32886
32887 /* If we get here and this is set then the PnP BIOS faulted on us. */
32888@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
32889 return status;
32890 }
32891
32892-void pnpbios_calls_init(union pnp_bios_install_struct *header)
32893+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
32894 {
32895 int i;
32896
32897@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
32898 pnp_bios_callpoint.offset = header->fields.pm16offset;
32899 pnp_bios_callpoint.segment = PNP_CS16;
32900
32901+ pax_open_kernel();
32902+
32903 for_each_possible_cpu(i) {
32904 struct desc_struct *gdt = get_cpu_gdt_table(i);
32905 if (!gdt)
32906@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
32907 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
32908 (unsigned long)__va(header->fields.pm16dseg));
32909 }
32910+
32911+ pax_close_kernel();
32912 }
32913diff -urNp linux-3.0.7/drivers/pnp/resource.c linux-3.0.7/drivers/pnp/resource.c
32914--- linux-3.0.7/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
32915+++ linux-3.0.7/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
32916@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
32917 return 1;
32918
32919 /* check if the resource is valid */
32920- if (*irq < 0 || *irq > 15)
32921+ if (*irq > 15)
32922 return 0;
32923
32924 /* check if the resource is reserved */
32925@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
32926 return 1;
32927
32928 /* check if the resource is valid */
32929- if (*dma < 0 || *dma == 4 || *dma > 7)
32930+ if (*dma == 4 || *dma > 7)
32931 return 0;
32932
32933 /* check if the resource is reserved */
32934diff -urNp linux-3.0.7/drivers/power/bq27x00_battery.c linux-3.0.7/drivers/power/bq27x00_battery.c
32935--- linux-3.0.7/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
32936+++ linux-3.0.7/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
32937@@ -67,7 +67,7 @@
32938 struct bq27x00_device_info;
32939 struct bq27x00_access_methods {
32940 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
32941-};
32942+} __no_const;
32943
32944 enum bq27x00_chip { BQ27000, BQ27500 };
32945
32946diff -urNp linux-3.0.7/drivers/regulator/max8660.c linux-3.0.7/drivers/regulator/max8660.c
32947--- linux-3.0.7/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
32948+++ linux-3.0.7/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
32949@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
32950 max8660->shadow_regs[MAX8660_OVER1] = 5;
32951 } else {
32952 /* Otherwise devices can be toggled via software */
32953- max8660_dcdc_ops.enable = max8660_dcdc_enable;
32954- max8660_dcdc_ops.disable = max8660_dcdc_disable;
32955+ pax_open_kernel();
32956+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
32957+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
32958+ pax_close_kernel();
32959 }
32960
32961 /*
32962diff -urNp linux-3.0.7/drivers/regulator/mc13892-regulator.c linux-3.0.7/drivers/regulator/mc13892-regulator.c
32963--- linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
32964+++ linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
32965@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
32966 }
32967 mc13xxx_unlock(mc13892);
32968
32969- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32970+ pax_open_kernel();
32971+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32972 = mc13892_vcam_set_mode;
32973- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32974+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32975 = mc13892_vcam_get_mode;
32976+ pax_close_kernel();
32977 for (i = 0; i < pdata->num_regulators; i++) {
32978 init_data = &pdata->regulators[i];
32979 priv->regulators[i] = regulator_register(
32980diff -urNp linux-3.0.7/drivers/rtc/rtc-dev.c linux-3.0.7/drivers/rtc/rtc-dev.c
32981--- linux-3.0.7/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
32982+++ linux-3.0.7/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
32983@@ -14,6 +14,7 @@
32984 #include <linux/module.h>
32985 #include <linux/rtc.h>
32986 #include <linux/sched.h>
32987+#include <linux/grsecurity.h>
32988 #include "rtc-core.h"
32989
32990 static dev_t rtc_devt;
32991@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
32992 if (copy_from_user(&tm, uarg, sizeof(tm)))
32993 return -EFAULT;
32994
32995+ gr_log_timechange();
32996+
32997 return rtc_set_time(rtc, &tm);
32998
32999 case RTC_PIE_ON:
33000diff -urNp linux-3.0.7/drivers/scsi/aacraid/aacraid.h linux-3.0.7/drivers/scsi/aacraid/aacraid.h
33001--- linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
33002+++ linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
33003@@ -492,7 +492,7 @@ struct adapter_ops
33004 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33005 /* Administrative operations */
33006 int (*adapter_comm)(struct aac_dev * dev, int comm);
33007-};
33008+} __no_const;
33009
33010 /*
33011 * Define which interrupt handler needs to be installed
33012diff -urNp linux-3.0.7/drivers/scsi/aacraid/commctrl.c linux-3.0.7/drivers/scsi/aacraid/commctrl.c
33013--- linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
33014+++ linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
33015@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33016 u32 actual_fibsize64, actual_fibsize = 0;
33017 int i;
33018
33019+ pax_track_stack();
33020
33021 if (dev->in_reset) {
33022 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33023diff -urNp linux-3.0.7/drivers/scsi/aacraid/linit.c linux-3.0.7/drivers/scsi/aacraid/linit.c
33024--- linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-07-21 22:17:23.000000000 -0400
33025+++ linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-10-11 10:44:33.000000000 -0400
33026@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33027 #elif defined(__devinitconst)
33028 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33029 #else
33030-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33031+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33032 #endif
33033 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33034 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33035diff -urNp linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c
33036--- linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-07-21 22:17:23.000000000 -0400
33037+++ linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-10-11 10:44:33.000000000 -0400
33038@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33039 .lldd_control_phy = asd_control_phy,
33040 };
33041
33042-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33043+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33044 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33045 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33046 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33047diff -urNp linux-3.0.7/drivers/scsi/bfa/bfad.c linux-3.0.7/drivers/scsi/bfa/bfad.c
33048--- linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
33049+++ linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
33050@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33051 struct bfad_vport_s *vport, *vport_new;
33052 struct bfa_fcs_driver_info_s driver_info;
33053
33054+ pax_track_stack();
33055+
33056 /* Fill the driver_info info to fcs*/
33057 memset(&driver_info, 0, sizeof(driver_info));
33058 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
33059diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c
33060--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
33061+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
33062@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33063 u16 len, count;
33064 u16 templen;
33065
33066+ pax_track_stack();
33067+
33068 /*
33069 * get hba attributes
33070 */
33071@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33072 u8 count = 0;
33073 u16 templen;
33074
33075+ pax_track_stack();
33076+
33077 /*
33078 * get port attributes
33079 */
33080diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c
33081--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
33082+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
33083@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33084 struct fc_rpsc_speed_info_s speeds;
33085 struct bfa_port_attr_s pport_attr;
33086
33087+ pax_track_stack();
33088+
33089 bfa_trc(port->fcs, rx_fchs->s_id);
33090 bfa_trc(port->fcs, rx_fchs->d_id);
33091
33092diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa.h linux-3.0.7/drivers/scsi/bfa/bfa.h
33093--- linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
33094+++ linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
33095@@ -238,7 +238,7 @@ struct bfa_hwif_s {
33096 u32 *nvecs, u32 *maxvec);
33097 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
33098 u32 *end);
33099-};
33100+} __no_const;
33101 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33102
33103 struct bfa_iocfc_s {
33104diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h
33105--- linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
33106+++ linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
33107@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
33108 bfa_ioc_disable_cbfn_t disable_cbfn;
33109 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33110 bfa_ioc_reset_cbfn_t reset_cbfn;
33111-};
33112+} __no_const;
33113
33114 /*
33115 * Heartbeat failure notification queue element.
33116@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
33117 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
33118 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33119 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33120-};
33121+} __no_const;
33122
33123 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
33124 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
33125diff -urNp linux-3.0.7/drivers/scsi/BusLogic.c linux-3.0.7/drivers/scsi/BusLogic.c
33126--- linux-3.0.7/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
33127+++ linux-3.0.7/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
33128@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33129 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33130 *PrototypeHostAdapter)
33131 {
33132+ pax_track_stack();
33133+
33134 /*
33135 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33136 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33137diff -urNp linux-3.0.7/drivers/scsi/dpt_i2o.c linux-3.0.7/drivers/scsi/dpt_i2o.c
33138--- linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
33139+++ linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
33140@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33141 dma_addr_t addr;
33142 ulong flags = 0;
33143
33144+ pax_track_stack();
33145+
33146 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33147 // get user msg size in u32s
33148 if(get_user(size, &user_msg[0])){
33149@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33150 s32 rcode;
33151 dma_addr_t addr;
33152
33153+ pax_track_stack();
33154+
33155 memset(msg, 0 , sizeof(msg));
33156 len = scsi_bufflen(cmd);
33157 direction = 0x00000000;
33158diff -urNp linux-3.0.7/drivers/scsi/eata.c linux-3.0.7/drivers/scsi/eata.c
33159--- linux-3.0.7/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
33160+++ linux-3.0.7/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
33161@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33162 struct hostdata *ha;
33163 char name[16];
33164
33165+ pax_track_stack();
33166+
33167 sprintf(name, "%s%d", driver_name, j);
33168
33169 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33170diff -urNp linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c
33171--- linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
33172+++ linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
33173@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33174 } buf;
33175 int rc;
33176
33177+ pax_track_stack();
33178+
33179 fiph = (struct fip_header *)skb->data;
33180 sub = fiph->fip_subcode;
33181
33182diff -urNp linux-3.0.7/drivers/scsi/gdth.c linux-3.0.7/drivers/scsi/gdth.c
33183--- linux-3.0.7/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
33184+++ linux-3.0.7/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
33185@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33186 unsigned long flags;
33187 gdth_ha_str *ha;
33188
33189+ pax_track_stack();
33190+
33191 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33192 return -EFAULT;
33193 ha = gdth_find_ha(ldrv.ionode);
33194@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33195 gdth_ha_str *ha;
33196 int rval;
33197
33198+ pax_track_stack();
33199+
33200 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33201 res.number >= MAX_HDRIVES)
33202 return -EFAULT;
33203@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33204 gdth_ha_str *ha;
33205 int rval;
33206
33207+ pax_track_stack();
33208+
33209 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33210 return -EFAULT;
33211 ha = gdth_find_ha(gen.ionode);
33212@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33213 int i;
33214 gdth_cmd_str gdtcmd;
33215 char cmnd[MAX_COMMAND_SIZE];
33216+
33217+ pax_track_stack();
33218+
33219 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33220
33221 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33222diff -urNp linux-3.0.7/drivers/scsi/gdth_proc.c linux-3.0.7/drivers/scsi/gdth_proc.c
33223--- linux-3.0.7/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
33224+++ linux-3.0.7/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
33225@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33226 u64 paddr;
33227
33228 char cmnd[MAX_COMMAND_SIZE];
33229+
33230+ pax_track_stack();
33231+
33232 memset(cmnd, 0xff, 12);
33233 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33234
33235@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33236 gdth_hget_str *phg;
33237 char cmnd[MAX_COMMAND_SIZE];
33238
33239+ pax_track_stack();
33240+
33241 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33242 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33243 if (!gdtcmd || !estr)
33244diff -urNp linux-3.0.7/drivers/scsi/hosts.c linux-3.0.7/drivers/scsi/hosts.c
33245--- linux-3.0.7/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
33246+++ linux-3.0.7/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
33247@@ -42,7 +42,7 @@
33248 #include "scsi_logging.h"
33249
33250
33251-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33252+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33253
33254
33255 static void scsi_host_cls_release(struct device *dev)
33256@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33257 * subtract one because we increment first then return, but we need to
33258 * know what the next host number was before increment
33259 */
33260- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33261+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33262 shost->dma_channel = 0xff;
33263
33264 /* These three are default values which can be overridden */
33265diff -urNp linux-3.0.7/drivers/scsi/hpsa.c linux-3.0.7/drivers/scsi/hpsa.c
33266--- linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:54:54.000000000 -0400
33267+++ linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:55:27.000000000 -0400
33268@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33269 u32 a;
33270
33271 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33272- return h->access.command_completed(h);
33273+ return h->access->command_completed(h);
33274
33275 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33276 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33277@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33278 while (!list_empty(&h->reqQ)) {
33279 c = list_entry(h->reqQ.next, struct CommandList, list);
33280 /* can't do anything if fifo is full */
33281- if ((h->access.fifo_full(h))) {
33282+ if ((h->access->fifo_full(h))) {
33283 dev_warn(&h->pdev->dev, "fifo full\n");
33284 break;
33285 }
33286@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33287 h->Qdepth--;
33288
33289 /* Tell the controller execute command */
33290- h->access.submit_command(h, c);
33291+ h->access->submit_command(h, c);
33292
33293 /* Put job onto the completed Q */
33294 addQ(&h->cmpQ, c);
33295@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33296
33297 static inline unsigned long get_next_completion(struct ctlr_info *h)
33298 {
33299- return h->access.command_completed(h);
33300+ return h->access->command_completed(h);
33301 }
33302
33303 static inline bool interrupt_pending(struct ctlr_info *h)
33304 {
33305- return h->access.intr_pending(h);
33306+ return h->access->intr_pending(h);
33307 }
33308
33309 static inline long interrupt_not_for_us(struct ctlr_info *h)
33310 {
33311- return (h->access.intr_pending(h) == 0) ||
33312+ return (h->access->intr_pending(h) == 0) ||
33313 (h->interrupts_enabled == 0);
33314 }
33315
33316@@ -3874,7 +3874,7 @@ static int __devinit hpsa_pci_init(struc
33317 if (prod_index < 0)
33318 return -ENODEV;
33319 h->product_name = products[prod_index].product_name;
33320- h->access = *(products[prod_index].access);
33321+ h->access = products[prod_index].access;
33322
33323 if (hpsa_board_disabled(h->pdev)) {
33324 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33325@@ -4151,7 +4151,7 @@ reinit_after_soft_reset:
33326 }
33327
33328 /* make sure the board interrupts are off */
33329- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33330+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33331
33332 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33333 goto clean2;
33334@@ -4185,7 +4185,7 @@ reinit_after_soft_reset:
33335 * fake ones to scoop up any residual completions.
33336 */
33337 spin_lock_irqsave(&h->lock, flags);
33338- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33339+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33340 spin_unlock_irqrestore(&h->lock, flags);
33341 free_irq(h->intr[h->intr_mode], h);
33342 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33343@@ -4204,9 +4204,9 @@ reinit_after_soft_reset:
33344 dev_info(&h->pdev->dev, "Board READY.\n");
33345 dev_info(&h->pdev->dev,
33346 "Waiting for stale completions to drain.\n");
33347- h->access.set_intr_mask(h, HPSA_INTR_ON);
33348+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33349 msleep(10000);
33350- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33351+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33352
33353 rc = controller_reset_failed(h->cfgtable);
33354 if (rc)
33355@@ -4227,7 +4227,7 @@ reinit_after_soft_reset:
33356 }
33357
33358 /* Turn the interrupts on so we can service requests */
33359- h->access.set_intr_mask(h, HPSA_INTR_ON);
33360+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33361
33362 hpsa_hba_inquiry(h);
33363 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33364@@ -4280,7 +4280,7 @@ static void hpsa_shutdown(struct pci_dev
33365 * To write all data in the battery backed cache to disks
33366 */
33367 hpsa_flush_cache(h);
33368- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33369+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33370 free_irq(h->intr[h->intr_mode], h);
33371 #ifdef CONFIG_PCI_MSI
33372 if (h->msix_vector)
33373@@ -4443,7 +4443,7 @@ static __devinit void hpsa_enter_perform
33374 return;
33375 }
33376 /* Change the access methods to the performant access methods */
33377- h->access = SA5_performant_access;
33378+ h->access = &SA5_performant_access;
33379 h->transMethod = CFGTBL_Trans_Performant;
33380 }
33381
33382diff -urNp linux-3.0.7/drivers/scsi/hpsa.h linux-3.0.7/drivers/scsi/hpsa.h
33383--- linux-3.0.7/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
33384+++ linux-3.0.7/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
33385@@ -73,7 +73,7 @@ struct ctlr_info {
33386 unsigned int msix_vector;
33387 unsigned int msi_vector;
33388 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33389- struct access_method access;
33390+ struct access_method *access;
33391
33392 /* queue and queue Info */
33393 struct list_head reqQ;
33394diff -urNp linux-3.0.7/drivers/scsi/ips.h linux-3.0.7/drivers/scsi/ips.h
33395--- linux-3.0.7/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
33396+++ linux-3.0.7/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
33397@@ -1027,7 +1027,7 @@ typedef struct {
33398 int (*intr)(struct ips_ha *);
33399 void (*enableint)(struct ips_ha *);
33400 uint32_t (*statupd)(struct ips_ha *);
33401-} ips_hw_func_t;
33402+} __no_const ips_hw_func_t;
33403
33404 typedef struct ips_ha {
33405 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33406diff -urNp linux-3.0.7/drivers/scsi/libfc/fc_exch.c linux-3.0.7/drivers/scsi/libfc/fc_exch.c
33407--- linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
33408+++ linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
33409@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33410 * all together if not used XXX
33411 */
33412 struct {
33413- atomic_t no_free_exch;
33414- atomic_t no_free_exch_xid;
33415- atomic_t xid_not_found;
33416- atomic_t xid_busy;
33417- atomic_t seq_not_found;
33418- atomic_t non_bls_resp;
33419+ atomic_unchecked_t no_free_exch;
33420+ atomic_unchecked_t no_free_exch_xid;
33421+ atomic_unchecked_t xid_not_found;
33422+ atomic_unchecked_t xid_busy;
33423+ atomic_unchecked_t seq_not_found;
33424+ atomic_unchecked_t non_bls_resp;
33425 } stats;
33426 };
33427
33428@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
33429 /* allocate memory for exchange */
33430 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33431 if (!ep) {
33432- atomic_inc(&mp->stats.no_free_exch);
33433+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33434 goto out;
33435 }
33436 memset(ep, 0, sizeof(*ep));
33437@@ -761,7 +761,7 @@ out:
33438 return ep;
33439 err:
33440 spin_unlock_bh(&pool->lock);
33441- atomic_inc(&mp->stats.no_free_exch_xid);
33442+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33443 mempool_free(ep, mp->ep_pool);
33444 return NULL;
33445 }
33446@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33447 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33448 ep = fc_exch_find(mp, xid);
33449 if (!ep) {
33450- atomic_inc(&mp->stats.xid_not_found);
33451+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33452 reject = FC_RJT_OX_ID;
33453 goto out;
33454 }
33455@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33456 ep = fc_exch_find(mp, xid);
33457 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33458 if (ep) {
33459- atomic_inc(&mp->stats.xid_busy);
33460+ atomic_inc_unchecked(&mp->stats.xid_busy);
33461 reject = FC_RJT_RX_ID;
33462 goto rel;
33463 }
33464@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33465 }
33466 xid = ep->xid; /* get our XID */
33467 } else if (!ep) {
33468- atomic_inc(&mp->stats.xid_not_found);
33469+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33470 reject = FC_RJT_RX_ID; /* XID not found */
33471 goto out;
33472 }
33473@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33474 } else {
33475 sp = &ep->seq;
33476 if (sp->id != fh->fh_seq_id) {
33477- atomic_inc(&mp->stats.seq_not_found);
33478+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33479 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33480 goto rel;
33481 }
33482@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
33483
33484 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33485 if (!ep) {
33486- atomic_inc(&mp->stats.xid_not_found);
33487+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33488 goto out;
33489 }
33490 if (ep->esb_stat & ESB_ST_COMPLETE) {
33491- atomic_inc(&mp->stats.xid_not_found);
33492+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33493 goto rel;
33494 }
33495 if (ep->rxid == FC_XID_UNKNOWN)
33496 ep->rxid = ntohs(fh->fh_rx_id);
33497 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33498- atomic_inc(&mp->stats.xid_not_found);
33499+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33500 goto rel;
33501 }
33502 if (ep->did != ntoh24(fh->fh_s_id) &&
33503 ep->did != FC_FID_FLOGI) {
33504- atomic_inc(&mp->stats.xid_not_found);
33505+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33506 goto rel;
33507 }
33508 sof = fr_sof(fp);
33509@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
33510 sp->ssb_stat |= SSB_ST_RESP;
33511 sp->id = fh->fh_seq_id;
33512 } else if (sp->id != fh->fh_seq_id) {
33513- atomic_inc(&mp->stats.seq_not_found);
33514+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33515 goto rel;
33516 }
33517
33518@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
33519 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33520
33521 if (!sp)
33522- atomic_inc(&mp->stats.xid_not_found);
33523+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33524 else
33525- atomic_inc(&mp->stats.non_bls_resp);
33526+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
33527
33528 fc_frame_free(fp);
33529 }
33530diff -urNp linux-3.0.7/drivers/scsi/libsas/sas_ata.c linux-3.0.7/drivers/scsi/libsas/sas_ata.c
33531--- linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
33532+++ linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
33533@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
33534 .postreset = ata_std_postreset,
33535 .error_handler = ata_std_error_handler,
33536 .post_internal_cmd = sas_ata_post_internal,
33537- .qc_defer = ata_std_qc_defer,
33538+ .qc_defer = ata_std_qc_defer,
33539 .qc_prep = ata_noop_qc_prep,
33540 .qc_issue = sas_ata_qc_issue,
33541 .qc_fill_rtf = sas_ata_qc_fill_rtf,
33542diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c
33543--- linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
33544+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
33545@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
33546
33547 #include <linux/debugfs.h>
33548
33549-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33550+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33551 static unsigned long lpfc_debugfs_start_time = 0L;
33552
33553 /* iDiag */
33554@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33555 lpfc_debugfs_enable = 0;
33556
33557 len = 0;
33558- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33559+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33560 (lpfc_debugfs_max_disc_trc - 1);
33561 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33562 dtp = vport->disc_trc + i;
33563@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33564 lpfc_debugfs_enable = 0;
33565
33566 len = 0;
33567- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33568+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33569 (lpfc_debugfs_max_slow_ring_trc - 1);
33570 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33571 dtp = phba->slow_ring_trc + i;
33572@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33573 uint32_t *ptr;
33574 char buffer[1024];
33575
33576+ pax_track_stack();
33577+
33578 off = 0;
33579 spin_lock_irq(&phba->hbalock);
33580
33581@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33582 !vport || !vport->disc_trc)
33583 return;
33584
33585- index = atomic_inc_return(&vport->disc_trc_cnt) &
33586+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33587 (lpfc_debugfs_max_disc_trc - 1);
33588 dtp = vport->disc_trc + index;
33589 dtp->fmt = fmt;
33590 dtp->data1 = data1;
33591 dtp->data2 = data2;
33592 dtp->data3 = data3;
33593- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33594+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33595 dtp->jif = jiffies;
33596 #endif
33597 return;
33598@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33599 !phba || !phba->slow_ring_trc)
33600 return;
33601
33602- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33603+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33604 (lpfc_debugfs_max_slow_ring_trc - 1);
33605 dtp = phba->slow_ring_trc + index;
33606 dtp->fmt = fmt;
33607 dtp->data1 = data1;
33608 dtp->data2 = data2;
33609 dtp->data3 = data3;
33610- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33611+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33612 dtp->jif = jiffies;
33613 #endif
33614 return;
33615@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33616 "slow_ring buffer\n");
33617 goto debug_failed;
33618 }
33619- atomic_set(&phba->slow_ring_trc_cnt, 0);
33620+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33621 memset(phba->slow_ring_trc, 0,
33622 (sizeof(struct lpfc_debugfs_trc) *
33623 lpfc_debugfs_max_slow_ring_trc));
33624@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33625 "buffer\n");
33626 goto debug_failed;
33627 }
33628- atomic_set(&vport->disc_trc_cnt, 0);
33629+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33630
33631 snprintf(name, sizeof(name), "discovery_trace");
33632 vport->debug_disc_trc =
33633diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc.h linux-3.0.7/drivers/scsi/lpfc/lpfc.h
33634--- linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:54:54.000000000 -0400
33635+++ linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:55:27.000000000 -0400
33636@@ -425,7 +425,7 @@ struct lpfc_vport {
33637 struct dentry *debug_nodelist;
33638 struct dentry *vport_debugfs_root;
33639 struct lpfc_debugfs_trc *disc_trc;
33640- atomic_t disc_trc_cnt;
33641+ atomic_unchecked_t disc_trc_cnt;
33642 #endif
33643 uint8_t stat_data_enabled;
33644 uint8_t stat_data_blocked;
33645@@ -832,8 +832,8 @@ struct lpfc_hba {
33646 struct timer_list fabric_block_timer;
33647 unsigned long bit_flags;
33648 #define FABRIC_COMANDS_BLOCKED 0
33649- atomic_t num_rsrc_err;
33650- atomic_t num_cmd_success;
33651+ atomic_unchecked_t num_rsrc_err;
33652+ atomic_unchecked_t num_cmd_success;
33653 unsigned long last_rsrc_error_time;
33654 unsigned long last_ramp_down_time;
33655 unsigned long last_ramp_up_time;
33656@@ -847,7 +847,7 @@ struct lpfc_hba {
33657 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33658 struct dentry *debug_slow_ring_trc;
33659 struct lpfc_debugfs_trc *slow_ring_trc;
33660- atomic_t slow_ring_trc_cnt;
33661+ atomic_unchecked_t slow_ring_trc_cnt;
33662 /* iDiag debugfs sub-directory */
33663 struct dentry *idiag_root;
33664 struct dentry *idiag_pci_cfg;
33665diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c
33666--- linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:54:54.000000000 -0400
33667+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:55:27.000000000 -0400
33668@@ -9971,8 +9971,10 @@ lpfc_init(void)
33669 printk(LPFC_COPYRIGHT "\n");
33670
33671 if (lpfc_enable_npiv) {
33672- lpfc_transport_functions.vport_create = lpfc_vport_create;
33673- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33674+ pax_open_kernel();
33675+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
33676+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33677+ pax_close_kernel();
33678 }
33679 lpfc_transport_template =
33680 fc_attach_transport(&lpfc_transport_functions);
33681diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c
33682--- linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:54:54.000000000 -0400
33683+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:55:27.000000000 -0400
33684@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33685 uint32_t evt_posted;
33686
33687 spin_lock_irqsave(&phba->hbalock, flags);
33688- atomic_inc(&phba->num_rsrc_err);
33689+ atomic_inc_unchecked(&phba->num_rsrc_err);
33690 phba->last_rsrc_error_time = jiffies;
33691
33692 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33693@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33694 unsigned long flags;
33695 struct lpfc_hba *phba = vport->phba;
33696 uint32_t evt_posted;
33697- atomic_inc(&phba->num_cmd_success);
33698+ atomic_inc_unchecked(&phba->num_cmd_success);
33699
33700 if (vport->cfg_lun_queue_depth <= queue_depth)
33701 return;
33702@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33703 unsigned long num_rsrc_err, num_cmd_success;
33704 int i;
33705
33706- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33707- num_cmd_success = atomic_read(&phba->num_cmd_success);
33708+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33709+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33710
33711 vports = lpfc_create_vport_work_array(phba);
33712 if (vports != NULL)
33713@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33714 }
33715 }
33716 lpfc_destroy_vport_work_array(phba, vports);
33717- atomic_set(&phba->num_rsrc_err, 0);
33718- atomic_set(&phba->num_cmd_success, 0);
33719+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33720+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33721 }
33722
33723 /**
33724@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33725 }
33726 }
33727 lpfc_destroy_vport_work_array(phba, vports);
33728- atomic_set(&phba->num_rsrc_err, 0);
33729- atomic_set(&phba->num_cmd_success, 0);
33730+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33731+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33732 }
33733
33734 /**
33735diff -urNp linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c
33736--- linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
33737+++ linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
33738@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33739 int rval;
33740 int i;
33741
33742+ pax_track_stack();
33743+
33744 // Allocate memory for the base list of scb for management module.
33745 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33746
33747diff -urNp linux-3.0.7/drivers/scsi/osd/osd_initiator.c linux-3.0.7/drivers/scsi/osd/osd_initiator.c
33748--- linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
33749+++ linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
33750@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
33751 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33752 int ret;
33753
33754+ pax_track_stack();
33755+
33756 or = osd_start_request(od, GFP_KERNEL);
33757 if (!or)
33758 return -ENOMEM;
33759diff -urNp linux-3.0.7/drivers/scsi/pmcraid.c linux-3.0.7/drivers/scsi/pmcraid.c
33760--- linux-3.0.7/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
33761+++ linux-3.0.7/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
33762@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
33763 res->scsi_dev = scsi_dev;
33764 scsi_dev->hostdata = res;
33765 res->change_detected = 0;
33766- atomic_set(&res->read_failures, 0);
33767- atomic_set(&res->write_failures, 0);
33768+ atomic_set_unchecked(&res->read_failures, 0);
33769+ atomic_set_unchecked(&res->write_failures, 0);
33770 rc = 0;
33771 }
33772 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33773@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
33774
33775 /* If this was a SCSI read/write command keep count of errors */
33776 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33777- atomic_inc(&res->read_failures);
33778+ atomic_inc_unchecked(&res->read_failures);
33779 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33780- atomic_inc(&res->write_failures);
33781+ atomic_inc_unchecked(&res->write_failures);
33782
33783 if (!RES_IS_GSCSI(res->cfg_entry) &&
33784 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33785@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
33786 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33787 * hrrq_id assigned here in queuecommand
33788 */
33789- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33790+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33791 pinstance->num_hrrq;
33792 cmd->cmd_done = pmcraid_io_done;
33793
33794@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
33795 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33796 * hrrq_id assigned here in queuecommand
33797 */
33798- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33799+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33800 pinstance->num_hrrq;
33801
33802 if (request_size) {
33803@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
33804
33805 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33806 /* add resources only after host is added into system */
33807- if (!atomic_read(&pinstance->expose_resources))
33808+ if (!atomic_read_unchecked(&pinstance->expose_resources))
33809 return;
33810
33811 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
33812@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
33813 init_waitqueue_head(&pinstance->reset_wait_q);
33814
33815 atomic_set(&pinstance->outstanding_cmds, 0);
33816- atomic_set(&pinstance->last_message_id, 0);
33817- atomic_set(&pinstance->expose_resources, 0);
33818+ atomic_set_unchecked(&pinstance->last_message_id, 0);
33819+ atomic_set_unchecked(&pinstance->expose_resources, 0);
33820
33821 INIT_LIST_HEAD(&pinstance->free_res_q);
33822 INIT_LIST_HEAD(&pinstance->used_res_q);
33823@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
33824 /* Schedule worker thread to handle CCN and take care of adding and
33825 * removing devices to OS
33826 */
33827- atomic_set(&pinstance->expose_resources, 1);
33828+ atomic_set_unchecked(&pinstance->expose_resources, 1);
33829 schedule_work(&pinstance->worker_q);
33830 return rc;
33831
33832diff -urNp linux-3.0.7/drivers/scsi/pmcraid.h linux-3.0.7/drivers/scsi/pmcraid.h
33833--- linux-3.0.7/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
33834+++ linux-3.0.7/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
33835@@ -749,7 +749,7 @@ struct pmcraid_instance {
33836 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
33837
33838 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
33839- atomic_t last_message_id;
33840+ atomic_unchecked_t last_message_id;
33841
33842 /* configuration table */
33843 struct pmcraid_config_table *cfg_table;
33844@@ -778,7 +778,7 @@ struct pmcraid_instance {
33845 atomic_t outstanding_cmds;
33846
33847 /* should add/delete resources to mid-layer now ?*/
33848- atomic_t expose_resources;
33849+ atomic_unchecked_t expose_resources;
33850
33851
33852
33853@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
33854 struct pmcraid_config_table_entry_ext cfg_entry_ext;
33855 };
33856 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33857- atomic_t read_failures; /* count of failed READ commands */
33858- atomic_t write_failures; /* count of failed WRITE commands */
33859+ atomic_unchecked_t read_failures; /* count of failed READ commands */
33860+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33861
33862 /* To indicate add/delete/modify during CCN */
33863 u8 change_detected;
33864diff -urNp linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h
33865--- linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
33866+++ linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
33867@@ -2244,7 +2244,7 @@ struct isp_operations {
33868 int (*get_flash_version) (struct scsi_qla_host *, void *);
33869 int (*start_scsi) (srb_t *);
33870 int (*abort_isp) (struct scsi_qla_host *);
33871-};
33872+} __no_const;
33873
33874 /* MSI-X Support *************************************************************/
33875
33876diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h
33877--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
33878+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
33879@@ -256,7 +256,7 @@ struct ddb_entry {
33880 atomic_t retry_relogin_timer; /* Min Time between relogins
33881 * (4000 only) */
33882 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33883- atomic_t relogin_retry_count; /* Num of times relogin has been
33884+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33885 * retried */
33886
33887 uint16_t port;
33888diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c
33889--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
33890+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
33891@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
33892 ddb_entry->fw_ddb_index = fw_ddb_index;
33893 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33894 atomic_set(&ddb_entry->relogin_timer, 0);
33895- atomic_set(&ddb_entry->relogin_retry_count, 0);
33896+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33897 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33898 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33899 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33900@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
33901 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
33902 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
33903 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33904- atomic_set(&ddb_entry->relogin_retry_count, 0);
33905+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33906 atomic_set(&ddb_entry->relogin_timer, 0);
33907 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33908 iscsi_unblock_session(ddb_entry->sess);
33909diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c
33910--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
33911+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
33912@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
33913 ddb_entry->fw_ddb_device_state ==
33914 DDB_DS_SESSION_FAILED) {
33915 /* Reset retry relogin timer */
33916- atomic_inc(&ddb_entry->relogin_retry_count);
33917+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33918 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
33919 " timed out-retrying"
33920 " relogin (%d)\n",
33921 ha->host_no,
33922 ddb_entry->fw_ddb_index,
33923- atomic_read(&ddb_entry->
33924+ atomic_read_unchecked(&ddb_entry->
33925 relogin_retry_count))
33926 );
33927 start_dpc++;
33928diff -urNp linux-3.0.7/drivers/scsi/scsi.c linux-3.0.7/drivers/scsi/scsi.c
33929--- linux-3.0.7/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
33930+++ linux-3.0.7/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
33931@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33932 unsigned long timeout;
33933 int rtn = 0;
33934
33935- atomic_inc(&cmd->device->iorequest_cnt);
33936+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33937
33938 /* check if the device is still usable */
33939 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33940diff -urNp linux-3.0.7/drivers/scsi/scsi_debug.c linux-3.0.7/drivers/scsi/scsi_debug.c
33941--- linux-3.0.7/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
33942+++ linux-3.0.7/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
33943@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
33944 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33945 unsigned char *cmd = (unsigned char *)scp->cmnd;
33946
33947+ pax_track_stack();
33948+
33949 if ((errsts = check_readiness(scp, 1, devip)))
33950 return errsts;
33951 memset(arr, 0, sizeof(arr));
33952@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
33953 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33954 unsigned char *cmd = (unsigned char *)scp->cmnd;
33955
33956+ pax_track_stack();
33957+
33958 if ((errsts = check_readiness(scp, 1, devip)))
33959 return errsts;
33960 memset(arr, 0, sizeof(arr));
33961diff -urNp linux-3.0.7/drivers/scsi/scsi_lib.c linux-3.0.7/drivers/scsi/scsi_lib.c
33962--- linux-3.0.7/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
33963+++ linux-3.0.7/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
33964@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
33965 shost = sdev->host;
33966 scsi_init_cmd_errh(cmd);
33967 cmd->result = DID_NO_CONNECT << 16;
33968- atomic_inc(&cmd->device->iorequest_cnt);
33969+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33970
33971 /*
33972 * SCSI request completion path will do scsi_device_unbusy(),
33973@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
33974
33975 INIT_LIST_HEAD(&cmd->eh_entry);
33976
33977- atomic_inc(&cmd->device->iodone_cnt);
33978+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
33979 if (cmd->result)
33980- atomic_inc(&cmd->device->ioerr_cnt);
33981+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
33982
33983 disposition = scsi_decide_disposition(cmd);
33984 if (disposition != SUCCESS &&
33985diff -urNp linux-3.0.7/drivers/scsi/scsi_sysfs.c linux-3.0.7/drivers/scsi/scsi_sysfs.c
33986--- linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
33987+++ linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
33988@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
33989 char *buf) \
33990 { \
33991 struct scsi_device *sdev = to_scsi_device(dev); \
33992- unsigned long long count = atomic_read(&sdev->field); \
33993+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
33994 return snprintf(buf, 20, "0x%llx\n", count); \
33995 } \
33996 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
33997diff -urNp linux-3.0.7/drivers/scsi/scsi_tgt_lib.c linux-3.0.7/drivers/scsi/scsi_tgt_lib.c
33998--- linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
33999+++ linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
34000@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34001 int err;
34002
34003 dprintk("%lx %u\n", uaddr, len);
34004- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34005+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34006 if (err) {
34007 /*
34008 * TODO: need to fixup sg_tablesize, max_segment_size,
34009diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_fc.c linux-3.0.7/drivers/scsi/scsi_transport_fc.c
34010--- linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
34011+++ linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
34012@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34013 * Netlink Infrastructure
34014 */
34015
34016-static atomic_t fc_event_seq;
34017+static atomic_unchecked_t fc_event_seq;
34018
34019 /**
34020 * fc_get_event_number - Obtain the next sequential FC event number
34021@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34022 u32
34023 fc_get_event_number(void)
34024 {
34025- return atomic_add_return(1, &fc_event_seq);
34026+ return atomic_add_return_unchecked(1, &fc_event_seq);
34027 }
34028 EXPORT_SYMBOL(fc_get_event_number);
34029
34030@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34031 {
34032 int error;
34033
34034- atomic_set(&fc_event_seq, 0);
34035+ atomic_set_unchecked(&fc_event_seq, 0);
34036
34037 error = transport_class_register(&fc_host_class);
34038 if (error)
34039@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34040 char *cp;
34041
34042 *val = simple_strtoul(buf, &cp, 0);
34043- if ((*cp && (*cp != '\n')) || (*val < 0))
34044+ if (*cp && (*cp != '\n'))
34045 return -EINVAL;
34046 /*
34047 * Check for overflow; dev_loss_tmo is u32
34048diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c
34049--- linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
34050+++ linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
34051@@ -83,7 +83,7 @@ struct iscsi_internal {
34052 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34053 };
34054
34055-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34056+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34057 static struct workqueue_struct *iscsi_eh_timer_workq;
34058
34059 /*
34060@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34061 int err;
34062
34063 ihost = shost->shost_data;
34064- session->sid = atomic_add_return(1, &iscsi_session_nr);
34065+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34066
34067 if (id == ISCSI_MAX_TARGET) {
34068 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34069@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34070 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34071 ISCSI_TRANSPORT_VERSION);
34072
34073- atomic_set(&iscsi_session_nr, 0);
34074+ atomic_set_unchecked(&iscsi_session_nr, 0);
34075
34076 err = class_register(&iscsi_transport_class);
34077 if (err)
34078diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_srp.c linux-3.0.7/drivers/scsi/scsi_transport_srp.c
34079--- linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
34080+++ linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
34081@@ -33,7 +33,7 @@
34082 #include "scsi_transport_srp_internal.h"
34083
34084 struct srp_host_attrs {
34085- atomic_t next_port_id;
34086+ atomic_unchecked_t next_port_id;
34087 };
34088 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34089
34090@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34091 struct Scsi_Host *shost = dev_to_shost(dev);
34092 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34093
34094- atomic_set(&srp_host->next_port_id, 0);
34095+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34096 return 0;
34097 }
34098
34099@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34100 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34101 rport->roles = ids->roles;
34102
34103- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34104+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34105 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34106
34107 transport_setup_device(&rport->dev);
34108diff -urNp linux-3.0.7/drivers/scsi/sg.c linux-3.0.7/drivers/scsi/sg.c
34109--- linux-3.0.7/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
34110+++ linux-3.0.7/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
34111@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34112 sdp->disk->disk_name,
34113 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34114 NULL,
34115- (char *)arg);
34116+ (char __user *)arg);
34117 case BLKTRACESTART:
34118 return blk_trace_startstop(sdp->device->request_queue, 1);
34119 case BLKTRACESTOP:
34120@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34121 const struct file_operations * fops;
34122 };
34123
34124-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34125+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34126 {"allow_dio", &adio_fops},
34127 {"debug", &debug_fops},
34128 {"def_reserved_size", &dressz_fops},
34129@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34130 {
34131 int k, mask;
34132 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34133- struct sg_proc_leaf * leaf;
34134+ const struct sg_proc_leaf * leaf;
34135
34136 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34137 if (!sg_proc_sgp)
34138diff -urNp linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c
34139--- linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
34140+++ linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
34141@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34142 int do_iounmap = 0;
34143 int do_disable_device = 1;
34144
34145+ pax_track_stack();
34146+
34147 memset(&sym_dev, 0, sizeof(sym_dev));
34148 memset(&nvram, 0, sizeof(nvram));
34149 sym_dev.pdev = pdev;
34150diff -urNp linux-3.0.7/drivers/scsi/vmw_pvscsi.c linux-3.0.7/drivers/scsi/vmw_pvscsi.c
34151--- linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
34152+++ linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
34153@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34154 dma_addr_t base;
34155 unsigned i;
34156
34157+ pax_track_stack();
34158+
34159 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34160 cmd.reqRingNumPages = adapter->req_pages;
34161 cmd.cmpRingNumPages = adapter->cmp_pages;
34162diff -urNp linux-3.0.7/drivers/spi/dw_spi_pci.c linux-3.0.7/drivers/spi/dw_spi_pci.c
34163--- linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-07-21 22:17:23.000000000 -0400
34164+++ linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-10-11 10:44:33.000000000 -0400
34165@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34166 #define spi_resume NULL
34167 #endif
34168
34169-static const struct pci_device_id pci_ids[] __devinitdata = {
34170+static const struct pci_device_id pci_ids[] __devinitconst = {
34171 /* Intel MID platform SPI controller 0 */
34172 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34173 {},
34174diff -urNp linux-3.0.7/drivers/spi/spi.c linux-3.0.7/drivers/spi/spi.c
34175--- linux-3.0.7/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
34176+++ linux-3.0.7/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
34177@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34178 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34179
34180 /* portable code must never pass more than 32 bytes */
34181-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34182+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34183
34184 static u8 *buf;
34185
34186diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34187--- linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
34188+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
34189@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34190 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34191
34192
34193-static struct net_device_ops ar6000_netdev_ops = {
34194+static net_device_ops_no_const ar6000_netdev_ops = {
34195 .ndo_init = NULL,
34196 .ndo_open = ar6000_open,
34197 .ndo_stop = ar6000_close,
34198diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34199--- linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
34200+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
34201@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34202 typedef struct ar6k_pal_config_s
34203 {
34204 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34205-}ar6k_pal_config_t;
34206+} __no_const ar6k_pal_config_t;
34207
34208 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34209 #endif /* _AR6K_PAL_H_ */
34210diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34211--- linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
34212+++ linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
34213@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
34214 free_netdev(ifp->net);
34215 }
34216 /* Allocate etherdev, including space for private structure */
34217- ifp->net = alloc_etherdev(sizeof(dhd));
34218+ ifp->net = alloc_etherdev(sizeof(*dhd));
34219 if (!ifp->net) {
34220 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34221 ret = -ENOMEM;
34222 }
34223 if (ret == 0) {
34224 strcpy(ifp->net->name, ifp->name);
34225- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
34226+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
34227 err = dhd_net_attach(&dhd->pub, ifp->idx);
34228 if (err != 0) {
34229 DHD_ERROR(("%s: dhd_net_attach failed, "
34230@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34231 strcpy(nv_path, nvram_path);
34232
34233 /* Allocate etherdev, including space for private structure */
34234- net = alloc_etherdev(sizeof(dhd));
34235+ net = alloc_etherdev(sizeof(*dhd));
34236 if (!net) {
34237 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34238 goto fail;
34239@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34240 /*
34241 * Save the dhd_info into the priv
34242 */
34243- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34244+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34245
34246 /* Set network interface name if it was provided as module parameter */
34247 if (iface_name[0]) {
34248@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34249 /*
34250 * Save the dhd_info into the priv
34251 */
34252- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34253+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34254
34255 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
34256 g_bus = bus;
34257diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
34258--- linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
34259+++ linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
34260@@ -593,7 +593,7 @@ struct phy_func_ptr {
34261 initfn_t carrsuppr;
34262 rxsigpwrfn_t rxsigpwr;
34263 detachfn_t detach;
34264-};
34265+} __no_const;
34266 typedef struct phy_func_ptr phy_func_ptr_t;
34267
34268 struct phy_info {
34269diff -urNp linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h
34270--- linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
34271+++ linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
34272@@ -185,7 +185,7 @@ typedef struct {
34273 u16 func, uint bustype, void *regsva, void *param);
34274 /* detach from device */
34275 void (*detach) (void *ch);
34276-} bcmsdh_driver_t;
34277+} __no_const bcmsdh_driver_t;
34278
34279 /* platform specific/high level functions */
34280 extern int bcmsdh_register(bcmsdh_driver_t *driver);
34281diff -urNp linux-3.0.7/drivers/staging/et131x/et1310_tx.c linux-3.0.7/drivers/staging/et131x/et1310_tx.c
34282--- linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
34283+++ linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
34284@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34285 struct net_device_stats *stats = &etdev->net_stats;
34286
34287 if (tcb->flags & fMP_DEST_BROAD)
34288- atomic_inc(&etdev->Stats.brdcstxmt);
34289+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34290 else if (tcb->flags & fMP_DEST_MULTI)
34291- atomic_inc(&etdev->Stats.multixmt);
34292+ atomic_inc_unchecked(&etdev->Stats.multixmt);
34293 else
34294- atomic_inc(&etdev->Stats.unixmt);
34295+ atomic_inc_unchecked(&etdev->Stats.unixmt);
34296
34297 if (tcb->skb) {
34298 stats->tx_bytes += tcb->skb->len;
34299diff -urNp linux-3.0.7/drivers/staging/et131x/et131x_adapter.h linux-3.0.7/drivers/staging/et131x/et131x_adapter.h
34300--- linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
34301+++ linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
34302@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
34303 * operations
34304 */
34305 u32 unircv; /* # multicast packets received */
34306- atomic_t unixmt; /* # multicast packets for Tx */
34307+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34308 u32 multircv; /* # multicast packets received */
34309- atomic_t multixmt; /* # multicast packets for Tx */
34310+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34311 u32 brdcstrcv; /* # broadcast packets received */
34312- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34313+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34314 u32 norcvbuf; /* # Rx packets discarded */
34315 u32 noxmtbuf; /* # Tx packets discarded */
34316
34317diff -urNp linux-3.0.7/drivers/staging/hv/channel.c linux-3.0.7/drivers/staging/hv/channel.c
34318--- linux-3.0.7/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
34319+++ linux-3.0.7/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
34320@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34321 int ret = 0;
34322 int t;
34323
34324- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34325- atomic_inc(&vmbus_connection.next_gpadl_handle);
34326+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34327+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34328
34329 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34330 if (ret)
34331diff -urNp linux-3.0.7/drivers/staging/hv/hv.c linux-3.0.7/drivers/staging/hv/hv.c
34332--- linux-3.0.7/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
34333+++ linux-3.0.7/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
34334@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34335 u64 output_address = (output) ? virt_to_phys(output) : 0;
34336 u32 output_address_hi = output_address >> 32;
34337 u32 output_address_lo = output_address & 0xFFFFFFFF;
34338- volatile void *hypercall_page = hv_context.hypercall_page;
34339+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34340
34341 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34342 "=a"(hv_status_lo) : "d" (control_hi),
34343diff -urNp linux-3.0.7/drivers/staging/hv/hv_mouse.c linux-3.0.7/drivers/staging/hv/hv_mouse.c
34344--- linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
34345+++ linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
34346@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
34347 if (hid_dev) {
34348 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34349
34350- hid_dev->ll_driver->open = mousevsc_hid_open;
34351- hid_dev->ll_driver->close = mousevsc_hid_close;
34352+ pax_open_kernel();
34353+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34354+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34355+ pax_close_kernel();
34356
34357 hid_dev->bus = BUS_VIRTUAL;
34358 hid_dev->vendor = input_device_ctx->device_info.vendor;
34359diff -urNp linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h
34360--- linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
34361+++ linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
34362@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34363 struct vmbus_connection {
34364 enum vmbus_connect_state conn_state;
34365
34366- atomic_t next_gpadl_handle;
34367+ atomic_unchecked_t next_gpadl_handle;
34368
34369 /*
34370 * Represents channel interrupts. Each bit position represents a
34371diff -urNp linux-3.0.7/drivers/staging/hv/rndis_filter.c linux-3.0.7/drivers/staging/hv/rndis_filter.c
34372--- linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
34373+++ linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
34374@@ -43,7 +43,7 @@ struct rndis_device {
34375
34376 enum rndis_device_state state;
34377 u32 link_stat;
34378- atomic_t new_req_id;
34379+ atomic_unchecked_t new_req_id;
34380
34381 spinlock_t request_lock;
34382 struct list_head req_list;
34383@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34384 * template
34385 */
34386 set = &rndis_msg->msg.set_req;
34387- set->req_id = atomic_inc_return(&dev->new_req_id);
34388+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34389
34390 /* Add to the request list */
34391 spin_lock_irqsave(&dev->request_lock, flags);
34392@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
34393
34394 /* Setup the rndis set */
34395 halt = &request->request_msg.msg.halt_req;
34396- halt->req_id = atomic_inc_return(&dev->new_req_id);
34397+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34398
34399 /* Ignore return since this msg is optional. */
34400 rndis_filter_send_request(dev, request);
34401diff -urNp linux-3.0.7/drivers/staging/hv/vmbus_drv.c linux-3.0.7/drivers/staging/hv/vmbus_drv.c
34402--- linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
34403+++ linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
34404@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
34405 {
34406 int ret = 0;
34407
34408- static atomic_t device_num = ATOMIC_INIT(0);
34409+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34410
34411 /* Set the device name. Otherwise, device_register() will fail. */
34412 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34413- atomic_inc_return(&device_num));
34414+ atomic_inc_return_unchecked(&device_num));
34415
34416 /* The new device belongs to this bus */
34417 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34418diff -urNp linux-3.0.7/drivers/staging/iio/ring_generic.h linux-3.0.7/drivers/staging/iio/ring_generic.h
34419--- linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
34420+++ linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
34421@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34422
34423 int (*is_enabled)(struct iio_ring_buffer *ring);
34424 int (*enable)(struct iio_ring_buffer *ring);
34425-};
34426+} __no_const;
34427
34428 struct iio_ring_setup_ops {
34429 int (*preenable)(struct iio_dev *);
34430diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet.c linux-3.0.7/drivers/staging/octeon/ethernet.c
34431--- linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
34432+++ linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
34433@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34434 * since the RX tasklet also increments it.
34435 */
34436 #ifdef CONFIG_64BIT
34437- atomic64_add(rx_status.dropped_packets,
34438- (atomic64_t *)&priv->stats.rx_dropped);
34439+ atomic64_add_unchecked(rx_status.dropped_packets,
34440+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34441 #else
34442- atomic_add(rx_status.dropped_packets,
34443- (atomic_t *)&priv->stats.rx_dropped);
34444+ atomic_add_unchecked(rx_status.dropped_packets,
34445+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34446 #endif
34447 }
34448
34449diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet-rx.c linux-3.0.7/drivers/staging/octeon/ethernet-rx.c
34450--- linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
34451+++ linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
34452@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
34453 /* Increment RX stats for virtual ports */
34454 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34455 #ifdef CONFIG_64BIT
34456- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34457- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34458+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34459+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34460 #else
34461- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34462- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34463+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34464+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34465 #endif
34466 }
34467 netif_receive_skb(skb);
34468@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
34469 dev->name);
34470 */
34471 #ifdef CONFIG_64BIT
34472- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34473+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34474 #else
34475- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34476+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34477 #endif
34478 dev_kfree_skb_irq(skb);
34479 }
34480diff -urNp linux-3.0.7/drivers/staging/pohmelfs/inode.c linux-3.0.7/drivers/staging/pohmelfs/inode.c
34481--- linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34482+++ linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
34483@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
34484 mutex_init(&psb->mcache_lock);
34485 psb->mcache_root = RB_ROOT;
34486 psb->mcache_timeout = msecs_to_jiffies(5000);
34487- atomic_long_set(&psb->mcache_gen, 0);
34488+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
34489
34490 psb->trans_max_pages = 100;
34491
34492@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
34493 INIT_LIST_HEAD(&psb->crypto_ready_list);
34494 INIT_LIST_HEAD(&psb->crypto_active_list);
34495
34496- atomic_set(&psb->trans_gen, 1);
34497+ atomic_set_unchecked(&psb->trans_gen, 1);
34498 atomic_long_set(&psb->total_inodes, 0);
34499
34500 mutex_init(&psb->state_lock);
34501diff -urNp linux-3.0.7/drivers/staging/pohmelfs/mcache.c linux-3.0.7/drivers/staging/pohmelfs/mcache.c
34502--- linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
34503+++ linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
34504@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34505 m->data = data;
34506 m->start = start;
34507 m->size = size;
34508- m->gen = atomic_long_inc_return(&psb->mcache_gen);
34509+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34510
34511 mutex_lock(&psb->mcache_lock);
34512 err = pohmelfs_mcache_insert(psb, m);
34513diff -urNp linux-3.0.7/drivers/staging/pohmelfs/netfs.h linux-3.0.7/drivers/staging/pohmelfs/netfs.h
34514--- linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
34515+++ linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
34516@@ -571,14 +571,14 @@ struct pohmelfs_config;
34517 struct pohmelfs_sb {
34518 struct rb_root mcache_root;
34519 struct mutex mcache_lock;
34520- atomic_long_t mcache_gen;
34521+ atomic_long_unchecked_t mcache_gen;
34522 unsigned long mcache_timeout;
34523
34524 unsigned int idx;
34525
34526 unsigned int trans_retries;
34527
34528- atomic_t trans_gen;
34529+ atomic_unchecked_t trans_gen;
34530
34531 unsigned int crypto_attached_size;
34532 unsigned int crypto_align_size;
34533diff -urNp linux-3.0.7/drivers/staging/pohmelfs/trans.c linux-3.0.7/drivers/staging/pohmelfs/trans.c
34534--- linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
34535+++ linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
34536@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34537 int err;
34538 struct netfs_cmd *cmd = t->iovec.iov_base;
34539
34540- t->gen = atomic_inc_return(&psb->trans_gen);
34541+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34542
34543 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34544 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34545diff -urNp linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h
34546--- linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
34547+++ linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
34548@@ -83,7 +83,7 @@ struct _io_ops {
34549 u8 *pmem);
34550 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
34551 u8 *pmem);
34552-};
34553+} __no_const;
34554
34555 struct io_req {
34556 struct list_head list;
34557diff -urNp linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c
34558--- linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
34559+++ linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
34560@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
34561 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
34562
34563 if (rlen)
34564- if (copy_to_user(data, &resp, rlen))
34565+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
34566 return -EFAULT;
34567
34568 return 0;
34569diff -urNp linux-3.0.7/drivers/staging/tty/stallion.c linux-3.0.7/drivers/staging/tty/stallion.c
34570--- linux-3.0.7/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
34571+++ linux-3.0.7/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
34572@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
34573 struct stlport stl_dummyport;
34574 struct stlport *portp;
34575
34576+ pax_track_stack();
34577+
34578 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
34579 return -EFAULT;
34580 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
34581diff -urNp linux-3.0.7/drivers/staging/usbip/usbip_common.h linux-3.0.7/drivers/staging/usbip/usbip_common.h
34582--- linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
34583+++ linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
34584@@ -315,7 +315,7 @@ struct usbip_device {
34585 void (*shutdown)(struct usbip_device *);
34586 void (*reset)(struct usbip_device *);
34587 void (*unusable)(struct usbip_device *);
34588- } eh_ops;
34589+ } __no_const eh_ops;
34590 };
34591
34592 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
34593diff -urNp linux-3.0.7/drivers/staging/usbip/vhci.h linux-3.0.7/drivers/staging/usbip/vhci.h
34594--- linux-3.0.7/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
34595+++ linux-3.0.7/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
34596@@ -94,7 +94,7 @@ struct vhci_hcd {
34597 unsigned resuming:1;
34598 unsigned long re_timeout;
34599
34600- atomic_t seqnum;
34601+ atomic_unchecked_t seqnum;
34602
34603 /*
34604 * NOTE:
34605diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_hcd.c linux-3.0.7/drivers/staging/usbip/vhci_hcd.c
34606--- linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
34607+++ linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
34608@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
34609 return;
34610 }
34611
34612- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34613+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34614 if (priv->seqnum == 0xffff)
34615 dev_info(&urb->dev->dev, "seqnum max\n");
34616
34617@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
34618 return -ENOMEM;
34619 }
34620
34621- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34622+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34623 if (unlink->seqnum == 0xffff)
34624 pr_info("seqnum max\n");
34625
34626@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
34627 vdev->rhport = rhport;
34628 }
34629
34630- atomic_set(&vhci->seqnum, 0);
34631+ atomic_set_unchecked(&vhci->seqnum, 0);
34632 spin_lock_init(&vhci->lock);
34633
34634 hcd->power_budget = 0; /* no limit */
34635diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_rx.c linux-3.0.7/drivers/staging/usbip/vhci_rx.c
34636--- linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
34637+++ linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
34638@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
34639 if (!urb) {
34640 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
34641 pr_info("max seqnum %d\n",
34642- atomic_read(&the_controller->seqnum));
34643+ atomic_read_unchecked(&the_controller->seqnum));
34644 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34645 return;
34646 }
34647diff -urNp linux-3.0.7/drivers/staging/vt6655/hostap.c linux-3.0.7/drivers/staging/vt6655/hostap.c
34648--- linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
34649+++ linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
34650@@ -79,14 +79,13 @@ static int msglevel
34651 *
34652 */
34653
34654+static net_device_ops_no_const apdev_netdev_ops;
34655+
34656 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34657 {
34658 PSDevice apdev_priv;
34659 struct net_device *dev = pDevice->dev;
34660 int ret;
34661- const struct net_device_ops apdev_netdev_ops = {
34662- .ndo_start_xmit = pDevice->tx_80211,
34663- };
34664
34665 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34666
34667@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
34668 *apdev_priv = *pDevice;
34669 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34670
34671+ /* only half broken now */
34672+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34673 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34674
34675 pDevice->apdev->type = ARPHRD_IEEE80211;
34676diff -urNp linux-3.0.7/drivers/staging/vt6656/hostap.c linux-3.0.7/drivers/staging/vt6656/hostap.c
34677--- linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
34678+++ linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
34679@@ -80,14 +80,13 @@ static int msglevel
34680 *
34681 */
34682
34683+static net_device_ops_no_const apdev_netdev_ops;
34684+
34685 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34686 {
34687 PSDevice apdev_priv;
34688 struct net_device *dev = pDevice->dev;
34689 int ret;
34690- const struct net_device_ops apdev_netdev_ops = {
34691- .ndo_start_xmit = pDevice->tx_80211,
34692- };
34693
34694 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34695
34696@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
34697 *apdev_priv = *pDevice;
34698 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34699
34700+ /* only half broken now */
34701+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34702 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34703
34704 pDevice->apdev->type = ARPHRD_IEEE80211;
34705diff -urNp linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c
34706--- linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
34707+++ linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
34708@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
34709
34710 struct usbctlx_completor {
34711 int (*complete) (struct usbctlx_completor *);
34712-};
34713+} __no_const;
34714
34715 static int
34716 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
34717diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.c linux-3.0.7/drivers/staging/zcache/tmem.c
34718--- linux-3.0.7/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
34719+++ linux-3.0.7/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
34720@@ -39,7 +39,7 @@
34721 * A tmem host implementation must use this function to register callbacks
34722 * for memory allocation.
34723 */
34724-static struct tmem_hostops tmem_hostops;
34725+static tmem_hostops_no_const tmem_hostops;
34726
34727 static void tmem_objnode_tree_init(void);
34728
34729@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
34730 * A tmem host implementation must use this function to register
34731 * callbacks for a page-accessible memory (PAM) implementation
34732 */
34733-static struct tmem_pamops tmem_pamops;
34734+static tmem_pamops_no_const tmem_pamops;
34735
34736 void tmem_register_pamops(struct tmem_pamops *m)
34737 {
34738diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.h linux-3.0.7/drivers/staging/zcache/tmem.h
34739--- linux-3.0.7/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
34740+++ linux-3.0.7/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
34741@@ -171,6 +171,7 @@ struct tmem_pamops {
34742 int (*get_data)(struct page *, void *, struct tmem_pool *);
34743 void (*free)(void *, struct tmem_pool *);
34744 };
34745+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
34746 extern void tmem_register_pamops(struct tmem_pamops *m);
34747
34748 /* memory allocation methods provided by the host implementation */
34749@@ -180,6 +181,7 @@ struct tmem_hostops {
34750 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
34751 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
34752 };
34753+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
34754 extern void tmem_register_hostops(struct tmem_hostops *m);
34755
34756 /* core tmem accessor functions */
34757diff -urNp linux-3.0.7/drivers/target/target_core_alua.c linux-3.0.7/drivers/target/target_core_alua.c
34758--- linux-3.0.7/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
34759+++ linux-3.0.7/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
34760@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
34761 char path[ALUA_METADATA_PATH_LEN];
34762 int len;
34763
34764+ pax_track_stack();
34765+
34766 memset(path, 0, ALUA_METADATA_PATH_LEN);
34767
34768 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
34769@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
34770 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
34771 int len;
34772
34773+ pax_track_stack();
34774+
34775 memset(path, 0, ALUA_METADATA_PATH_LEN);
34776 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
34777
34778diff -urNp linux-3.0.7/drivers/target/target_core_cdb.c linux-3.0.7/drivers/target/target_core_cdb.c
34779--- linux-3.0.7/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
34780+++ linux-3.0.7/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
34781@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
34782 int length = 0;
34783 unsigned char buf[SE_MODE_PAGE_BUF];
34784
34785+ pax_track_stack();
34786+
34787 memset(buf, 0, SE_MODE_PAGE_BUF);
34788
34789 switch (cdb[2] & 0x3f) {
34790diff -urNp linux-3.0.7/drivers/target/target_core_configfs.c linux-3.0.7/drivers/target/target_core_configfs.c
34791--- linux-3.0.7/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
34792+++ linux-3.0.7/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
34793@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
34794 ssize_t len = 0;
34795 int reg_count = 0, prf_isid;
34796
34797+ pax_track_stack();
34798+
34799 if (!(su_dev->se_dev_ptr))
34800 return -ENODEV;
34801
34802diff -urNp linux-3.0.7/drivers/target/target_core_pr.c linux-3.0.7/drivers/target/target_core_pr.c
34803--- linux-3.0.7/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
34804+++ linux-3.0.7/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
34805@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
34806 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
34807 u16 tpgt;
34808
34809+ pax_track_stack();
34810+
34811 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
34812 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
34813 /*
34814@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
34815 ssize_t len = 0;
34816 int reg_count = 0;
34817
34818+ pax_track_stack();
34819+
34820 memset(buf, 0, pr_aptpl_buf_len);
34821 /*
34822 * Called to clear metadata once APTPL has been deactivated.
34823@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
34824 char path[512];
34825 int ret;
34826
34827+ pax_track_stack();
34828+
34829 memset(iov, 0, sizeof(struct iovec));
34830 memset(path, 0, 512);
34831
34832diff -urNp linux-3.0.7/drivers/target/target_core_tmr.c linux-3.0.7/drivers/target/target_core_tmr.c
34833--- linux-3.0.7/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
34834+++ linux-3.0.7/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
34835@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
34836 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
34837 T_TASK(cmd)->t_task_cdbs,
34838 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34839- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34840+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34841 atomic_read(&T_TASK(cmd)->t_transport_active),
34842 atomic_read(&T_TASK(cmd)->t_transport_stop),
34843 atomic_read(&T_TASK(cmd)->t_transport_sent));
34844@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
34845 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
34846 " task: %p, t_fe_count: %d dev: %p\n", task,
34847 fe_count, dev);
34848- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34849+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34850 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
34851 flags);
34852 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34853@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
34854 }
34855 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
34856 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
34857- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34858+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34859 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
34860 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34861
34862diff -urNp linux-3.0.7/drivers/target/target_core_transport.c linux-3.0.7/drivers/target/target_core_transport.c
34863--- linux-3.0.7/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
34864+++ linux-3.0.7/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
34865@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
34866
34867 dev->queue_depth = dev_limits->queue_depth;
34868 atomic_set(&dev->depth_left, dev->queue_depth);
34869- atomic_set(&dev->dev_ordered_id, 0);
34870+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
34871
34872 se_dev_set_default_attribs(dev, dev_limits);
34873
34874@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
34875 * Used to determine when ORDERED commands should go from
34876 * Dormant to Active status.
34877 */
34878- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
34879+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
34880 smp_mb__after_atomic_inc();
34881 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
34882 cmd->se_ordered_id, cmd->sam_task_attr,
34883@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
34884 " t_transport_active: %d t_transport_stop: %d"
34885 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
34886 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34887- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34888+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34889 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
34890 atomic_read(&T_TASK(cmd)->t_transport_active),
34891 atomic_read(&T_TASK(cmd)->t_transport_stop),
34892@@ -2673,9 +2673,9 @@ check_depth:
34893 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
34894 atomic_set(&task->task_active, 1);
34895 atomic_set(&task->task_sent, 1);
34896- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
34897+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
34898
34899- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
34900+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
34901 T_TASK(cmd)->t_task_cdbs)
34902 atomic_set(&cmd->transport_sent, 1);
34903
34904@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
34905 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
34906 }
34907 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
34908- atomic_read(&T_TASK(cmd)->t_transport_aborted))
34909+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
34910 goto remove;
34911
34912 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
34913@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
34914 {
34915 int ret = 0;
34916
34917- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
34918+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
34919 if (!(send_status) ||
34920 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
34921 return 1;
34922@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
34923 */
34924 if (cmd->data_direction == DMA_TO_DEVICE) {
34925 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
34926- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
34927+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
34928 smp_mb__after_atomic_inc();
34929 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
34930 transport_new_cmd_failure(cmd);
34931@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
34932 CMD_TFO(cmd)->get_task_tag(cmd),
34933 T_TASK(cmd)->t_task_cdbs,
34934 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34935- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34936+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34937 atomic_read(&T_TASK(cmd)->t_transport_active),
34938 atomic_read(&T_TASK(cmd)->t_transport_stop),
34939 atomic_read(&T_TASK(cmd)->t_transport_sent));
34940diff -urNp linux-3.0.7/drivers/telephony/ixj.c linux-3.0.7/drivers/telephony/ixj.c
34941--- linux-3.0.7/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
34942+++ linux-3.0.7/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
34943@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34944 bool mContinue;
34945 char *pIn, *pOut;
34946
34947+ pax_track_stack();
34948+
34949 if (!SCI_Prepare(j))
34950 return 0;
34951
34952diff -urNp linux-3.0.7/drivers/tty/hvc/hvcs.c linux-3.0.7/drivers/tty/hvc/hvcs.c
34953--- linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
34954+++ linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
34955@@ -83,6 +83,7 @@
34956 #include <asm/hvcserver.h>
34957 #include <asm/uaccess.h>
34958 #include <asm/vio.h>
34959+#include <asm/local.h>
34960
34961 /*
34962 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
34963@@ -270,7 +271,7 @@ struct hvcs_struct {
34964 unsigned int index;
34965
34966 struct tty_struct *tty;
34967- int open_count;
34968+ local_t open_count;
34969
34970 /*
34971 * Used to tell the driver kernel_thread what operations need to take
34972@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
34973
34974 spin_lock_irqsave(&hvcsd->lock, flags);
34975
34976- if (hvcsd->open_count > 0) {
34977+ if (local_read(&hvcsd->open_count) > 0) {
34978 spin_unlock_irqrestore(&hvcsd->lock, flags);
34979 printk(KERN_INFO "HVCS: vterm state unchanged. "
34980 "The hvcs device node is still in use.\n");
34981@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
34982 if ((retval = hvcs_partner_connect(hvcsd)))
34983 goto error_release;
34984
34985- hvcsd->open_count = 1;
34986+ local_set(&hvcsd->open_count, 1);
34987 hvcsd->tty = tty;
34988 tty->driver_data = hvcsd;
34989
34990@@ -1179,7 +1180,7 @@ fast_open:
34991
34992 spin_lock_irqsave(&hvcsd->lock, flags);
34993 kref_get(&hvcsd->kref);
34994- hvcsd->open_count++;
34995+ local_inc(&hvcsd->open_count);
34996 hvcsd->todo_mask |= HVCS_SCHED_READ;
34997 spin_unlock_irqrestore(&hvcsd->lock, flags);
34998
34999@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35000 hvcsd = tty->driver_data;
35001
35002 spin_lock_irqsave(&hvcsd->lock, flags);
35003- if (--hvcsd->open_count == 0) {
35004+ if (local_dec_and_test(&hvcsd->open_count)) {
35005
35006 vio_disable_interrupts(hvcsd->vdev);
35007
35008@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35009 free_irq(irq, hvcsd);
35010 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35011 return;
35012- } else if (hvcsd->open_count < 0) {
35013+ } else if (local_read(&hvcsd->open_count) < 0) {
35014 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35015 " is missmanaged.\n",
35016- hvcsd->vdev->unit_address, hvcsd->open_count);
35017+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35018 }
35019
35020 spin_unlock_irqrestore(&hvcsd->lock, flags);
35021@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35022
35023 spin_lock_irqsave(&hvcsd->lock, flags);
35024 /* Preserve this so that we know how many kref refs to put */
35025- temp_open_count = hvcsd->open_count;
35026+ temp_open_count = local_read(&hvcsd->open_count);
35027
35028 /*
35029 * Don't kref put inside the spinlock because the destruction
35030@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35031 hvcsd->tty->driver_data = NULL;
35032 hvcsd->tty = NULL;
35033
35034- hvcsd->open_count = 0;
35035+ local_set(&hvcsd->open_count, 0);
35036
35037 /* This will drop any buffered data on the floor which is OK in a hangup
35038 * scenario. */
35039@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35040 * the middle of a write operation? This is a crummy place to do this
35041 * but we want to keep it all in the spinlock.
35042 */
35043- if (hvcsd->open_count <= 0) {
35044+ if (local_read(&hvcsd->open_count) <= 0) {
35045 spin_unlock_irqrestore(&hvcsd->lock, flags);
35046 return -ENODEV;
35047 }
35048@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35049 {
35050 struct hvcs_struct *hvcsd = tty->driver_data;
35051
35052- if (!hvcsd || hvcsd->open_count <= 0)
35053+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35054 return 0;
35055
35056 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35057diff -urNp linux-3.0.7/drivers/tty/ipwireless/tty.c linux-3.0.7/drivers/tty/ipwireless/tty.c
35058--- linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
35059+++ linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
35060@@ -29,6 +29,7 @@
35061 #include <linux/tty_driver.h>
35062 #include <linux/tty_flip.h>
35063 #include <linux/uaccess.h>
35064+#include <asm/local.h>
35065
35066 #include "tty.h"
35067 #include "network.h"
35068@@ -51,7 +52,7 @@ struct ipw_tty {
35069 int tty_type;
35070 struct ipw_network *network;
35071 struct tty_struct *linux_tty;
35072- int open_count;
35073+ local_t open_count;
35074 unsigned int control_lines;
35075 struct mutex ipw_tty_mutex;
35076 int tx_bytes_queued;
35077@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35078 mutex_unlock(&tty->ipw_tty_mutex);
35079 return -ENODEV;
35080 }
35081- if (tty->open_count == 0)
35082+ if (local_read(&tty->open_count) == 0)
35083 tty->tx_bytes_queued = 0;
35084
35085- tty->open_count++;
35086+ local_inc(&tty->open_count);
35087
35088 tty->linux_tty = linux_tty;
35089 linux_tty->driver_data = tty;
35090@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35091
35092 static void do_ipw_close(struct ipw_tty *tty)
35093 {
35094- tty->open_count--;
35095-
35096- if (tty->open_count == 0) {
35097+ if (local_dec_return(&tty->open_count) == 0) {
35098 struct tty_struct *linux_tty = tty->linux_tty;
35099
35100 if (linux_tty != NULL) {
35101@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35102 return;
35103
35104 mutex_lock(&tty->ipw_tty_mutex);
35105- if (tty->open_count == 0) {
35106+ if (local_read(&tty->open_count) == 0) {
35107 mutex_unlock(&tty->ipw_tty_mutex);
35108 return;
35109 }
35110@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35111 return;
35112 }
35113
35114- if (!tty->open_count) {
35115+ if (!local_read(&tty->open_count)) {
35116 mutex_unlock(&tty->ipw_tty_mutex);
35117 return;
35118 }
35119@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35120 return -ENODEV;
35121
35122 mutex_lock(&tty->ipw_tty_mutex);
35123- if (!tty->open_count) {
35124+ if (!local_read(&tty->open_count)) {
35125 mutex_unlock(&tty->ipw_tty_mutex);
35126 return -EINVAL;
35127 }
35128@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35129 if (!tty)
35130 return -ENODEV;
35131
35132- if (!tty->open_count)
35133+ if (!local_read(&tty->open_count))
35134 return -EINVAL;
35135
35136 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35137@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35138 if (!tty)
35139 return 0;
35140
35141- if (!tty->open_count)
35142+ if (!local_read(&tty->open_count))
35143 return 0;
35144
35145 return tty->tx_bytes_queued;
35146@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35147 if (!tty)
35148 return -ENODEV;
35149
35150- if (!tty->open_count)
35151+ if (!local_read(&tty->open_count))
35152 return -EINVAL;
35153
35154 return get_control_lines(tty);
35155@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35156 if (!tty)
35157 return -ENODEV;
35158
35159- if (!tty->open_count)
35160+ if (!local_read(&tty->open_count))
35161 return -EINVAL;
35162
35163 return set_control_lines(tty, set, clear);
35164@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35165 if (!tty)
35166 return -ENODEV;
35167
35168- if (!tty->open_count)
35169+ if (!local_read(&tty->open_count))
35170 return -EINVAL;
35171
35172 /* FIXME: Exactly how is the tty object locked here .. */
35173@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35174 against a parallel ioctl etc */
35175 mutex_lock(&ttyj->ipw_tty_mutex);
35176 }
35177- while (ttyj->open_count)
35178+ while (local_read(&ttyj->open_count))
35179 do_ipw_close(ttyj);
35180 ipwireless_disassociate_network_ttys(network,
35181 ttyj->channel_idx);
35182diff -urNp linux-3.0.7/drivers/tty/n_gsm.c linux-3.0.7/drivers/tty/n_gsm.c
35183--- linux-3.0.7/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
35184+++ linux-3.0.7/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
35185@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35186 return NULL;
35187 spin_lock_init(&dlci->lock);
35188 dlci->fifo = &dlci->_fifo;
35189- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35190+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35191 kfree(dlci);
35192 return NULL;
35193 }
35194diff -urNp linux-3.0.7/drivers/tty/n_tty.c linux-3.0.7/drivers/tty/n_tty.c
35195--- linux-3.0.7/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
35196+++ linux-3.0.7/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
35197@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35198 {
35199 *ops = tty_ldisc_N_TTY;
35200 ops->owner = NULL;
35201- ops->refcount = ops->flags = 0;
35202+ atomic_set(&ops->refcount, 0);
35203+ ops->flags = 0;
35204 }
35205 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35206diff -urNp linux-3.0.7/drivers/tty/pty.c linux-3.0.7/drivers/tty/pty.c
35207--- linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:54:54.000000000 -0400
35208+++ linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:55:28.000000000 -0400
35209@@ -767,8 +767,10 @@ static void __init unix98_pty_init(void)
35210 register_sysctl_table(pty_root_table);
35211
35212 /* Now create the /dev/ptmx special device */
35213+ pax_open_kernel();
35214 tty_default_fops(&ptmx_fops);
35215- ptmx_fops.open = ptmx_open;
35216+ *(void **)&ptmx_fops.open = ptmx_open;
35217+ pax_close_kernel();
35218
35219 cdev_init(&ptmx_cdev, &ptmx_fops);
35220 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35221diff -urNp linux-3.0.7/drivers/tty/rocket.c linux-3.0.7/drivers/tty/rocket.c
35222--- linux-3.0.7/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
35223+++ linux-3.0.7/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
35224@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35225 struct rocket_ports tmp;
35226 int board;
35227
35228+ pax_track_stack();
35229+
35230 if (!retports)
35231 return -EFAULT;
35232 memset(&tmp, 0, sizeof (tmp));
35233diff -urNp linux-3.0.7/drivers/tty/serial/kgdboc.c linux-3.0.7/drivers/tty/serial/kgdboc.c
35234--- linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
35235+++ linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
35236@@ -23,8 +23,9 @@
35237 #define MAX_CONFIG_LEN 40
35238
35239 static struct kgdb_io kgdboc_io_ops;
35240+static struct kgdb_io kgdboc_io_ops_console;
35241
35242-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35243+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35244 static int configured = -1;
35245
35246 static char config[MAX_CONFIG_LEN];
35247@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35248 kgdboc_unregister_kbd();
35249 if (configured == 1)
35250 kgdb_unregister_io_module(&kgdboc_io_ops);
35251+ else if (configured == 2)
35252+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35253 }
35254
35255 static int configure_kgdboc(void)
35256@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35257 int err;
35258 char *cptr = config;
35259 struct console *cons;
35260+ int is_console = 0;
35261
35262 err = kgdboc_option_setup(config);
35263 if (err || !strlen(config) || isspace(config[0]))
35264 goto noconfig;
35265
35266 err = -ENODEV;
35267- kgdboc_io_ops.is_console = 0;
35268 kgdb_tty_driver = NULL;
35269
35270 kgdboc_use_kms = 0;
35271@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35272 int idx;
35273 if (cons->device && cons->device(cons, &idx) == p &&
35274 idx == tty_line) {
35275- kgdboc_io_ops.is_console = 1;
35276+ is_console = 1;
35277 break;
35278 }
35279 cons = cons->next;
35280@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35281 kgdb_tty_line = tty_line;
35282
35283 do_register:
35284- err = kgdb_register_io_module(&kgdboc_io_ops);
35285+ if (is_console) {
35286+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35287+ configured = 2;
35288+ } else {
35289+ err = kgdb_register_io_module(&kgdboc_io_ops);
35290+ configured = 1;
35291+ }
35292 if (err)
35293 goto noconfig;
35294
35295- configured = 1;
35296-
35297 return 0;
35298
35299 noconfig:
35300@@ -212,7 +219,7 @@ noconfig:
35301 static int __init init_kgdboc(void)
35302 {
35303 /* Already configured? */
35304- if (configured == 1)
35305+ if (configured >= 1)
35306 return 0;
35307
35308 return configure_kgdboc();
35309@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35310 if (config[len - 1] == '\n')
35311 config[len - 1] = '\0';
35312
35313- if (configured == 1)
35314+ if (configured >= 1)
35315 cleanup_kgdboc();
35316
35317 /* Go and configure with the new params. */
35318@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35319 .post_exception = kgdboc_post_exp_handler,
35320 };
35321
35322+static struct kgdb_io kgdboc_io_ops_console = {
35323+ .name = "kgdboc",
35324+ .read_char = kgdboc_get_char,
35325+ .write_char = kgdboc_put_char,
35326+ .pre_exception = kgdboc_pre_exp_handler,
35327+ .post_exception = kgdboc_post_exp_handler,
35328+ .is_console = 1
35329+};
35330+
35331 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35332 /* This is only available if kgdboc is a built in for early debugging */
35333 static int __init kgdboc_early_init(char *opt)
35334diff -urNp linux-3.0.7/drivers/tty/serial/mfd.c linux-3.0.7/drivers/tty/serial/mfd.c
35335--- linux-3.0.7/drivers/tty/serial/mfd.c 2011-07-21 22:17:23.000000000 -0400
35336+++ linux-3.0.7/drivers/tty/serial/mfd.c 2011-10-11 10:44:33.000000000 -0400
35337@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35338 }
35339
35340 /* First 3 are UART ports, and the 4th is the DMA */
35341-static const struct pci_device_id pci_ids[] __devinitdata = {
35342+static const struct pci_device_id pci_ids[] __devinitconst = {
35343 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35344 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35345 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35346diff -urNp linux-3.0.7/drivers/tty/serial/mrst_max3110.c linux-3.0.7/drivers/tty/serial/mrst_max3110.c
35347--- linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:54:54.000000000 -0400
35348+++ linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:55:28.000000000 -0400
35349@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35350 int loop = 1, num, total = 0;
35351 u8 recv_buf[512], *pbuf;
35352
35353+ pax_track_stack();
35354+
35355 pbuf = recv_buf;
35356 do {
35357 num = max3110_read_multi(max, pbuf);
35358diff -urNp linux-3.0.7/drivers/tty/tty_io.c linux-3.0.7/drivers/tty/tty_io.c
35359--- linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:54:54.000000000 -0400
35360+++ linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:55:28.000000000 -0400
35361@@ -3214,7 +3214,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35362
35363 void tty_default_fops(struct file_operations *fops)
35364 {
35365- *fops = tty_fops;
35366+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35367 }
35368
35369 /*
35370diff -urNp linux-3.0.7/drivers/tty/tty_ldisc.c linux-3.0.7/drivers/tty/tty_ldisc.c
35371--- linux-3.0.7/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
35372+++ linux-3.0.7/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
35373@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35374 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35375 struct tty_ldisc_ops *ldo = ld->ops;
35376
35377- ldo->refcount--;
35378+ atomic_dec(&ldo->refcount);
35379 module_put(ldo->owner);
35380 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35381
35382@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35383 spin_lock_irqsave(&tty_ldisc_lock, flags);
35384 tty_ldiscs[disc] = new_ldisc;
35385 new_ldisc->num = disc;
35386- new_ldisc->refcount = 0;
35387+ atomic_set(&new_ldisc->refcount, 0);
35388 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35389
35390 return ret;
35391@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35392 return -EINVAL;
35393
35394 spin_lock_irqsave(&tty_ldisc_lock, flags);
35395- if (tty_ldiscs[disc]->refcount)
35396+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35397 ret = -EBUSY;
35398 else
35399 tty_ldiscs[disc] = NULL;
35400@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35401 if (ldops) {
35402 ret = ERR_PTR(-EAGAIN);
35403 if (try_module_get(ldops->owner)) {
35404- ldops->refcount++;
35405+ atomic_inc(&ldops->refcount);
35406 ret = ldops;
35407 }
35408 }
35409@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35410 unsigned long flags;
35411
35412 spin_lock_irqsave(&tty_ldisc_lock, flags);
35413- ldops->refcount--;
35414+ atomic_dec(&ldops->refcount);
35415 module_put(ldops->owner);
35416 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35417 }
35418diff -urNp linux-3.0.7/drivers/tty/vt/keyboard.c linux-3.0.7/drivers/tty/vt/keyboard.c
35419--- linux-3.0.7/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
35420+++ linux-3.0.7/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
35421@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35422 kbd->kbdmode == VC_OFF) &&
35423 value != KVAL(K_SAK))
35424 return; /* SAK is allowed even in raw mode */
35425+
35426+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35427+ {
35428+ void *func = fn_handler[value];
35429+ if (func == fn_show_state || func == fn_show_ptregs ||
35430+ func == fn_show_mem)
35431+ return;
35432+ }
35433+#endif
35434+
35435 fn_handler[value](vc);
35436 }
35437
35438diff -urNp linux-3.0.7/drivers/tty/vt/vt.c linux-3.0.7/drivers/tty/vt/vt.c
35439--- linux-3.0.7/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
35440+++ linux-3.0.7/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
35441@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35442
35443 static void notify_write(struct vc_data *vc, unsigned int unicode)
35444 {
35445- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35446+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
35447 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35448 }
35449
35450diff -urNp linux-3.0.7/drivers/tty/vt/vt_ioctl.c linux-3.0.7/drivers/tty/vt/vt_ioctl.c
35451--- linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35452+++ linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
35453@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35454 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35455 return -EFAULT;
35456
35457- if (!capable(CAP_SYS_TTY_CONFIG))
35458- perm = 0;
35459-
35460 switch (cmd) {
35461 case KDGKBENT:
35462 key_map = key_maps[s];
35463@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35464 val = (i ? K_HOLE : K_NOSUCHMAP);
35465 return put_user(val, &user_kbe->kb_value);
35466 case KDSKBENT:
35467+ if (!capable(CAP_SYS_TTY_CONFIG))
35468+ perm = 0;
35469+
35470 if (!perm)
35471 return -EPERM;
35472 if (!i && v == K_NOSUCHMAP) {
35473@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35474 int i, j, k;
35475 int ret;
35476
35477- if (!capable(CAP_SYS_TTY_CONFIG))
35478- perm = 0;
35479-
35480 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35481 if (!kbs) {
35482 ret = -ENOMEM;
35483@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35484 kfree(kbs);
35485 return ((p && *p) ? -EOVERFLOW : 0);
35486 case KDSKBSENT:
35487+ if (!capable(CAP_SYS_TTY_CONFIG))
35488+ perm = 0;
35489+
35490 if (!perm) {
35491 ret = -EPERM;
35492 goto reterr;
35493diff -urNp linux-3.0.7/drivers/uio/uio.c linux-3.0.7/drivers/uio/uio.c
35494--- linux-3.0.7/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
35495+++ linux-3.0.7/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
35496@@ -25,6 +25,7 @@
35497 #include <linux/kobject.h>
35498 #include <linux/cdev.h>
35499 #include <linux/uio_driver.h>
35500+#include <asm/local.h>
35501
35502 #define UIO_MAX_DEVICES (1U << MINORBITS)
35503
35504@@ -32,10 +33,10 @@ struct uio_device {
35505 struct module *owner;
35506 struct device *dev;
35507 int minor;
35508- atomic_t event;
35509+ atomic_unchecked_t event;
35510 struct fasync_struct *async_queue;
35511 wait_queue_head_t wait;
35512- int vma_count;
35513+ local_t vma_count;
35514 struct uio_info *info;
35515 struct kobject *map_dir;
35516 struct kobject *portio_dir;
35517@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
35518 struct device_attribute *attr, char *buf)
35519 {
35520 struct uio_device *idev = dev_get_drvdata(dev);
35521- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35522+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35523 }
35524
35525 static struct device_attribute uio_class_attributes[] = {
35526@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
35527 {
35528 struct uio_device *idev = info->uio_dev;
35529
35530- atomic_inc(&idev->event);
35531+ atomic_inc_unchecked(&idev->event);
35532 wake_up_interruptible(&idev->wait);
35533 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35534 }
35535@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
35536 }
35537
35538 listener->dev = idev;
35539- listener->event_count = atomic_read(&idev->event);
35540+ listener->event_count = atomic_read_unchecked(&idev->event);
35541 filep->private_data = listener;
35542
35543 if (idev->info->open) {
35544@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
35545 return -EIO;
35546
35547 poll_wait(filep, &idev->wait, wait);
35548- if (listener->event_count != atomic_read(&idev->event))
35549+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35550 return POLLIN | POLLRDNORM;
35551 return 0;
35552 }
35553@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
35554 do {
35555 set_current_state(TASK_INTERRUPTIBLE);
35556
35557- event_count = atomic_read(&idev->event);
35558+ event_count = atomic_read_unchecked(&idev->event);
35559 if (event_count != listener->event_count) {
35560 if (copy_to_user(buf, &event_count, count))
35561 retval = -EFAULT;
35562@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
35563 static void uio_vma_open(struct vm_area_struct *vma)
35564 {
35565 struct uio_device *idev = vma->vm_private_data;
35566- idev->vma_count++;
35567+ local_inc(&idev->vma_count);
35568 }
35569
35570 static void uio_vma_close(struct vm_area_struct *vma)
35571 {
35572 struct uio_device *idev = vma->vm_private_data;
35573- idev->vma_count--;
35574+ local_dec(&idev->vma_count);
35575 }
35576
35577 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35578@@ -823,7 +824,7 @@ int __uio_register_device(struct module
35579 idev->owner = owner;
35580 idev->info = info;
35581 init_waitqueue_head(&idev->wait);
35582- atomic_set(&idev->event, 0);
35583+ atomic_set_unchecked(&idev->event, 0);
35584
35585 ret = uio_get_minor(idev);
35586 if (ret)
35587diff -urNp linux-3.0.7/drivers/usb/atm/cxacru.c linux-3.0.7/drivers/usb/atm/cxacru.c
35588--- linux-3.0.7/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
35589+++ linux-3.0.7/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
35590@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
35591 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35592 if (ret < 2)
35593 return -EINVAL;
35594- if (index < 0 || index > 0x7f)
35595+ if (index > 0x7f)
35596 return -EINVAL;
35597 pos += tmp;
35598
35599diff -urNp linux-3.0.7/drivers/usb/atm/usbatm.c linux-3.0.7/drivers/usb/atm/usbatm.c
35600--- linux-3.0.7/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
35601+++ linux-3.0.7/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
35602@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
35603 if (printk_ratelimit())
35604 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35605 __func__, vpi, vci);
35606- atomic_inc(&vcc->stats->rx_err);
35607+ atomic_inc_unchecked(&vcc->stats->rx_err);
35608 return;
35609 }
35610
35611@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
35612 if (length > ATM_MAX_AAL5_PDU) {
35613 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35614 __func__, length, vcc);
35615- atomic_inc(&vcc->stats->rx_err);
35616+ atomic_inc_unchecked(&vcc->stats->rx_err);
35617 goto out;
35618 }
35619
35620@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
35621 if (sarb->len < pdu_length) {
35622 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35623 __func__, pdu_length, sarb->len, vcc);
35624- atomic_inc(&vcc->stats->rx_err);
35625+ atomic_inc_unchecked(&vcc->stats->rx_err);
35626 goto out;
35627 }
35628
35629 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35630 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35631 __func__, vcc);
35632- atomic_inc(&vcc->stats->rx_err);
35633+ atomic_inc_unchecked(&vcc->stats->rx_err);
35634 goto out;
35635 }
35636
35637@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
35638 if (printk_ratelimit())
35639 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35640 __func__, length);
35641- atomic_inc(&vcc->stats->rx_drop);
35642+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35643 goto out;
35644 }
35645
35646@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
35647
35648 vcc->push(vcc, skb);
35649
35650- atomic_inc(&vcc->stats->rx);
35651+ atomic_inc_unchecked(&vcc->stats->rx);
35652 out:
35653 skb_trim(sarb, 0);
35654 }
35655@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
35656 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35657
35658 usbatm_pop(vcc, skb);
35659- atomic_inc(&vcc->stats->tx);
35660+ atomic_inc_unchecked(&vcc->stats->tx);
35661
35662 skb = skb_dequeue(&instance->sndqueue);
35663 }
35664@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
35665 if (!left--)
35666 return sprintf(page,
35667 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35668- atomic_read(&atm_dev->stats.aal5.tx),
35669- atomic_read(&atm_dev->stats.aal5.tx_err),
35670- atomic_read(&atm_dev->stats.aal5.rx),
35671- atomic_read(&atm_dev->stats.aal5.rx_err),
35672- atomic_read(&atm_dev->stats.aal5.rx_drop));
35673+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35674+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35675+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35676+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35677+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35678
35679 if (!left--) {
35680 if (instance->disconnected)
35681diff -urNp linux-3.0.7/drivers/usb/core/devices.c linux-3.0.7/drivers/usb/core/devices.c
35682--- linux-3.0.7/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
35683+++ linux-3.0.7/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
35684@@ -126,7 +126,7 @@ static const char format_endpt[] =
35685 * time it gets called.
35686 */
35687 static struct device_connect_event {
35688- atomic_t count;
35689+ atomic_unchecked_t count;
35690 wait_queue_head_t wait;
35691 } device_event = {
35692 .count = ATOMIC_INIT(1),
35693@@ -164,7 +164,7 @@ static const struct class_info clas_info
35694
35695 void usbfs_conn_disc_event(void)
35696 {
35697- atomic_add(2, &device_event.count);
35698+ atomic_add_unchecked(2, &device_event.count);
35699 wake_up(&device_event.wait);
35700 }
35701
35702@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
35703
35704 poll_wait(file, &device_event.wait, wait);
35705
35706- event_count = atomic_read(&device_event.count);
35707+ event_count = atomic_read_unchecked(&device_event.count);
35708 if (file->f_version != event_count) {
35709 file->f_version = event_count;
35710 return POLLIN | POLLRDNORM;
35711diff -urNp linux-3.0.7/drivers/usb/core/message.c linux-3.0.7/drivers/usb/core/message.c
35712--- linux-3.0.7/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
35713+++ linux-3.0.7/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
35714@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
35715 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35716 if (buf) {
35717 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35718- if (len > 0) {
35719- smallbuf = kmalloc(++len, GFP_NOIO);
35720+ if (len++ > 0) {
35721+ smallbuf = kmalloc(len, GFP_NOIO);
35722 if (!smallbuf)
35723 return buf;
35724 memcpy(smallbuf, buf, len);
35725diff -urNp linux-3.0.7/drivers/usb/early/ehci-dbgp.c linux-3.0.7/drivers/usb/early/ehci-dbgp.c
35726--- linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
35727+++ linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
35728@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
35729
35730 #ifdef CONFIG_KGDB
35731 static struct kgdb_io kgdbdbgp_io_ops;
35732-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
35733+static struct kgdb_io kgdbdbgp_io_ops_console;
35734+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
35735 #else
35736 #define dbgp_kgdb_mode (0)
35737 #endif
35738@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
35739 .write_char = kgdbdbgp_write_char,
35740 };
35741
35742+static struct kgdb_io kgdbdbgp_io_ops_console = {
35743+ .name = "kgdbdbgp",
35744+ .read_char = kgdbdbgp_read_char,
35745+ .write_char = kgdbdbgp_write_char,
35746+ .is_console = 1
35747+};
35748+
35749 static int kgdbdbgp_wait_time;
35750
35751 static int __init kgdbdbgp_parse_config(char *str)
35752@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
35753 ptr++;
35754 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
35755 }
35756- kgdb_register_io_module(&kgdbdbgp_io_ops);
35757- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
35758+ if (early_dbgp_console.index != -1)
35759+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
35760+ else
35761+ kgdb_register_io_module(&kgdbdbgp_io_ops);
35762
35763 return 0;
35764 }
35765diff -urNp linux-3.0.7/drivers/usb/host/xhci-mem.c linux-3.0.7/drivers/usb/host/xhci-mem.c
35766--- linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
35767+++ linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
35768@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
35769 unsigned int num_tests;
35770 int i, ret;
35771
35772+ pax_track_stack();
35773+
35774 num_tests = ARRAY_SIZE(simple_test_vector);
35775 for (i = 0; i < num_tests; i++) {
35776 ret = xhci_test_trb_in_td(xhci,
35777diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-hc.h linux-3.0.7/drivers/usb/wusbcore/wa-hc.h
35778--- linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
35779+++ linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
35780@@ -192,7 +192,7 @@ struct wahc {
35781 struct list_head xfer_delayed_list;
35782 spinlock_t xfer_list_lock;
35783 struct work_struct xfer_work;
35784- atomic_t xfer_id_count;
35785+ atomic_unchecked_t xfer_id_count;
35786 };
35787
35788
35789@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35790 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35791 spin_lock_init(&wa->xfer_list_lock);
35792 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35793- atomic_set(&wa->xfer_id_count, 1);
35794+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35795 }
35796
35797 /**
35798diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c
35799--- linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
35800+++ linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
35801@@ -294,7 +294,7 @@ out:
35802 */
35803 static void wa_xfer_id_init(struct wa_xfer *xfer)
35804 {
35805- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35806+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35807 }
35808
35809 /*
35810diff -urNp linux-3.0.7/drivers/vhost/vhost.c linux-3.0.7/drivers/vhost/vhost.c
35811--- linux-3.0.7/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
35812+++ linux-3.0.7/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
35813@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
35814 return get_user(vq->last_used_idx, &used->idx);
35815 }
35816
35817-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
35818+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
35819 {
35820 struct file *eventfp, *filep = NULL,
35821 *pollstart = NULL, *pollstop = NULL;
35822diff -urNp linux-3.0.7/drivers/video/aty/aty128fb.c linux-3.0.7/drivers/video/aty/aty128fb.c
35823--- linux-3.0.7/drivers/video/aty/aty128fb.c 2011-07-21 22:17:23.000000000 -0400
35824+++ linux-3.0.7/drivers/video/aty/aty128fb.c 2011-10-11 10:44:33.000000000 -0400
35825@@ -148,7 +148,7 @@ enum {
35826 };
35827
35828 /* Must match above enum */
35829-static const char *r128_family[] __devinitdata = {
35830+static const char *r128_family[] __devinitconst = {
35831 "AGP",
35832 "PCI",
35833 "PRO AGP",
35834diff -urNp linux-3.0.7/drivers/video/fbcmap.c linux-3.0.7/drivers/video/fbcmap.c
35835--- linux-3.0.7/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
35836+++ linux-3.0.7/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
35837@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35838 rc = -ENODEV;
35839 goto out;
35840 }
35841- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35842- !info->fbops->fb_setcmap)) {
35843+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35844 rc = -EINVAL;
35845 goto out1;
35846 }
35847diff -urNp linux-3.0.7/drivers/video/fbmem.c linux-3.0.7/drivers/video/fbmem.c
35848--- linux-3.0.7/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
35849+++ linux-3.0.7/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
35850@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
35851 image->dx += image->width + 8;
35852 }
35853 } else if (rotate == FB_ROTATE_UD) {
35854- for (x = 0; x < num && image->dx >= 0; x++) {
35855+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35856 info->fbops->fb_imageblit(info, image);
35857 image->dx -= image->width + 8;
35858 }
35859@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
35860 image->dy += image->height + 8;
35861 }
35862 } else if (rotate == FB_ROTATE_CCW) {
35863- for (x = 0; x < num && image->dy >= 0; x++) {
35864+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35865 info->fbops->fb_imageblit(info, image);
35866 image->dy -= image->height + 8;
35867 }
35868@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
35869 int flags = info->flags;
35870 int ret = 0;
35871
35872+ pax_track_stack();
35873+
35874 if (var->activate & FB_ACTIVATE_INV_MODE) {
35875 struct fb_videomode mode1, mode2;
35876
35877@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
35878 void __user *argp = (void __user *)arg;
35879 long ret = 0;
35880
35881+ pax_track_stack();
35882+
35883 switch (cmd) {
35884 case FBIOGET_VSCREENINFO:
35885 if (!lock_fb_info(info))
35886@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
35887 return -EFAULT;
35888 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35889 return -EINVAL;
35890- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35891+ if (con2fb.framebuffer >= FB_MAX)
35892 return -EINVAL;
35893 if (!registered_fb[con2fb.framebuffer])
35894 request_module("fb%d", con2fb.framebuffer);
35895diff -urNp linux-3.0.7/drivers/video/geode/gx1fb_core.c linux-3.0.7/drivers/video/geode/gx1fb_core.c
35896--- linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-07-21 22:17:23.000000000 -0400
35897+++ linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-10-11 10:44:33.000000000 -0400
35898@@ -29,7 +29,7 @@ static int crt_option = 1;
35899 static char panel_option[32] = "";
35900
35901 /* Modes relevant to the GX1 (taken from modedb.c) */
35902-static const struct fb_videomode __devinitdata gx1_modedb[] = {
35903+static const struct fb_videomode __devinitconst gx1_modedb[] = {
35904 /* 640x480-60 VESA */
35905 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
35906 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
35907diff -urNp linux-3.0.7/drivers/video/gxt4500.c linux-3.0.7/drivers/video/gxt4500.c
35908--- linux-3.0.7/drivers/video/gxt4500.c 2011-07-21 22:17:23.000000000 -0400
35909+++ linux-3.0.7/drivers/video/gxt4500.c 2011-10-11 10:44:33.000000000 -0400
35910@@ -156,7 +156,7 @@ struct gxt4500_par {
35911 static char *mode_option;
35912
35913 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
35914-static const struct fb_videomode defaultmode __devinitdata = {
35915+static const struct fb_videomode defaultmode __devinitconst = {
35916 .refresh = 60,
35917 .xres = 1280,
35918 .yres = 1024,
35919@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
35920 return 0;
35921 }
35922
35923-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
35924+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
35925 .id = "IBM GXT4500P",
35926 .type = FB_TYPE_PACKED_PIXELS,
35927 .visual = FB_VISUAL_PSEUDOCOLOR,
35928diff -urNp linux-3.0.7/drivers/video/i810/i810_accel.c linux-3.0.7/drivers/video/i810/i810_accel.c
35929--- linux-3.0.7/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
35930+++ linux-3.0.7/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
35931@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35932 }
35933 }
35934 printk("ringbuffer lockup!!!\n");
35935+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35936 i810_report_error(mmio);
35937 par->dev_flags |= LOCKUP;
35938 info->pixmap.scan_align = 1;
35939diff -urNp linux-3.0.7/drivers/video/i810/i810_main.c linux-3.0.7/drivers/video/i810/i810_main.c
35940--- linux-3.0.7/drivers/video/i810/i810_main.c 2011-07-21 22:17:23.000000000 -0400
35941+++ linux-3.0.7/drivers/video/i810/i810_main.c 2011-10-11 10:44:33.000000000 -0400
35942@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
35943 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
35944
35945 /* PCI */
35946-static const char *i810_pci_list[] __devinitdata = {
35947+static const char *i810_pci_list[] __devinitconst = {
35948 "Intel(R) 810 Framebuffer Device" ,
35949 "Intel(R) 810-DC100 Framebuffer Device" ,
35950 "Intel(R) 810E Framebuffer Device" ,
35951diff -urNp linux-3.0.7/drivers/video/jz4740_fb.c linux-3.0.7/drivers/video/jz4740_fb.c
35952--- linux-3.0.7/drivers/video/jz4740_fb.c 2011-07-21 22:17:23.000000000 -0400
35953+++ linux-3.0.7/drivers/video/jz4740_fb.c 2011-10-11 10:44:33.000000000 -0400
35954@@ -136,7 +136,7 @@ struct jzfb {
35955 uint32_t pseudo_palette[16];
35956 };
35957
35958-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
35959+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
35960 .id = "JZ4740 FB",
35961 .type = FB_TYPE_PACKED_PIXELS,
35962 .visual = FB_VISUAL_TRUECOLOR,
35963diff -urNp linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm
35964--- linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
35965+++ linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
35966@@ -1,1604 +1,1123 @@
35967 P3
35968-# Standard 224-color Linux logo
35969 80 80
35970 255
35971- 0 0 0 0 0 0 0 0 0 0 0 0
35972- 0 0 0 0 0 0 0 0 0 0 0 0
35973- 0 0 0 0 0 0 0 0 0 0 0 0
35974- 0 0 0 0 0 0 0 0 0 0 0 0
35975- 0 0 0 0 0 0 0 0 0 0 0 0
35976- 0 0 0 0 0 0 0 0 0 0 0 0
35977- 0 0 0 0 0 0 0 0 0 0 0 0
35978- 0 0 0 0 0 0 0 0 0 0 0 0
35979- 0 0 0 0 0 0 0 0 0 0 0 0
35980- 6 6 6 6 6 6 10 10 10 10 10 10
35981- 10 10 10 6 6 6 6 6 6 6 6 6
35982- 0 0 0 0 0 0 0 0 0 0 0 0
35983- 0 0 0 0 0 0 0 0 0 0 0 0
35984- 0 0 0 0 0 0 0 0 0 0 0 0
35985- 0 0 0 0 0 0 0 0 0 0 0 0
35986- 0 0 0 0 0 0 0 0 0 0 0 0
35987- 0 0 0 0 0 0 0 0 0 0 0 0
35988- 0 0 0 0 0 0 0 0 0 0 0 0
35989- 0 0 0 0 0 0 0 0 0 0 0 0
35990- 0 0 0 0 0 0 0 0 0 0 0 0
35991- 0 0 0 0 0 0 0 0 0 0 0 0
35992- 0 0 0 0 0 0 0 0 0 0 0 0
35993- 0 0 0 0 0 0 0 0 0 0 0 0
35994- 0 0 0 0 0 0 0 0 0 0 0 0
35995- 0 0 0 0 0 0 0 0 0 0 0 0
35996- 0 0 0 0 0 0 0 0 0 0 0 0
35997- 0 0 0 0 0 0 0 0 0 0 0 0
35998- 0 0 0 0 0 0 0 0 0 0 0 0
35999- 0 0 0 6 6 6 10 10 10 14 14 14
36000- 22 22 22 26 26 26 30 30 30 34 34 34
36001- 30 30 30 30 30 30 26 26 26 18 18 18
36002- 14 14 14 10 10 10 6 6 6 0 0 0
36003- 0 0 0 0 0 0 0 0 0 0 0 0
36004- 0 0 0 0 0 0 0 0 0 0 0 0
36005- 0 0 0 0 0 0 0 0 0 0 0 0
36006- 0 0 0 0 0 0 0 0 0 0 0 0
36007- 0 0 0 0 0 0 0 0 0 0 0 0
36008- 0 0 0 0 0 0 0 0 0 0 0 0
36009- 0 0 0 0 0 0 0 0 0 0 0 0
36010- 0 0 0 0 0 0 0 0 0 0 0 0
36011- 0 0 0 0 0 0 0 0 0 0 0 0
36012- 0 0 0 0 0 1 0 0 1 0 0 0
36013- 0 0 0 0 0 0 0 0 0 0 0 0
36014- 0 0 0 0 0 0 0 0 0 0 0 0
36015- 0 0 0 0 0 0 0 0 0 0 0 0
36016- 0 0 0 0 0 0 0 0 0 0 0 0
36017- 0 0 0 0 0 0 0 0 0 0 0 0
36018- 0 0 0 0 0 0 0 0 0 0 0 0
36019- 6 6 6 14 14 14 26 26 26 42 42 42
36020- 54 54 54 66 66 66 78 78 78 78 78 78
36021- 78 78 78 74 74 74 66 66 66 54 54 54
36022- 42 42 42 26 26 26 18 18 18 10 10 10
36023- 6 6 6 0 0 0 0 0 0 0 0 0
36024- 0 0 0 0 0 0 0 0 0 0 0 0
36025- 0 0 0 0 0 0 0 0 0 0 0 0
36026- 0 0 0 0 0 0 0 0 0 0 0 0
36027- 0 0 0 0 0 0 0 0 0 0 0 0
36028- 0 0 0 0 0 0 0 0 0 0 0 0
36029- 0 0 0 0 0 0 0 0 0 0 0 0
36030- 0 0 0 0 0 0 0 0 0 0 0 0
36031- 0 0 0 0 0 0 0 0 0 0 0 0
36032- 0 0 1 0 0 0 0 0 0 0 0 0
36033- 0 0 0 0 0 0 0 0 0 0 0 0
36034- 0 0 0 0 0 0 0 0 0 0 0 0
36035- 0 0 0 0 0 0 0 0 0 0 0 0
36036- 0 0 0 0 0 0 0 0 0 0 0 0
36037- 0 0 0 0 0 0 0 0 0 0 0 0
36038- 0 0 0 0 0 0 0 0 0 10 10 10
36039- 22 22 22 42 42 42 66 66 66 86 86 86
36040- 66 66 66 38 38 38 38 38 38 22 22 22
36041- 26 26 26 34 34 34 54 54 54 66 66 66
36042- 86 86 86 70 70 70 46 46 46 26 26 26
36043- 14 14 14 6 6 6 0 0 0 0 0 0
36044- 0 0 0 0 0 0 0 0 0 0 0 0
36045- 0 0 0 0 0 0 0 0 0 0 0 0
36046- 0 0 0 0 0 0 0 0 0 0 0 0
36047- 0 0 0 0 0 0 0 0 0 0 0 0
36048- 0 0 0 0 0 0 0 0 0 0 0 0
36049- 0 0 0 0 0 0 0 0 0 0 0 0
36050- 0 0 0 0 0 0 0 0 0 0 0 0
36051- 0 0 0 0 0 0 0 0 0 0 0 0
36052- 0 0 1 0 0 1 0 0 1 0 0 0
36053- 0 0 0 0 0 0 0 0 0 0 0 0
36054- 0 0 0 0 0 0 0 0 0 0 0 0
36055- 0 0 0 0 0 0 0 0 0 0 0 0
36056- 0 0 0 0 0 0 0 0 0 0 0 0
36057- 0 0 0 0 0 0 0 0 0 0 0 0
36058- 0 0 0 0 0 0 10 10 10 26 26 26
36059- 50 50 50 82 82 82 58 58 58 6 6 6
36060- 2 2 6 2 2 6 2 2 6 2 2 6
36061- 2 2 6 2 2 6 2 2 6 2 2 6
36062- 6 6 6 54 54 54 86 86 86 66 66 66
36063- 38 38 38 18 18 18 6 6 6 0 0 0
36064- 0 0 0 0 0 0 0 0 0 0 0 0
36065- 0 0 0 0 0 0 0 0 0 0 0 0
36066- 0 0 0 0 0 0 0 0 0 0 0 0
36067- 0 0 0 0 0 0 0 0 0 0 0 0
36068- 0 0 0 0 0 0 0 0 0 0 0 0
36069- 0 0 0 0 0 0 0 0 0 0 0 0
36070- 0 0 0 0 0 0 0 0 0 0 0 0
36071- 0 0 0 0 0 0 0 0 0 0 0 0
36072- 0 0 0 0 0 0 0 0 0 0 0 0
36073- 0 0 0 0 0 0 0 0 0 0 0 0
36074- 0 0 0 0 0 0 0 0 0 0 0 0
36075- 0 0 0 0 0 0 0 0 0 0 0 0
36076- 0 0 0 0 0 0 0 0 0 0 0 0
36077- 0 0 0 0 0 0 0 0 0 0 0 0
36078- 0 0 0 6 6 6 22 22 22 50 50 50
36079- 78 78 78 34 34 34 2 2 6 2 2 6
36080- 2 2 6 2 2 6 2 2 6 2 2 6
36081- 2 2 6 2 2 6 2 2 6 2 2 6
36082- 2 2 6 2 2 6 6 6 6 70 70 70
36083- 78 78 78 46 46 46 22 22 22 6 6 6
36084- 0 0 0 0 0 0 0 0 0 0 0 0
36085- 0 0 0 0 0 0 0 0 0 0 0 0
36086- 0 0 0 0 0 0 0 0 0 0 0 0
36087- 0 0 0 0 0 0 0 0 0 0 0 0
36088- 0 0 0 0 0 0 0 0 0 0 0 0
36089- 0 0 0 0 0 0 0 0 0 0 0 0
36090- 0 0 0 0 0 0 0 0 0 0 0 0
36091- 0 0 0 0 0 0 0 0 0 0 0 0
36092- 0 0 1 0 0 1 0 0 1 0 0 0
36093- 0 0 0 0 0 0 0 0 0 0 0 0
36094- 0 0 0 0 0 0 0 0 0 0 0 0
36095- 0 0 0 0 0 0 0 0 0 0 0 0
36096- 0 0 0 0 0 0 0 0 0 0 0 0
36097- 0 0 0 0 0 0 0 0 0 0 0 0
36098- 6 6 6 18 18 18 42 42 42 82 82 82
36099- 26 26 26 2 2 6 2 2 6 2 2 6
36100- 2 2 6 2 2 6 2 2 6 2 2 6
36101- 2 2 6 2 2 6 2 2 6 14 14 14
36102- 46 46 46 34 34 34 6 6 6 2 2 6
36103- 42 42 42 78 78 78 42 42 42 18 18 18
36104- 6 6 6 0 0 0 0 0 0 0 0 0
36105- 0 0 0 0 0 0 0 0 0 0 0 0
36106- 0 0 0 0 0 0 0 0 0 0 0 0
36107- 0 0 0 0 0 0 0 0 0 0 0 0
36108- 0 0 0 0 0 0 0 0 0 0 0 0
36109- 0 0 0 0 0 0 0 0 0 0 0 0
36110- 0 0 0 0 0 0 0 0 0 0 0 0
36111- 0 0 0 0 0 0 0 0 0 0 0 0
36112- 0 0 1 0 0 0 0 0 1 0 0 0
36113- 0 0 0 0 0 0 0 0 0 0 0 0
36114- 0 0 0 0 0 0 0 0 0 0 0 0
36115- 0 0 0 0 0 0 0 0 0 0 0 0
36116- 0 0 0 0 0 0 0 0 0 0 0 0
36117- 0 0 0 0 0 0 0 0 0 0 0 0
36118- 10 10 10 30 30 30 66 66 66 58 58 58
36119- 2 2 6 2 2 6 2 2 6 2 2 6
36120- 2 2 6 2 2 6 2 2 6 2 2 6
36121- 2 2 6 2 2 6 2 2 6 26 26 26
36122- 86 86 86 101 101 101 46 46 46 10 10 10
36123- 2 2 6 58 58 58 70 70 70 34 34 34
36124- 10 10 10 0 0 0 0 0 0 0 0 0
36125- 0 0 0 0 0 0 0 0 0 0 0 0
36126- 0 0 0 0 0 0 0 0 0 0 0 0
36127- 0 0 0 0 0 0 0 0 0 0 0 0
36128- 0 0 0 0 0 0 0 0 0 0 0 0
36129- 0 0 0 0 0 0 0 0 0 0 0 0
36130- 0 0 0 0 0 0 0 0 0 0 0 0
36131- 0 0 0 0 0 0 0 0 0 0 0 0
36132- 0 0 1 0 0 1 0 0 1 0 0 0
36133- 0 0 0 0 0 0 0 0 0 0 0 0
36134- 0 0 0 0 0 0 0 0 0 0 0 0
36135- 0 0 0 0 0 0 0 0 0 0 0 0
36136- 0 0 0 0 0 0 0 0 0 0 0 0
36137- 0 0 0 0 0 0 0 0 0 0 0 0
36138- 14 14 14 42 42 42 86 86 86 10 10 10
36139- 2 2 6 2 2 6 2 2 6 2 2 6
36140- 2 2 6 2 2 6 2 2 6 2 2 6
36141- 2 2 6 2 2 6 2 2 6 30 30 30
36142- 94 94 94 94 94 94 58 58 58 26 26 26
36143- 2 2 6 6 6 6 78 78 78 54 54 54
36144- 22 22 22 6 6 6 0 0 0 0 0 0
36145- 0 0 0 0 0 0 0 0 0 0 0 0
36146- 0 0 0 0 0 0 0 0 0 0 0 0
36147- 0 0 0 0 0 0 0 0 0 0 0 0
36148- 0 0 0 0 0 0 0 0 0 0 0 0
36149- 0 0 0 0 0 0 0 0 0 0 0 0
36150- 0 0 0 0 0 0 0 0 0 0 0 0
36151- 0 0 0 0 0 0 0 0 0 0 0 0
36152- 0 0 0 0 0 0 0 0 0 0 0 0
36153- 0 0 0 0 0 0 0 0 0 0 0 0
36154- 0 0 0 0 0 0 0 0 0 0 0 0
36155- 0 0 0 0 0 0 0 0 0 0 0 0
36156- 0 0 0 0 0 0 0 0 0 0 0 0
36157- 0 0 0 0 0 0 0 0 0 6 6 6
36158- 22 22 22 62 62 62 62 62 62 2 2 6
36159- 2 2 6 2 2 6 2 2 6 2 2 6
36160- 2 2 6 2 2 6 2 2 6 2 2 6
36161- 2 2 6 2 2 6 2 2 6 26 26 26
36162- 54 54 54 38 38 38 18 18 18 10 10 10
36163- 2 2 6 2 2 6 34 34 34 82 82 82
36164- 38 38 38 14 14 14 0 0 0 0 0 0
36165- 0 0 0 0 0 0 0 0 0 0 0 0
36166- 0 0 0 0 0 0 0 0 0 0 0 0
36167- 0 0 0 0 0 0 0 0 0 0 0 0
36168- 0 0 0 0 0 0 0 0 0 0 0 0
36169- 0 0 0 0 0 0 0 0 0 0 0 0
36170- 0 0 0 0 0 0 0 0 0 0 0 0
36171- 0 0 0 0 0 0 0 0 0 0 0 0
36172- 0 0 0 0 0 1 0 0 1 0 0 0
36173- 0 0 0 0 0 0 0 0 0 0 0 0
36174- 0 0 0 0 0 0 0 0 0 0 0 0
36175- 0 0 0 0 0 0 0 0 0 0 0 0
36176- 0 0 0 0 0 0 0 0 0 0 0 0
36177- 0 0 0 0 0 0 0 0 0 6 6 6
36178- 30 30 30 78 78 78 30 30 30 2 2 6
36179- 2 2 6 2 2 6 2 2 6 2 2 6
36180- 2 2 6 2 2 6 2 2 6 2 2 6
36181- 2 2 6 2 2 6 2 2 6 10 10 10
36182- 10 10 10 2 2 6 2 2 6 2 2 6
36183- 2 2 6 2 2 6 2 2 6 78 78 78
36184- 50 50 50 18 18 18 6 6 6 0 0 0
36185- 0 0 0 0 0 0 0 0 0 0 0 0
36186- 0 0 0 0 0 0 0 0 0 0 0 0
36187- 0 0 0 0 0 0 0 0 0 0 0 0
36188- 0 0 0 0 0 0 0 0 0 0 0 0
36189- 0 0 0 0 0 0 0 0 0 0 0 0
36190- 0 0 0 0 0 0 0 0 0 0 0 0
36191- 0 0 0 0 0 0 0 0 0 0 0 0
36192- 0 0 1 0 0 0 0 0 0 0 0 0
36193- 0 0 0 0 0 0 0 0 0 0 0 0
36194- 0 0 0 0 0 0 0 0 0 0 0 0
36195- 0 0 0 0 0 0 0 0 0 0 0 0
36196- 0 0 0 0 0 0 0 0 0 0 0 0
36197- 0 0 0 0 0 0 0 0 0 10 10 10
36198- 38 38 38 86 86 86 14 14 14 2 2 6
36199- 2 2 6 2 2 6 2 2 6 2 2 6
36200- 2 2 6 2 2 6 2 2 6 2 2 6
36201- 2 2 6 2 2 6 2 2 6 2 2 6
36202- 2 2 6 2 2 6 2 2 6 2 2 6
36203- 2 2 6 2 2 6 2 2 6 54 54 54
36204- 66 66 66 26 26 26 6 6 6 0 0 0
36205- 0 0 0 0 0 0 0 0 0 0 0 0
36206- 0 0 0 0 0 0 0 0 0 0 0 0
36207- 0 0 0 0 0 0 0 0 0 0 0 0
36208- 0 0 0 0 0 0 0 0 0 0 0 0
36209- 0 0 0 0 0 0 0 0 0 0 0 0
36210- 0 0 0 0 0 0 0 0 0 0 0 0
36211- 0 0 0 0 0 0 0 0 0 0 0 0
36212- 0 0 0 0 0 1 0 0 1 0 0 0
36213- 0 0 0 0 0 0 0 0 0 0 0 0
36214- 0 0 0 0 0 0 0 0 0 0 0 0
36215- 0 0 0 0 0 0 0 0 0 0 0 0
36216- 0 0 0 0 0 0 0 0 0 0 0 0
36217- 0 0 0 0 0 0 0 0 0 14 14 14
36218- 42 42 42 82 82 82 2 2 6 2 2 6
36219- 2 2 6 6 6 6 10 10 10 2 2 6
36220- 2 2 6 2 2 6 2 2 6 2 2 6
36221- 2 2 6 2 2 6 2 2 6 6 6 6
36222- 14 14 14 10 10 10 2 2 6 2 2 6
36223- 2 2 6 2 2 6 2 2 6 18 18 18
36224- 82 82 82 34 34 34 10 10 10 0 0 0
36225- 0 0 0 0 0 0 0 0 0 0 0 0
36226- 0 0 0 0 0 0 0 0 0 0 0 0
36227- 0 0 0 0 0 0 0 0 0 0 0 0
36228- 0 0 0 0 0 0 0 0 0 0 0 0
36229- 0 0 0 0 0 0 0 0 0 0 0 0
36230- 0 0 0 0 0 0 0 0 0 0 0 0
36231- 0 0 0 0 0 0 0 0 0 0 0 0
36232- 0 0 1 0 0 0 0 0 0 0 0 0
36233- 0 0 0 0 0 0 0 0 0 0 0 0
36234- 0 0 0 0 0 0 0 0 0 0 0 0
36235- 0 0 0 0 0 0 0 0 0 0 0 0
36236- 0 0 0 0 0 0 0 0 0 0 0 0
36237- 0 0 0 0 0 0 0 0 0 14 14 14
36238- 46 46 46 86 86 86 2 2 6 2 2 6
36239- 6 6 6 6 6 6 22 22 22 34 34 34
36240- 6 6 6 2 2 6 2 2 6 2 2 6
36241- 2 2 6 2 2 6 18 18 18 34 34 34
36242- 10 10 10 50 50 50 22 22 22 2 2 6
36243- 2 2 6 2 2 6 2 2 6 10 10 10
36244- 86 86 86 42 42 42 14 14 14 0 0 0
36245- 0 0 0 0 0 0 0 0 0 0 0 0
36246- 0 0 0 0 0 0 0 0 0 0 0 0
36247- 0 0 0 0 0 0 0 0 0 0 0 0
36248- 0 0 0 0 0 0 0 0 0 0 0 0
36249- 0 0 0 0 0 0 0 0 0 0 0 0
36250- 0 0 0 0 0 0 0 0 0 0 0 0
36251- 0 0 0 0 0 0 0 0 0 0 0 0
36252- 0 0 1 0 0 1 0 0 1 0 0 0
36253- 0 0 0 0 0 0 0 0 0 0 0 0
36254- 0 0 0 0 0 0 0 0 0 0 0 0
36255- 0 0 0 0 0 0 0 0 0 0 0 0
36256- 0 0 0 0 0 0 0 0 0 0 0 0
36257- 0 0 0 0 0 0 0 0 0 14 14 14
36258- 46 46 46 86 86 86 2 2 6 2 2 6
36259- 38 38 38 116 116 116 94 94 94 22 22 22
36260- 22 22 22 2 2 6 2 2 6 2 2 6
36261- 14 14 14 86 86 86 138 138 138 162 162 162
36262-154 154 154 38 38 38 26 26 26 6 6 6
36263- 2 2 6 2 2 6 2 2 6 2 2 6
36264- 86 86 86 46 46 46 14 14 14 0 0 0
36265- 0 0 0 0 0 0 0 0 0 0 0 0
36266- 0 0 0 0 0 0 0 0 0 0 0 0
36267- 0 0 0 0 0 0 0 0 0 0 0 0
36268- 0 0 0 0 0 0 0 0 0 0 0 0
36269- 0 0 0 0 0 0 0 0 0 0 0 0
36270- 0 0 0 0 0 0 0 0 0 0 0 0
36271- 0 0 0 0 0 0 0 0 0 0 0 0
36272- 0 0 0 0 0 0 0 0 0 0 0 0
36273- 0 0 0 0 0 0 0 0 0 0 0 0
36274- 0 0 0 0 0 0 0 0 0 0 0 0
36275- 0 0 0 0 0 0 0 0 0 0 0 0
36276- 0 0 0 0 0 0 0 0 0 0 0 0
36277- 0 0 0 0 0 0 0 0 0 14 14 14
36278- 46 46 46 86 86 86 2 2 6 14 14 14
36279-134 134 134 198 198 198 195 195 195 116 116 116
36280- 10 10 10 2 2 6 2 2 6 6 6 6
36281-101 98 89 187 187 187 210 210 210 218 218 218
36282-214 214 214 134 134 134 14 14 14 6 6 6
36283- 2 2 6 2 2 6 2 2 6 2 2 6
36284- 86 86 86 50 50 50 18 18 18 6 6 6
36285- 0 0 0 0 0 0 0 0 0 0 0 0
36286- 0 0 0 0 0 0 0 0 0 0 0 0
36287- 0 0 0 0 0 0 0 0 0 0 0 0
36288- 0 0 0 0 0 0 0 0 0 0 0 0
36289- 0 0 0 0 0 0 0 0 0 0 0 0
36290- 0 0 0 0 0 0 0 0 0 0 0 0
36291- 0 0 0 0 0 0 0 0 1 0 0 0
36292- 0 0 1 0 0 1 0 0 1 0 0 0
36293- 0 0 0 0 0 0 0 0 0 0 0 0
36294- 0 0 0 0 0 0 0 0 0 0 0 0
36295- 0 0 0 0 0 0 0 0 0 0 0 0
36296- 0 0 0 0 0 0 0 0 0 0 0 0
36297- 0 0 0 0 0 0 0 0 0 14 14 14
36298- 46 46 46 86 86 86 2 2 6 54 54 54
36299-218 218 218 195 195 195 226 226 226 246 246 246
36300- 58 58 58 2 2 6 2 2 6 30 30 30
36301-210 210 210 253 253 253 174 174 174 123 123 123
36302-221 221 221 234 234 234 74 74 74 2 2 6
36303- 2 2 6 2 2 6 2 2 6 2 2 6
36304- 70 70 70 58 58 58 22 22 22 6 6 6
36305- 0 0 0 0 0 0 0 0 0 0 0 0
36306- 0 0 0 0 0 0 0 0 0 0 0 0
36307- 0 0 0 0 0 0 0 0 0 0 0 0
36308- 0 0 0 0 0 0 0 0 0 0 0 0
36309- 0 0 0 0 0 0 0 0 0 0 0 0
36310- 0 0 0 0 0 0 0 0 0 0 0 0
36311- 0 0 0 0 0 0 0 0 0 0 0 0
36312- 0 0 0 0 0 0 0 0 0 0 0 0
36313- 0 0 0 0 0 0 0 0 0 0 0 0
36314- 0 0 0 0 0 0 0 0 0 0 0 0
36315- 0 0 0 0 0 0 0 0 0 0 0 0
36316- 0 0 0 0 0 0 0 0 0 0 0 0
36317- 0 0 0 0 0 0 0 0 0 14 14 14
36318- 46 46 46 82 82 82 2 2 6 106 106 106
36319-170 170 170 26 26 26 86 86 86 226 226 226
36320-123 123 123 10 10 10 14 14 14 46 46 46
36321-231 231 231 190 190 190 6 6 6 70 70 70
36322- 90 90 90 238 238 238 158 158 158 2 2 6
36323- 2 2 6 2 2 6 2 2 6 2 2 6
36324- 70 70 70 58 58 58 22 22 22 6 6 6
36325- 0 0 0 0 0 0 0 0 0 0 0 0
36326- 0 0 0 0 0 0 0 0 0 0 0 0
36327- 0 0 0 0 0 0 0 0 0 0 0 0
36328- 0 0 0 0 0 0 0 0 0 0 0 0
36329- 0 0 0 0 0 0 0 0 0 0 0 0
36330- 0 0 0 0 0 0 0 0 0 0 0 0
36331- 0 0 0 0 0 0 0 0 1 0 0 0
36332- 0 0 1 0 0 1 0 0 1 0 0 0
36333- 0 0 0 0 0 0 0 0 0 0 0 0
36334- 0 0 0 0 0 0 0 0 0 0 0 0
36335- 0 0 0 0 0 0 0 0 0 0 0 0
36336- 0 0 0 0 0 0 0 0 0 0 0 0
36337- 0 0 0 0 0 0 0 0 0 14 14 14
36338- 42 42 42 86 86 86 6 6 6 116 116 116
36339-106 106 106 6 6 6 70 70 70 149 149 149
36340-128 128 128 18 18 18 38 38 38 54 54 54
36341-221 221 221 106 106 106 2 2 6 14 14 14
36342- 46 46 46 190 190 190 198 198 198 2 2 6
36343- 2 2 6 2 2 6 2 2 6 2 2 6
36344- 74 74 74 62 62 62 22 22 22 6 6 6
36345- 0 0 0 0 0 0 0 0 0 0 0 0
36346- 0 0 0 0 0 0 0 0 0 0 0 0
36347- 0 0 0 0 0 0 0 0 0 0 0 0
36348- 0 0 0 0 0 0 0 0 0 0 0 0
36349- 0 0 0 0 0 0 0 0 0 0 0 0
36350- 0 0 0 0 0 0 0 0 0 0 0 0
36351- 0 0 0 0 0 0 0 0 1 0 0 0
36352- 0 0 1 0 0 0 0 0 1 0 0 0
36353- 0 0 0 0 0 0 0 0 0 0 0 0
36354- 0 0 0 0 0 0 0 0 0 0 0 0
36355- 0 0 0 0 0 0 0 0 0 0 0 0
36356- 0 0 0 0 0 0 0 0 0 0 0 0
36357- 0 0 0 0 0 0 0 0 0 14 14 14
36358- 42 42 42 94 94 94 14 14 14 101 101 101
36359-128 128 128 2 2 6 18 18 18 116 116 116
36360-118 98 46 121 92 8 121 92 8 98 78 10
36361-162 162 162 106 106 106 2 2 6 2 2 6
36362- 2 2 6 195 195 195 195 195 195 6 6 6
36363- 2 2 6 2 2 6 2 2 6 2 2 6
36364- 74 74 74 62 62 62 22 22 22 6 6 6
36365- 0 0 0 0 0 0 0 0 0 0 0 0
36366- 0 0 0 0 0 0 0 0 0 0 0 0
36367- 0 0 0 0 0 0 0 0 0 0 0 0
36368- 0 0 0 0 0 0 0 0 0 0 0 0
36369- 0 0 0 0 0 0 0 0 0 0 0 0
36370- 0 0 0 0 0 0 0 0 0 0 0 0
36371- 0 0 0 0 0 0 0 0 1 0 0 1
36372- 0 0 1 0 0 0 0 0 1 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 0 0 0 0 0 0 0 0 0 0 0 0
36377- 0 0 0 0 0 0 0 0 0 10 10 10
36378- 38 38 38 90 90 90 14 14 14 58 58 58
36379-210 210 210 26 26 26 54 38 6 154 114 10
36380-226 170 11 236 186 11 225 175 15 184 144 12
36381-215 174 15 175 146 61 37 26 9 2 2 6
36382- 70 70 70 246 246 246 138 138 138 2 2 6
36383- 2 2 6 2 2 6 2 2 6 2 2 6
36384- 70 70 70 66 66 66 26 26 26 6 6 6
36385- 0 0 0 0 0 0 0 0 0 0 0 0
36386- 0 0 0 0 0 0 0 0 0 0 0 0
36387- 0 0 0 0 0 0 0 0 0 0 0 0
36388- 0 0 0 0 0 0 0 0 0 0 0 0
36389- 0 0 0 0 0 0 0 0 0 0 0 0
36390- 0 0 0 0 0 0 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 0 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 0 0 0 0 0 0 0
36397- 0 0 0 0 0 0 0 0 0 10 10 10
36398- 38 38 38 86 86 86 14 14 14 10 10 10
36399-195 195 195 188 164 115 192 133 9 225 175 15
36400-239 182 13 234 190 10 232 195 16 232 200 30
36401-245 207 45 241 208 19 232 195 16 184 144 12
36402-218 194 134 211 206 186 42 42 42 2 2 6
36403- 2 2 6 2 2 6 2 2 6 2 2 6
36404- 50 50 50 74 74 74 30 30 30 6 6 6
36405- 0 0 0 0 0 0 0 0 0 0 0 0
36406- 0 0 0 0 0 0 0 0 0 0 0 0
36407- 0 0 0 0 0 0 0 0 0 0 0 0
36408- 0 0 0 0 0 0 0 0 0 0 0 0
36409- 0 0 0 0 0 0 0 0 0 0 0 0
36410- 0 0 0 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 10 10 10
36418- 34 34 34 86 86 86 14 14 14 2 2 6
36419-121 87 25 192 133 9 219 162 10 239 182 13
36420-236 186 11 232 195 16 241 208 19 244 214 54
36421-246 218 60 246 218 38 246 215 20 241 208 19
36422-241 208 19 226 184 13 121 87 25 2 2 6
36423- 2 2 6 2 2 6 2 2 6 2 2 6
36424- 50 50 50 82 82 82 34 34 34 10 10 10
36425- 0 0 0 0 0 0 0 0 0 0 0 0
36426- 0 0 0 0 0 0 0 0 0 0 0 0
36427- 0 0 0 0 0 0 0 0 0 0 0 0
36428- 0 0 0 0 0 0 0 0 0 0 0 0
36429- 0 0 0 0 0 0 0 0 0 0 0 0
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 0 0 0 0 0 0 10 10 10
36438- 34 34 34 82 82 82 30 30 30 61 42 6
36439-180 123 7 206 145 10 230 174 11 239 182 13
36440-234 190 10 238 202 15 241 208 19 246 218 74
36441-246 218 38 246 215 20 246 215 20 246 215 20
36442-226 184 13 215 174 15 184 144 12 6 6 6
36443- 2 2 6 2 2 6 2 2 6 2 2 6
36444- 26 26 26 94 94 94 42 42 42 14 14 14
36445- 0 0 0 0 0 0 0 0 0 0 0 0
36446- 0 0 0 0 0 0 0 0 0 0 0 0
36447- 0 0 0 0 0 0 0 0 0 0 0 0
36448- 0 0 0 0 0 0 0 0 0 0 0 0
36449- 0 0 0 0 0 0 0 0 0 0 0 0
36450- 0 0 0 0 0 0 0 0 0 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 10 10 10
36458- 30 30 30 78 78 78 50 50 50 104 69 6
36459-192 133 9 216 158 10 236 178 12 236 186 11
36460-232 195 16 241 208 19 244 214 54 245 215 43
36461-246 215 20 246 215 20 241 208 19 198 155 10
36462-200 144 11 216 158 10 156 118 10 2 2 6
36463- 2 2 6 2 2 6 2 2 6 2 2 6
36464- 6 6 6 90 90 90 54 54 54 18 18 18
36465- 6 6 6 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 10 10 10
36478- 30 30 30 78 78 78 46 46 46 22 22 22
36479-137 92 6 210 162 10 239 182 13 238 190 10
36480-238 202 15 241 208 19 246 215 20 246 215 20
36481-241 208 19 203 166 17 185 133 11 210 150 10
36482-216 158 10 210 150 10 102 78 10 2 2 6
36483- 6 6 6 54 54 54 14 14 14 2 2 6
36484- 2 2 6 62 62 62 74 74 74 30 30 30
36485- 10 10 10 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 10 10 10
36498- 34 34 34 78 78 78 50 50 50 6 6 6
36499- 94 70 30 139 102 15 190 146 13 226 184 13
36500-232 200 30 232 195 16 215 174 15 190 146 13
36501-168 122 10 192 133 9 210 150 10 213 154 11
36502-202 150 34 182 157 106 101 98 89 2 2 6
36503- 2 2 6 78 78 78 116 116 116 58 58 58
36504- 2 2 6 22 22 22 90 90 90 46 46 46
36505- 18 18 18 6 6 6 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 10 10 10
36518- 38 38 38 86 86 86 50 50 50 6 6 6
36519-128 128 128 174 154 114 156 107 11 168 122 10
36520-198 155 10 184 144 12 197 138 11 200 144 11
36521-206 145 10 206 145 10 197 138 11 188 164 115
36522-195 195 195 198 198 198 174 174 174 14 14 14
36523- 2 2 6 22 22 22 116 116 116 116 116 116
36524- 22 22 22 2 2 6 74 74 74 70 70 70
36525- 30 30 30 10 10 10 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 6 6 6 18 18 18
36538- 50 50 50 101 101 101 26 26 26 10 10 10
36539-138 138 138 190 190 190 174 154 114 156 107 11
36540-197 138 11 200 144 11 197 138 11 192 133 9
36541-180 123 7 190 142 34 190 178 144 187 187 187
36542-202 202 202 221 221 221 214 214 214 66 66 66
36543- 2 2 6 2 2 6 50 50 50 62 62 62
36544- 6 6 6 2 2 6 10 10 10 90 90 90
36545- 50 50 50 18 18 18 6 6 6 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 10 10 10 34 34 34
36558- 74 74 74 74 74 74 2 2 6 6 6 6
36559-144 144 144 198 198 198 190 190 190 178 166 146
36560-154 121 60 156 107 11 156 107 11 168 124 44
36561-174 154 114 187 187 187 190 190 190 210 210 210
36562-246 246 246 253 253 253 253 253 253 182 182 182
36563- 6 6 6 2 2 6 2 2 6 2 2 6
36564- 2 2 6 2 2 6 2 2 6 62 62 62
36565- 74 74 74 34 34 34 14 14 14 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 10 10 10 22 22 22 54 54 54
36578- 94 94 94 18 18 18 2 2 6 46 46 46
36579-234 234 234 221 221 221 190 190 190 190 190 190
36580-190 190 190 187 187 187 187 187 187 190 190 190
36581-190 190 190 195 195 195 214 214 214 242 242 242
36582-253 253 253 253 253 253 253 253 253 253 253 253
36583- 82 82 82 2 2 6 2 2 6 2 2 6
36584- 2 2 6 2 2 6 2 2 6 14 14 14
36585- 86 86 86 54 54 54 22 22 22 6 6 6
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 6 6 6 18 18 18 46 46 46 90 90 90
36598- 46 46 46 18 18 18 6 6 6 182 182 182
36599-253 253 253 246 246 246 206 206 206 190 190 190
36600-190 190 190 190 190 190 190 190 190 190 190 190
36601-206 206 206 231 231 231 250 250 250 253 253 253
36602-253 253 253 253 253 253 253 253 253 253 253 253
36603-202 202 202 14 14 14 2 2 6 2 2 6
36604- 2 2 6 2 2 6 2 2 6 2 2 6
36605- 42 42 42 86 86 86 42 42 42 18 18 18
36606- 6 6 6 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 6 6 6
36617- 14 14 14 38 38 38 74 74 74 66 66 66
36618- 2 2 6 6 6 6 90 90 90 250 250 250
36619-253 253 253 253 253 253 238 238 238 198 198 198
36620-190 190 190 190 190 190 195 195 195 221 221 221
36621-246 246 246 253 253 253 253 253 253 253 253 253
36622-253 253 253 253 253 253 253 253 253 253 253 253
36623-253 253 253 82 82 82 2 2 6 2 2 6
36624- 2 2 6 2 2 6 2 2 6 2 2 6
36625- 2 2 6 78 78 78 70 70 70 34 34 34
36626- 14 14 14 6 6 6 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 14 14 14
36637- 34 34 34 66 66 66 78 78 78 6 6 6
36638- 2 2 6 18 18 18 218 218 218 253 253 253
36639-253 253 253 253 253 253 253 253 253 246 246 246
36640-226 226 226 231 231 231 246 246 246 253 253 253
36641-253 253 253 253 253 253 253 253 253 253 253 253
36642-253 253 253 253 253 253 253 253 253 253 253 253
36643-253 253 253 178 178 178 2 2 6 2 2 6
36644- 2 2 6 2 2 6 2 2 6 2 2 6
36645- 2 2 6 18 18 18 90 90 90 62 62 62
36646- 30 30 30 10 10 10 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 10 10 10 26 26 26
36657- 58 58 58 90 90 90 18 18 18 2 2 6
36658- 2 2 6 110 110 110 253 253 253 253 253 253
36659-253 253 253 253 253 253 253 253 253 253 253 253
36660-250 250 250 253 253 253 253 253 253 253 253 253
36661-253 253 253 253 253 253 253 253 253 253 253 253
36662-253 253 253 253 253 253 253 253 253 253 253 253
36663-253 253 253 231 231 231 18 18 18 2 2 6
36664- 2 2 6 2 2 6 2 2 6 2 2 6
36665- 2 2 6 2 2 6 18 18 18 94 94 94
36666- 54 54 54 26 26 26 10 10 10 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 6 6 6 22 22 22 50 50 50
36677- 90 90 90 26 26 26 2 2 6 2 2 6
36678- 14 14 14 195 195 195 250 250 250 253 253 253
36679-253 253 253 253 253 253 253 253 253 253 253 253
36680-253 253 253 253 253 253 253 253 253 253 253 253
36681-253 253 253 253 253 253 253 253 253 253 253 253
36682-253 253 253 253 253 253 253 253 253 253 253 253
36683-250 250 250 242 242 242 54 54 54 2 2 6
36684- 2 2 6 2 2 6 2 2 6 2 2 6
36685- 2 2 6 2 2 6 2 2 6 38 38 38
36686- 86 86 86 50 50 50 22 22 22 6 6 6
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 6 6 6 14 14 14 38 38 38 82 82 82
36697- 34 34 34 2 2 6 2 2 6 2 2 6
36698- 42 42 42 195 195 195 246 246 246 253 253 253
36699-253 253 253 253 253 253 253 253 253 250 250 250
36700-242 242 242 242 242 242 250 250 250 253 253 253
36701-253 253 253 253 253 253 253 253 253 253 253 253
36702-253 253 253 250 250 250 246 246 246 238 238 238
36703-226 226 226 231 231 231 101 101 101 6 6 6
36704- 2 2 6 2 2 6 2 2 6 2 2 6
36705- 2 2 6 2 2 6 2 2 6 2 2 6
36706- 38 38 38 82 82 82 42 42 42 14 14 14
36707- 6 6 6 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 10 10 10 26 26 26 62 62 62 66 66 66
36717- 2 2 6 2 2 6 2 2 6 6 6 6
36718- 70 70 70 170 170 170 206 206 206 234 234 234
36719-246 246 246 250 250 250 250 250 250 238 238 238
36720-226 226 226 231 231 231 238 238 238 250 250 250
36721-250 250 250 250 250 250 246 246 246 231 231 231
36722-214 214 214 206 206 206 202 202 202 202 202 202
36723-198 198 198 202 202 202 182 182 182 18 18 18
36724- 2 2 6 2 2 6 2 2 6 2 2 6
36725- 2 2 6 2 2 6 2 2 6 2 2 6
36726- 2 2 6 62 62 62 66 66 66 30 30 30
36727- 10 10 10 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 14 14 14 42 42 42 82 82 82 18 18 18
36737- 2 2 6 2 2 6 2 2 6 10 10 10
36738- 94 94 94 182 182 182 218 218 218 242 242 242
36739-250 250 250 253 253 253 253 253 253 250 250 250
36740-234 234 234 253 253 253 253 253 253 253 253 253
36741-253 253 253 253 253 253 253 253 253 246 246 246
36742-238 238 238 226 226 226 210 210 210 202 202 202
36743-195 195 195 195 195 195 210 210 210 158 158 158
36744- 6 6 6 14 14 14 50 50 50 14 14 14
36745- 2 2 6 2 2 6 2 2 6 2 2 6
36746- 2 2 6 6 6 6 86 86 86 46 46 46
36747- 18 18 18 6 6 6 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 6 6 6
36756- 22 22 22 54 54 54 70 70 70 2 2 6
36757- 2 2 6 10 10 10 2 2 6 22 22 22
36758-166 166 166 231 231 231 250 250 250 253 253 253
36759-253 253 253 253 253 253 253 253 253 250 250 250
36760-242 242 242 253 253 253 253 253 253 253 253 253
36761-253 253 253 253 253 253 253 253 253 253 253 253
36762-253 253 253 253 253 253 253 253 253 246 246 246
36763-231 231 231 206 206 206 198 198 198 226 226 226
36764- 94 94 94 2 2 6 6 6 6 38 38 38
36765- 30 30 30 2 2 6 2 2 6 2 2 6
36766- 2 2 6 2 2 6 62 62 62 66 66 66
36767- 26 26 26 10 10 10 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 10 10 10
36776- 30 30 30 74 74 74 50 50 50 2 2 6
36777- 26 26 26 26 26 26 2 2 6 106 106 106
36778-238 238 238 253 253 253 253 253 253 253 253 253
36779-253 253 253 253 253 253 253 253 253 253 253 253
36780-253 253 253 253 253 253 253 253 253 253 253 253
36781-253 253 253 253 253 253 253 253 253 253 253 253
36782-253 253 253 253 253 253 253 253 253 253 253 253
36783-253 253 253 246 246 246 218 218 218 202 202 202
36784-210 210 210 14 14 14 2 2 6 2 2 6
36785- 30 30 30 22 22 22 2 2 6 2 2 6
36786- 2 2 6 2 2 6 18 18 18 86 86 86
36787- 42 42 42 14 14 14 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 14 14 14
36796- 42 42 42 90 90 90 22 22 22 2 2 6
36797- 42 42 42 2 2 6 18 18 18 218 218 218
36798-253 253 253 253 253 253 253 253 253 253 253 253
36799-253 253 253 253 253 253 253 253 253 253 253 253
36800-253 253 253 253 253 253 253 253 253 253 253 253
36801-253 253 253 253 253 253 253 253 253 253 253 253
36802-253 253 253 253 253 253 253 253 253 253 253 253
36803-253 253 253 253 253 253 250 250 250 221 221 221
36804-218 218 218 101 101 101 2 2 6 14 14 14
36805- 18 18 18 38 38 38 10 10 10 2 2 6
36806- 2 2 6 2 2 6 2 2 6 78 78 78
36807- 58 58 58 22 22 22 6 6 6 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 6 6 6 18 18 18
36816- 54 54 54 82 82 82 2 2 6 26 26 26
36817- 22 22 22 2 2 6 123 123 123 253 253 253
36818-253 253 253 253 253 253 253 253 253 253 253 253
36819-253 253 253 253 253 253 253 253 253 253 253 253
36820-253 253 253 253 253 253 253 253 253 253 253 253
36821-253 253 253 253 253 253 253 253 253 253 253 253
36822-253 253 253 253 253 253 253 253 253 253 253 253
36823-253 253 253 253 253 253 253 253 253 250 250 250
36824-238 238 238 198 198 198 6 6 6 38 38 38
36825- 58 58 58 26 26 26 38 38 38 2 2 6
36826- 2 2 6 2 2 6 2 2 6 46 46 46
36827- 78 78 78 30 30 30 10 10 10 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 10 10 10 30 30 30
36836- 74 74 74 58 58 58 2 2 6 42 42 42
36837- 2 2 6 22 22 22 231 231 231 253 253 253
36838-253 253 253 253 253 253 253 253 253 253 253 253
36839-253 253 253 253 253 253 253 253 253 250 250 250
36840-253 253 253 253 253 253 253 253 253 253 253 253
36841-253 253 253 253 253 253 253 253 253 253 253 253
36842-253 253 253 253 253 253 253 253 253 253 253 253
36843-253 253 253 253 253 253 253 253 253 253 253 253
36844-253 253 253 246 246 246 46 46 46 38 38 38
36845- 42 42 42 14 14 14 38 38 38 14 14 14
36846- 2 2 6 2 2 6 2 2 6 6 6 6
36847- 86 86 86 46 46 46 14 14 14 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 6 6 6 14 14 14 42 42 42
36856- 90 90 90 18 18 18 18 18 18 26 26 26
36857- 2 2 6 116 116 116 253 253 253 253 253 253
36858-253 253 253 253 253 253 253 253 253 253 253 253
36859-253 253 253 253 253 253 250 250 250 238 238 238
36860-253 253 253 253 253 253 253 253 253 253 253 253
36861-253 253 253 253 253 253 253 253 253 253 253 253
36862-253 253 253 253 253 253 253 253 253 253 253 253
36863-253 253 253 253 253 253 253 253 253 253 253 253
36864-253 253 253 253 253 253 94 94 94 6 6 6
36865- 2 2 6 2 2 6 10 10 10 34 34 34
36866- 2 2 6 2 2 6 2 2 6 2 2 6
36867- 74 74 74 58 58 58 22 22 22 6 6 6
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 10 10 10 26 26 26 66 66 66
36876- 82 82 82 2 2 6 38 38 38 6 6 6
36877- 14 14 14 210 210 210 253 253 253 253 253 253
36878-253 253 253 253 253 253 253 253 253 253 253 253
36879-253 253 253 253 253 253 246 246 246 242 242 242
36880-253 253 253 253 253 253 253 253 253 253 253 253
36881-253 253 253 253 253 253 253 253 253 253 253 253
36882-253 253 253 253 253 253 253 253 253 253 253 253
36883-253 253 253 253 253 253 253 253 253 253 253 253
36884-253 253 253 253 253 253 144 144 144 2 2 6
36885- 2 2 6 2 2 6 2 2 6 46 46 46
36886- 2 2 6 2 2 6 2 2 6 2 2 6
36887- 42 42 42 74 74 74 30 30 30 10 10 10
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 6 6 6 14 14 14 42 42 42 90 90 90
36896- 26 26 26 6 6 6 42 42 42 2 2 6
36897- 74 74 74 250 250 250 253 253 253 253 253 253
36898-253 253 253 253 253 253 253 253 253 253 253 253
36899-253 253 253 253 253 253 242 242 242 242 242 242
36900-253 253 253 253 253 253 253 253 253 253 253 253
36901-253 253 253 253 253 253 253 253 253 253 253 253
36902-253 253 253 253 253 253 253 253 253 253 253 253
36903-253 253 253 253 253 253 253 253 253 253 253 253
36904-253 253 253 253 253 253 182 182 182 2 2 6
36905- 2 2 6 2 2 6 2 2 6 46 46 46
36906- 2 2 6 2 2 6 2 2 6 2 2 6
36907- 10 10 10 86 86 86 38 38 38 10 10 10
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 10 10 10 26 26 26 66 66 66 82 82 82
36916- 2 2 6 22 22 22 18 18 18 2 2 6
36917-149 149 149 253 253 253 253 253 253 253 253 253
36918-253 253 253 253 253 253 253 253 253 253 253 253
36919-253 253 253 253 253 253 234 234 234 242 242 242
36920-253 253 253 253 253 253 253 253 253 253 253 253
36921-253 253 253 253 253 253 253 253 253 253 253 253
36922-253 253 253 253 253 253 253 253 253 253 253 253
36923-253 253 253 253 253 253 253 253 253 253 253 253
36924-253 253 253 253 253 253 206 206 206 2 2 6
36925- 2 2 6 2 2 6 2 2 6 38 38 38
36926- 2 2 6 2 2 6 2 2 6 2 2 6
36927- 6 6 6 86 86 86 46 46 46 14 14 14
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 6 6 6
36935- 18 18 18 46 46 46 86 86 86 18 18 18
36936- 2 2 6 34 34 34 10 10 10 6 6 6
36937-210 210 210 253 253 253 253 253 253 253 253 253
36938-253 253 253 253 253 253 253 253 253 253 253 253
36939-253 253 253 253 253 253 234 234 234 242 242 242
36940-253 253 253 253 253 253 253 253 253 253 253 253
36941-253 253 253 253 253 253 253 253 253 253 253 253
36942-253 253 253 253 253 253 253 253 253 253 253 253
36943-253 253 253 253 253 253 253 253 253 253 253 253
36944-253 253 253 253 253 253 221 221 221 6 6 6
36945- 2 2 6 2 2 6 6 6 6 30 30 30
36946- 2 2 6 2 2 6 2 2 6 2 2 6
36947- 2 2 6 82 82 82 54 54 54 18 18 18
36948- 6 6 6 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 10 10 10
36955- 26 26 26 66 66 66 62 62 62 2 2 6
36956- 2 2 6 38 38 38 10 10 10 26 26 26
36957-238 238 238 253 253 253 253 253 253 253 253 253
36958-253 253 253 253 253 253 253 253 253 253 253 253
36959-253 253 253 253 253 253 231 231 231 238 238 238
36960-253 253 253 253 253 253 253 253 253 253 253 253
36961-253 253 253 253 253 253 253 253 253 253 253 253
36962-253 253 253 253 253 253 253 253 253 253 253 253
36963-253 253 253 253 253 253 253 253 253 253 253 253
36964-253 253 253 253 253 253 231 231 231 6 6 6
36965- 2 2 6 2 2 6 10 10 10 30 30 30
36966- 2 2 6 2 2 6 2 2 6 2 2 6
36967- 2 2 6 66 66 66 58 58 58 22 22 22
36968- 6 6 6 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 10 10 10
36975- 38 38 38 78 78 78 6 6 6 2 2 6
36976- 2 2 6 46 46 46 14 14 14 42 42 42
36977-246 246 246 253 253 253 253 253 253 253 253 253
36978-253 253 253 253 253 253 253 253 253 253 253 253
36979-253 253 253 253 253 253 231 231 231 242 242 242
36980-253 253 253 253 253 253 253 253 253 253 253 253
36981-253 253 253 253 253 253 253 253 253 253 253 253
36982-253 253 253 253 253 253 253 253 253 253 253 253
36983-253 253 253 253 253 253 253 253 253 253 253 253
36984-253 253 253 253 253 253 234 234 234 10 10 10
36985- 2 2 6 2 2 6 22 22 22 14 14 14
36986- 2 2 6 2 2 6 2 2 6 2 2 6
36987- 2 2 6 66 66 66 62 62 62 22 22 22
36988- 6 6 6 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 6 6 6 18 18 18
36995- 50 50 50 74 74 74 2 2 6 2 2 6
36996- 14 14 14 70 70 70 34 34 34 62 62 62
36997-250 250 250 253 253 253 253 253 253 253 253 253
36998-253 253 253 253 253 253 253 253 253 253 253 253
36999-253 253 253 253 253 253 231 231 231 246 246 246
37000-253 253 253 253 253 253 253 253 253 253 253 253
37001-253 253 253 253 253 253 253 253 253 253 253 253
37002-253 253 253 253 253 253 253 253 253 253 253 253
37003-253 253 253 253 253 253 253 253 253 253 253 253
37004-253 253 253 253 253 253 234 234 234 14 14 14
37005- 2 2 6 2 2 6 30 30 30 2 2 6
37006- 2 2 6 2 2 6 2 2 6 2 2 6
37007- 2 2 6 66 66 66 62 62 62 22 22 22
37008- 6 6 6 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 6 6 6 18 18 18
37015- 54 54 54 62 62 62 2 2 6 2 2 6
37016- 2 2 6 30 30 30 46 46 46 70 70 70
37017-250 250 250 253 253 253 253 253 253 253 253 253
37018-253 253 253 253 253 253 253 253 253 253 253 253
37019-253 253 253 253 253 253 231 231 231 246 246 246
37020-253 253 253 253 253 253 253 253 253 253 253 253
37021-253 253 253 253 253 253 253 253 253 253 253 253
37022-253 253 253 253 253 253 253 253 253 253 253 253
37023-253 253 253 253 253 253 253 253 253 253 253 253
37024-253 253 253 253 253 253 226 226 226 10 10 10
37025- 2 2 6 6 6 6 30 30 30 2 2 6
37026- 2 2 6 2 2 6 2 2 6 2 2 6
37027- 2 2 6 66 66 66 58 58 58 22 22 22
37028- 6 6 6 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 6 6 6 22 22 22
37035- 58 58 58 62 62 62 2 2 6 2 2 6
37036- 2 2 6 2 2 6 30 30 30 78 78 78
37037-250 250 250 253 253 253 253 253 253 253 253 253
37038-253 253 253 253 253 253 253 253 253 253 253 253
37039-253 253 253 253 253 253 231 231 231 246 246 246
37040-253 253 253 253 253 253 253 253 253 253 253 253
37041-253 253 253 253 253 253 253 253 253 253 253 253
37042-253 253 253 253 253 253 253 253 253 253 253 253
37043-253 253 253 253 253 253 253 253 253 253 253 253
37044-253 253 253 253 253 253 206 206 206 2 2 6
37045- 22 22 22 34 34 34 18 14 6 22 22 22
37046- 26 26 26 18 18 18 6 6 6 2 2 6
37047- 2 2 6 82 82 82 54 54 54 18 18 18
37048- 6 6 6 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 6 6 6 26 26 26
37055- 62 62 62 106 106 106 74 54 14 185 133 11
37056-210 162 10 121 92 8 6 6 6 62 62 62
37057-238 238 238 253 253 253 253 253 253 253 253 253
37058-253 253 253 253 253 253 253 253 253 253 253 253
37059-253 253 253 253 253 253 231 231 231 246 246 246
37060-253 253 253 253 253 253 253 253 253 253 253 253
37061-253 253 253 253 253 253 253 253 253 253 253 253
37062-253 253 253 253 253 253 253 253 253 253 253 253
37063-253 253 253 253 253 253 253 253 253 253 253 253
37064-253 253 253 253 253 253 158 158 158 18 18 18
37065- 14 14 14 2 2 6 2 2 6 2 2 6
37066- 6 6 6 18 18 18 66 66 66 38 38 38
37067- 6 6 6 94 94 94 50 50 50 18 18 18
37068- 6 6 6 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 6 6 6
37074- 10 10 10 10 10 10 18 18 18 38 38 38
37075- 78 78 78 142 134 106 216 158 10 242 186 14
37076-246 190 14 246 190 14 156 118 10 10 10 10
37077- 90 90 90 238 238 238 253 253 253 253 253 253
37078-253 253 253 253 253 253 253 253 253 253 253 253
37079-253 253 253 253 253 253 231 231 231 250 250 250
37080-253 253 253 253 253 253 253 253 253 253 253 253
37081-253 253 253 253 253 253 253 253 253 253 253 253
37082-253 253 253 253 253 253 253 253 253 253 253 253
37083-253 253 253 253 253 253 253 253 253 246 230 190
37084-238 204 91 238 204 91 181 142 44 37 26 9
37085- 2 2 6 2 2 6 2 2 6 2 2 6
37086- 2 2 6 2 2 6 38 38 38 46 46 46
37087- 26 26 26 106 106 106 54 54 54 18 18 18
37088- 6 6 6 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 6 6 6 14 14 14 22 22 22
37094- 30 30 30 38 38 38 50 50 50 70 70 70
37095-106 106 106 190 142 34 226 170 11 242 186 14
37096-246 190 14 246 190 14 246 190 14 154 114 10
37097- 6 6 6 74 74 74 226 226 226 253 253 253
37098-253 253 253 253 253 253 253 253 253 253 253 253
37099-253 253 253 253 253 253 231 231 231 250 250 250
37100-253 253 253 253 253 253 253 253 253 253 253 253
37101-253 253 253 253 253 253 253 253 253 253 253 253
37102-253 253 253 253 253 253 253 253 253 253 253 253
37103-253 253 253 253 253 253 253 253 253 228 184 62
37104-241 196 14 241 208 19 232 195 16 38 30 10
37105- 2 2 6 2 2 6 2 2 6 2 2 6
37106- 2 2 6 6 6 6 30 30 30 26 26 26
37107-203 166 17 154 142 90 66 66 66 26 26 26
37108- 6 6 6 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 6 6 6 18 18 18 38 38 38 58 58 58
37114- 78 78 78 86 86 86 101 101 101 123 123 123
37115-175 146 61 210 150 10 234 174 13 246 186 14
37116-246 190 14 246 190 14 246 190 14 238 190 10
37117-102 78 10 2 2 6 46 46 46 198 198 198
37118-253 253 253 253 253 253 253 253 253 253 253 253
37119-253 253 253 253 253 253 234 234 234 242 242 242
37120-253 253 253 253 253 253 253 253 253 253 253 253
37121-253 253 253 253 253 253 253 253 253 253 253 253
37122-253 253 253 253 253 253 253 253 253 253 253 253
37123-253 253 253 253 253 253 253 253 253 224 178 62
37124-242 186 14 241 196 14 210 166 10 22 18 6
37125- 2 2 6 2 2 6 2 2 6 2 2 6
37126- 2 2 6 2 2 6 6 6 6 121 92 8
37127-238 202 15 232 195 16 82 82 82 34 34 34
37128- 10 10 10 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 14 14 14 38 38 38 70 70 70 154 122 46
37134-190 142 34 200 144 11 197 138 11 197 138 11
37135-213 154 11 226 170 11 242 186 14 246 190 14
37136-246 190 14 246 190 14 246 190 14 246 190 14
37137-225 175 15 46 32 6 2 2 6 22 22 22
37138-158 158 158 250 250 250 253 253 253 253 253 253
37139-253 253 253 253 253 253 253 253 253 253 253 253
37140-253 253 253 253 253 253 253 253 253 253 253 253
37141-253 253 253 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 250 250 250 242 242 242 224 178 62
37144-239 182 13 236 186 11 213 154 11 46 32 6
37145- 2 2 6 2 2 6 2 2 6 2 2 6
37146- 2 2 6 2 2 6 61 42 6 225 175 15
37147-238 190 10 236 186 11 112 100 78 42 42 42
37148- 14 14 14 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 6 6 6
37153- 22 22 22 54 54 54 154 122 46 213 154 11
37154-226 170 11 230 174 11 226 170 11 226 170 11
37155-236 178 12 242 186 14 246 190 14 246 190 14
37156-246 190 14 246 190 14 246 190 14 246 190 14
37157-241 196 14 184 144 12 10 10 10 2 2 6
37158- 6 6 6 116 116 116 242 242 242 253 253 253
37159-253 253 253 253 253 253 253 253 253 253 253 253
37160-253 253 253 253 253 253 253 253 253 253 253 253
37161-253 253 253 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 231 231 231 198 198 198 214 170 54
37164-236 178 12 236 178 12 210 150 10 137 92 6
37165- 18 14 6 2 2 6 2 2 6 2 2 6
37166- 6 6 6 70 47 6 200 144 11 236 178 12
37167-239 182 13 239 182 13 124 112 88 58 58 58
37168- 22 22 22 6 6 6 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 10 10 10
37173- 30 30 30 70 70 70 180 133 36 226 170 11
37174-239 182 13 242 186 14 242 186 14 246 186 14
37175-246 190 14 246 190 14 246 190 14 246 190 14
37176-246 190 14 246 190 14 246 190 14 246 190 14
37177-246 190 14 232 195 16 98 70 6 2 2 6
37178- 2 2 6 2 2 6 66 66 66 221 221 221
37179-253 253 253 253 253 253 253 253 253 253 253 253
37180-253 253 253 253 253 253 253 253 253 253 253 253
37181-253 253 253 253 253 253 253 253 253 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 206 206 206 198 198 198 214 166 58
37184-230 174 11 230 174 11 216 158 10 192 133 9
37185-163 110 8 116 81 8 102 78 10 116 81 8
37186-167 114 7 197 138 11 226 170 11 239 182 13
37187-242 186 14 242 186 14 162 146 94 78 78 78
37188- 34 34 34 14 14 14 6 6 6 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 6 6 6
37193- 30 30 30 78 78 78 190 142 34 226 170 11
37194-239 182 13 246 190 14 246 190 14 246 190 14
37195-246 190 14 246 190 14 246 190 14 246 190 14
37196-246 190 14 246 190 14 246 190 14 246 190 14
37197-246 190 14 241 196 14 203 166 17 22 18 6
37198- 2 2 6 2 2 6 2 2 6 38 38 38
37199-218 218 218 253 253 253 253 253 253 253 253 253
37200-253 253 253 253 253 253 253 253 253 253 253 253
37201-253 253 253 253 253 253 253 253 253 253 253 253
37202-253 253 253 253 253 253 253 253 253 253 253 253
37203-250 250 250 206 206 206 198 198 198 202 162 69
37204-226 170 11 236 178 12 224 166 10 210 150 10
37205-200 144 11 197 138 11 192 133 9 197 138 11
37206-210 150 10 226 170 11 242 186 14 246 190 14
37207-246 190 14 246 186 14 225 175 15 124 112 88
37208- 62 62 62 30 30 30 14 14 14 6 6 6
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 10 10 10
37213- 30 30 30 78 78 78 174 135 50 224 166 10
37214-239 182 13 246 190 14 246 190 14 246 190 14
37215-246 190 14 246 190 14 246 190 14 246 190 14
37216-246 190 14 246 190 14 246 190 14 246 190 14
37217-246 190 14 246 190 14 241 196 14 139 102 15
37218- 2 2 6 2 2 6 2 2 6 2 2 6
37219- 78 78 78 250 250 250 253 253 253 253 253 253
37220-253 253 253 253 253 253 253 253 253 253 253 253
37221-253 253 253 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 253 253 253
37223-250 250 250 214 214 214 198 198 198 190 150 46
37224-219 162 10 236 178 12 234 174 13 224 166 10
37225-216 158 10 213 154 11 213 154 11 216 158 10
37226-226 170 11 239 182 13 246 190 14 246 190 14
37227-246 190 14 246 190 14 242 186 14 206 162 42
37228-101 101 101 58 58 58 30 30 30 14 14 14
37229- 6 6 6 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 10 10 10
37233- 30 30 30 74 74 74 174 135 50 216 158 10
37234-236 178 12 246 190 14 246 190 14 246 190 14
37235-246 190 14 246 190 14 246 190 14 246 190 14
37236-246 190 14 246 190 14 246 190 14 246 190 14
37237-246 190 14 246 190 14 241 196 14 226 184 13
37238- 61 42 6 2 2 6 2 2 6 2 2 6
37239- 22 22 22 238 238 238 253 253 253 253 253 253
37240-253 253 253 253 253 253 253 253 253 253 253 253
37241-253 253 253 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 226 226 226 187 187 187 180 133 36
37244-216 158 10 236 178 12 239 182 13 236 178 12
37245-230 174 11 226 170 11 226 170 11 230 174 11
37246-236 178 12 242 186 14 246 190 14 246 190 14
37247-246 190 14 246 190 14 246 186 14 239 182 13
37248-206 162 42 106 106 106 66 66 66 34 34 34
37249- 14 14 14 6 6 6 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 6 6 6
37253- 26 26 26 70 70 70 163 133 67 213 154 11
37254-236 178 12 246 190 14 246 190 14 246 190 14
37255-246 190 14 246 190 14 246 190 14 246 190 14
37256-246 190 14 246 190 14 246 190 14 246 190 14
37257-246 190 14 246 190 14 246 190 14 241 196 14
37258-190 146 13 18 14 6 2 2 6 2 2 6
37259- 46 46 46 246 246 246 253 253 253 253 253 253
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 221 221 221 86 86 86 156 107 11
37264-216 158 10 236 178 12 242 186 14 246 186 14
37265-242 186 14 239 182 13 239 182 13 242 186 14
37266-242 186 14 246 186 14 246 190 14 246 190 14
37267-246 190 14 246 190 14 246 190 14 246 190 14
37268-242 186 14 225 175 15 142 122 72 66 66 66
37269- 30 30 30 10 10 10 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 6 6 6
37273- 26 26 26 70 70 70 163 133 67 210 150 10
37274-236 178 12 246 190 14 246 190 14 246 190 14
37275-246 190 14 246 190 14 246 190 14 246 190 14
37276-246 190 14 246 190 14 246 190 14 246 190 14
37277-246 190 14 246 190 14 246 190 14 246 190 14
37278-232 195 16 121 92 8 34 34 34 106 106 106
37279-221 221 221 253 253 253 253 253 253 253 253 253
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-242 242 242 82 82 82 18 14 6 163 110 8
37284-216 158 10 236 178 12 242 186 14 246 190 14
37285-246 190 14 246 190 14 246 190 14 246 190 14
37286-246 190 14 246 190 14 246 190 14 246 190 14
37287-246 190 14 246 190 14 246 190 14 246 190 14
37288-246 190 14 246 190 14 242 186 14 163 133 67
37289- 46 46 46 18 18 18 6 6 6 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 10 10 10
37293- 30 30 30 78 78 78 163 133 67 210 150 10
37294-236 178 12 246 186 14 246 190 14 246 190 14
37295-246 190 14 246 190 14 246 190 14 246 190 14
37296-246 190 14 246 190 14 246 190 14 246 190 14
37297-246 190 14 246 190 14 246 190 14 246 190 14
37298-241 196 14 215 174 15 190 178 144 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 253 253 253 218 218 218
37303- 58 58 58 2 2 6 22 18 6 167 114 7
37304-216 158 10 236 178 12 246 186 14 246 190 14
37305-246 190 14 246 190 14 246 190 14 246 190 14
37306-246 190 14 246 190 14 246 190 14 246 190 14
37307-246 190 14 246 190 14 246 190 14 246 190 14
37308-246 190 14 246 186 14 242 186 14 190 150 46
37309- 54 54 54 22 22 22 6 6 6 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 14 14 14
37313- 38 38 38 86 86 86 180 133 36 213 154 11
37314-236 178 12 246 186 14 246 190 14 246 190 14
37315-246 190 14 246 190 14 246 190 14 246 190 14
37316-246 190 14 246 190 14 246 190 14 246 190 14
37317-246 190 14 246 190 14 246 190 14 246 190 14
37318-246 190 14 232 195 16 190 146 13 214 214 214
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 253 253 253 253 253 253
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 250 250 250 170 170 170 26 26 26
37323- 2 2 6 2 2 6 37 26 9 163 110 8
37324-219 162 10 239 182 13 246 186 14 246 190 14
37325-246 190 14 246 190 14 246 190 14 246 190 14
37326-246 190 14 246 190 14 246 190 14 246 190 14
37327-246 190 14 246 190 14 246 190 14 246 190 14
37328-246 186 14 236 178 12 224 166 10 142 122 72
37329- 46 46 46 18 18 18 6 6 6 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 6 6 6 18 18 18
37333- 50 50 50 109 106 95 192 133 9 224 166 10
37334-242 186 14 246 190 14 246 190 14 246 190 14
37335-246 190 14 246 190 14 246 190 14 246 190 14
37336-246 190 14 246 190 14 246 190 14 246 190 14
37337-246 190 14 246 190 14 246 190 14 246 190 14
37338-242 186 14 226 184 13 210 162 10 142 110 46
37339-226 226 226 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 253 253 253 253 253 253
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-198 198 198 66 66 66 2 2 6 2 2 6
37343- 2 2 6 2 2 6 50 34 6 156 107 11
37344-219 162 10 239 182 13 246 186 14 246 190 14
37345-246 190 14 246 190 14 246 190 14 246 190 14
37346-246 190 14 246 190 14 246 190 14 246 190 14
37347-246 190 14 246 190 14 246 190 14 242 186 14
37348-234 174 13 213 154 11 154 122 46 66 66 66
37349- 30 30 30 10 10 10 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 6 6 6 22 22 22
37353- 58 58 58 154 121 60 206 145 10 234 174 13
37354-242 186 14 246 186 14 246 190 14 246 190 14
37355-246 190 14 246 190 14 246 190 14 246 190 14
37356-246 190 14 246 190 14 246 190 14 246 190 14
37357-246 190 14 246 190 14 246 190 14 246 190 14
37358-246 186 14 236 178 12 210 162 10 163 110 8
37359- 61 42 6 138 138 138 218 218 218 250 250 250
37360-253 253 253 253 253 253 253 253 253 250 250 250
37361-242 242 242 210 210 210 144 144 144 66 66 66
37362- 6 6 6 2 2 6 2 2 6 2 2 6
37363- 2 2 6 2 2 6 61 42 6 163 110 8
37364-216 158 10 236 178 12 246 190 14 246 190 14
37365-246 190 14 246 190 14 246 190 14 246 190 14
37366-246 190 14 246 190 14 246 190 14 246 190 14
37367-246 190 14 239 182 13 230 174 11 216 158 10
37368-190 142 34 124 112 88 70 70 70 38 38 38
37369- 18 18 18 6 6 6 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 6 6 6 22 22 22
37373- 62 62 62 168 124 44 206 145 10 224 166 10
37374-236 178 12 239 182 13 242 186 14 242 186 14
37375-246 186 14 246 190 14 246 190 14 246 190 14
37376-246 190 14 246 190 14 246 190 14 246 190 14
37377-246 190 14 246 190 14 246 190 14 246 190 14
37378-246 190 14 236 178 12 216 158 10 175 118 6
37379- 80 54 7 2 2 6 6 6 6 30 30 30
37380- 54 54 54 62 62 62 50 50 50 38 38 38
37381- 14 14 14 2 2 6 2 2 6 2 2 6
37382- 2 2 6 2 2 6 2 2 6 2 2 6
37383- 2 2 6 6 6 6 80 54 7 167 114 7
37384-213 154 11 236 178 12 246 190 14 246 190 14
37385-246 190 14 246 190 14 246 190 14 246 190 14
37386-246 190 14 242 186 14 239 182 13 239 182 13
37387-230 174 11 210 150 10 174 135 50 124 112 88
37388- 82 82 82 54 54 54 34 34 34 18 18 18
37389- 6 6 6 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 6 6 6 18 18 18
37393- 50 50 50 158 118 36 192 133 9 200 144 11
37394-216 158 10 219 162 10 224 166 10 226 170 11
37395-230 174 11 236 178 12 239 182 13 239 182 13
37396-242 186 14 246 186 14 246 190 14 246 190 14
37397-246 190 14 246 190 14 246 190 14 246 190 14
37398-246 186 14 230 174 11 210 150 10 163 110 8
37399-104 69 6 10 10 10 2 2 6 2 2 6
37400- 2 2 6 2 2 6 2 2 6 2 2 6
37401- 2 2 6 2 2 6 2 2 6 2 2 6
37402- 2 2 6 2 2 6 2 2 6 2 2 6
37403- 2 2 6 6 6 6 91 60 6 167 114 7
37404-206 145 10 230 174 11 242 186 14 246 190 14
37405-246 190 14 246 190 14 246 186 14 242 186 14
37406-239 182 13 230 174 11 224 166 10 213 154 11
37407-180 133 36 124 112 88 86 86 86 58 58 58
37408- 38 38 38 22 22 22 10 10 10 6 6 6
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 14 14 14
37413- 34 34 34 70 70 70 138 110 50 158 118 36
37414-167 114 7 180 123 7 192 133 9 197 138 11
37415-200 144 11 206 145 10 213 154 11 219 162 10
37416-224 166 10 230 174 11 239 182 13 242 186 14
37417-246 186 14 246 186 14 246 186 14 246 186 14
37418-239 182 13 216 158 10 185 133 11 152 99 6
37419-104 69 6 18 14 6 2 2 6 2 2 6
37420- 2 2 6 2 2 6 2 2 6 2 2 6
37421- 2 2 6 2 2 6 2 2 6 2 2 6
37422- 2 2 6 2 2 6 2 2 6 2 2 6
37423- 2 2 6 6 6 6 80 54 7 152 99 6
37424-192 133 9 219 162 10 236 178 12 239 182 13
37425-246 186 14 242 186 14 239 182 13 236 178 12
37426-224 166 10 206 145 10 192 133 9 154 121 60
37427- 94 94 94 62 62 62 42 42 42 22 22 22
37428- 14 14 14 6 6 6 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 0 0 0 6 6 6
37433- 18 18 18 34 34 34 58 58 58 78 78 78
37434-101 98 89 124 112 88 142 110 46 156 107 11
37435-163 110 8 167 114 7 175 118 6 180 123 7
37436-185 133 11 197 138 11 210 150 10 219 162 10
37437-226 170 11 236 178 12 236 178 12 234 174 13
37438-219 162 10 197 138 11 163 110 8 130 83 6
37439- 91 60 6 10 10 10 2 2 6 2 2 6
37440- 18 18 18 38 38 38 38 38 38 38 38 38
37441- 38 38 38 38 38 38 38 38 38 38 38 38
37442- 38 38 38 38 38 38 26 26 26 2 2 6
37443- 2 2 6 6 6 6 70 47 6 137 92 6
37444-175 118 6 200 144 11 219 162 10 230 174 11
37445-234 174 13 230 174 11 219 162 10 210 150 10
37446-192 133 9 163 110 8 124 112 88 82 82 82
37447- 50 50 50 30 30 30 14 14 14 6 6 6
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 0 0 0 0 0 0
37453- 6 6 6 14 14 14 22 22 22 34 34 34
37454- 42 42 42 58 58 58 74 74 74 86 86 86
37455-101 98 89 122 102 70 130 98 46 121 87 25
37456-137 92 6 152 99 6 163 110 8 180 123 7
37457-185 133 11 197 138 11 206 145 10 200 144 11
37458-180 123 7 156 107 11 130 83 6 104 69 6
37459- 50 34 6 54 54 54 110 110 110 101 98 89
37460- 86 86 86 82 82 82 78 78 78 78 78 78
37461- 78 78 78 78 78 78 78 78 78 78 78 78
37462- 78 78 78 82 82 82 86 86 86 94 94 94
37463-106 106 106 101 101 101 86 66 34 124 80 6
37464-156 107 11 180 123 7 192 133 9 200 144 11
37465-206 145 10 200 144 11 192 133 9 175 118 6
37466-139 102 15 109 106 95 70 70 70 42 42 42
37467- 22 22 22 10 10 10 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 0 0 0 0 0 0
37473- 0 0 0 0 0 0 6 6 6 10 10 10
37474- 14 14 14 22 22 22 30 30 30 38 38 38
37475- 50 50 50 62 62 62 74 74 74 90 90 90
37476-101 98 89 112 100 78 121 87 25 124 80 6
37477-137 92 6 152 99 6 152 99 6 152 99 6
37478-138 86 6 124 80 6 98 70 6 86 66 30
37479-101 98 89 82 82 82 58 58 58 46 46 46
37480- 38 38 38 34 34 34 34 34 34 34 34 34
37481- 34 34 34 34 34 34 34 34 34 34 34 34
37482- 34 34 34 34 34 34 38 38 38 42 42 42
37483- 54 54 54 82 82 82 94 86 76 91 60 6
37484-134 86 6 156 107 11 167 114 7 175 118 6
37485-175 118 6 167 114 7 152 99 6 121 87 25
37486-101 98 89 62 62 62 34 34 34 18 18 18
37487- 6 6 6 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 0 0 0 0 0 0
37493- 0 0 0 0 0 0 0 0 0 0 0 0
37494- 0 0 0 6 6 6 6 6 6 10 10 10
37495- 18 18 18 22 22 22 30 30 30 42 42 42
37496- 50 50 50 66 66 66 86 86 86 101 98 89
37497-106 86 58 98 70 6 104 69 6 104 69 6
37498-104 69 6 91 60 6 82 62 34 90 90 90
37499- 62 62 62 38 38 38 22 22 22 14 14 14
37500- 10 10 10 10 10 10 10 10 10 10 10 10
37501- 10 10 10 10 10 10 6 6 6 10 10 10
37502- 10 10 10 10 10 10 10 10 10 14 14 14
37503- 22 22 22 42 42 42 70 70 70 89 81 66
37504- 80 54 7 104 69 6 124 80 6 137 92 6
37505-134 86 6 116 81 8 100 82 52 86 86 86
37506- 58 58 58 30 30 30 14 14 14 6 6 6
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 0 0 0
37512- 0 0 0 0 0 0 0 0 0 0 0 0
37513- 0 0 0 0 0 0 0 0 0 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 6 6 6 10 10 10 14 14 14
37516- 18 18 18 26 26 26 38 38 38 54 54 54
37517- 70 70 70 86 86 86 94 86 76 89 81 66
37518- 89 81 66 86 86 86 74 74 74 50 50 50
37519- 30 30 30 14 14 14 6 6 6 0 0 0
37520- 0 0 0 0 0 0 0 0 0 0 0 0
37521- 0 0 0 0 0 0 0 0 0 0 0 0
37522- 0 0 0 0 0 0 0 0 0 0 0 0
37523- 6 6 6 18 18 18 34 34 34 58 58 58
37524- 82 82 82 89 81 66 89 81 66 89 81 66
37525- 94 86 66 94 86 76 74 74 74 50 50 50
37526- 26 26 26 14 14 14 6 6 6 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 0 0 0 0 0 0
37532- 0 0 0 0 0 0 0 0 0 0 0 0
37533- 0 0 0 0 0 0 0 0 0 0 0 0
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 0 0 0 0 0 0
37536- 6 6 6 6 6 6 14 14 14 18 18 18
37537- 30 30 30 38 38 38 46 46 46 54 54 54
37538- 50 50 50 42 42 42 30 30 30 18 18 18
37539- 10 10 10 0 0 0 0 0 0 0 0 0
37540- 0 0 0 0 0 0 0 0 0 0 0 0
37541- 0 0 0 0 0 0 0 0 0 0 0 0
37542- 0 0 0 0 0 0 0 0 0 0 0 0
37543- 0 0 0 6 6 6 14 14 14 26 26 26
37544- 38 38 38 50 50 50 58 58 58 58 58 58
37545- 54 54 54 42 42 42 30 30 30 18 18 18
37546- 10 10 10 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 0 0 0 0 0 0
37552- 0 0 0 0 0 0 0 0 0 0 0 0
37553- 0 0 0 0 0 0 0 0 0 0 0 0
37554- 0 0 0 0 0 0 0 0 0 0 0 0
37555- 0 0 0 0 0 0 0 0 0 0 0 0
37556- 0 0 0 0 0 0 0 0 0 6 6 6
37557- 6 6 6 10 10 10 14 14 14 18 18 18
37558- 18 18 18 14 14 14 10 10 10 6 6 6
37559- 0 0 0 0 0 0 0 0 0 0 0 0
37560- 0 0 0 0 0 0 0 0 0 0 0 0
37561- 0 0 0 0 0 0 0 0 0 0 0 0
37562- 0 0 0 0 0 0 0 0 0 0 0 0
37563- 0 0 0 0 0 0 0 0 0 6 6 6
37564- 14 14 14 18 18 18 22 22 22 22 22 22
37565- 18 18 18 14 14 14 10 10 10 6 6 6
37566- 0 0 0 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37584+4 4 4 4 4 4
37585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37598+4 4 4 4 4 4
37599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37612+4 4 4 4 4 4
37613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37626+4 4 4 4 4 4
37627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37640+4 4 4 4 4 4
37641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37654+4 4 4 4 4 4
37655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37659+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
37660+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
37661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37664+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
37665+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37666+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
37667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37668+4 4 4 4 4 4
37669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37673+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
37674+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
37675+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37678+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
37679+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
37680+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
37681+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37682+4 4 4 4 4 4
37683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37687+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
37688+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
37689+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37692+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
37693+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
37694+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
37695+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
37696+4 4 4 4 4 4
37697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37700+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
37701+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
37702+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
37703+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
37704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37705+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
37706+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
37707+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
37708+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
37709+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
37710+4 4 4 4 4 4
37711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37714+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
37715+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
37716+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
37717+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
37718+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37719+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
37720+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
37721+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
37722+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
37723+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
37724+4 4 4 4 4 4
37725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37728+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
37729+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
37730+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
37731+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
37732+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
37733+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
37734+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
37735+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
37736+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
37737+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
37738+4 4 4 4 4 4
37739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37741+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
37742+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
37743+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
37744+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
37745+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
37746+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
37747+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
37748+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
37749+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
37750+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
37751+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
37752+4 4 4 4 4 4
37753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37755+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
37756+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
37757+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
37758+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
37759+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
37760+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
37761+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
37762+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
37763+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
37764+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
37765+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
37766+4 4 4 4 4 4
37767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37769+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
37770+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
37771+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
37772+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
37773+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
37774+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
37775+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
37776+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
37777+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
37778+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
37779+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37780+4 4 4 4 4 4
37781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37783+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
37784+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
37785+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
37786+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
37787+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
37788+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
37789+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
37790+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
37791+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
37792+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
37793+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
37794+4 4 4 4 4 4
37795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37796+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
37797+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
37798+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
37799+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
37800+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
37801+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
37802+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
37803+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
37804+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
37805+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
37806+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
37807+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
37808+4 4 4 4 4 4
37809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37810+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
37811+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
37812+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
37813+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
37814+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
37815+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
37816+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
37817+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
37818+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
37819+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
37820+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
37821+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
37822+0 0 0 4 4 4
37823+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
37824+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
37825+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
37826+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
37827+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
37828+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
37829+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
37830+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
37831+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
37832+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
37833+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
37834+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
37835+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
37836+2 0 0 0 0 0
37837+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
37838+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
37839+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
37840+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
37841+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
37842+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
37843+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
37844+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
37845+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
37846+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
37847+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
37848+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
37849+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
37850+37 38 37 0 0 0
37851+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
37852+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
37853+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
37854+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
37855+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
37856+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
37857+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
37858+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
37859+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
37860+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
37861+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
37862+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
37863+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
37864+85 115 134 4 0 0
37865+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
37866+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
37867+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
37868+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
37869+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
37870+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
37871+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
37872+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
37873+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
37874+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
37875+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
37876+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
37877+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
37878+60 73 81 4 0 0
37879+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
37880+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
37881+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
37882+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
37883+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
37884+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
37885+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
37886+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
37887+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
37888+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
37889+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
37890+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
37891+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
37892+16 19 21 4 0 0
37893+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
37894+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
37895+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
37896+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
37897+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
37898+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
37899+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
37900+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
37901+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
37902+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
37903+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
37904+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
37905+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
37906+4 0 0 4 3 3
37907+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
37908+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
37909+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
37910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
37911+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
37912+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
37913+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
37914+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
37915+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
37916+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
37917+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
37918+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
37919+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
37920+3 2 2 4 4 4
37921+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
37922+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
37923+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
37924+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
37925+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
37926+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
37927+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
37928+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
37929+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
37930+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
37931+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
37932+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
37933+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
37934+4 4 4 4 4 4
37935+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
37936+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
37937+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
37938+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
37939+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
37940+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
37941+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
37942+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
37943+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
37944+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
37945+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
37946+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
37947+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
37948+4 4 4 4 4 4
37949+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
37950+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
37951+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
37952+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
37953+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
37954+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
37955+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
37956+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
37957+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
37958+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
37959+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
37960+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
37961+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
37962+5 5 5 5 5 5
37963+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
37964+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
37965+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
37966+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
37967+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
37968+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37969+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
37970+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
37971+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
37972+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
37973+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
37974+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
37975+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
37976+5 5 5 4 4 4
37977+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
37978+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
37979+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
37980+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
37981+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
37982+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
37983+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
37984+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
37985+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
37986+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
37987+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
37988+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
37989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37990+4 4 4 4 4 4
37991+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
37992+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
37993+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
37994+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
37995+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
37996+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37997+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37998+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
37999+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38000+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38001+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38002+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38004+4 4 4 4 4 4
38005+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38006+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38007+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38008+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38009+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38010+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38011+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38012+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38013+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38014+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38015+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018+4 4 4 4 4 4
38019+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38020+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38021+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38022+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38023+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38024+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38025+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38026+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38027+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38028+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38029+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032+4 4 4 4 4 4
38033+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38034+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38035+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38036+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38037+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38038+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38039+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38040+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38041+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38042+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38043+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046+4 4 4 4 4 4
38047+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38048+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38049+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38050+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38051+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38052+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38053+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38054+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38055+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38056+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38057+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4
38061+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38062+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38063+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38064+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38065+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38066+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38067+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38068+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38069+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38070+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38071+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 4 4 4
38075+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38076+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38077+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38078+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38079+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38080+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38081+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38082+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38083+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38084+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38085+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088+4 4 4 4 4 4
38089+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38090+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38091+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38092+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38093+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38094+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38095+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38096+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38097+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38098+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38099+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102+4 4 4 4 4 4
38103+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38104+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38105+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38106+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38107+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38108+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38109+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38110+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38111+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38112+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38113+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4
38117+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38118+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38119+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38120+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38121+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38122+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38123+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38124+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38125+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38126+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38127+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4
38131+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38132+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38133+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38134+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38135+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38136+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38137+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38138+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38139+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38140+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38141+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144+4 4 4 4 4 4
38145+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38146+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38147+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38148+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38149+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38150+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38151+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38152+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38153+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38154+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38155+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4
38159+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38160+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38161+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38162+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38163+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38164+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38165+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38166+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38167+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38168+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38169+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4
38173+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38174+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38175+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38176+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38177+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38178+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38179+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38180+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38181+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38182+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38183+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4
38187+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38188+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38189+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38190+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38191+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38192+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38193+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38194+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38195+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38196+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38197+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38200+4 4 4 4 4 4
38201+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38202+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38203+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38204+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38205+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38206+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38207+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38208+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38209+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38210+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38211+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38214+4 4 4 4 4 4
38215+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38216+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38217+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38218+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38219+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38220+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38221+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38222+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38223+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38224+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38225+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38228+4 4 4 4 4 4
38229+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38230+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38231+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38232+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38233+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38234+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38235+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38236+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38237+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38238+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38239+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38242+4 4 4 4 4 4
38243+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38244+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38245+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38246+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38247+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38248+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38249+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38250+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38251+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38252+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38253+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256+4 4 4 4 4 4
38257+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38258+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38259+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38260+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38261+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38262+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38263+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38264+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38265+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38266+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38267+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270+4 4 4 4 4 4
38271+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38272+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38273+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38274+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38275+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38276+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38277+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38278+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38279+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38280+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38281+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284+4 4 4 4 4 4
38285+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38286+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38287+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38288+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38289+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38290+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38291+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38292+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38293+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38294+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38295+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298+4 4 4 4 4 4
38299+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38300+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38301+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38302+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38303+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38304+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38305+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38306+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38307+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38308+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38309+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312+4 4 4 4 4 4
38313+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38314+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38315+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38316+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38317+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38318+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38319+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38320+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38321+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38322+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38323+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38326+4 4 4 4 4 4
38327+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38328+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38329+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38330+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38331+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38332+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38333+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38334+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38335+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38336+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38337+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38340+4 4 4 4 4 4
38341+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38342+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38343+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38344+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38345+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38346+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38347+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38348+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38349+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38350+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38351+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38354+4 4 4 4 4 4
38355+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38356+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38357+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38358+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38359+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38360+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38361+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38362+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38363+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38364+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38365+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38368+4 4 4 4 4 4
38369+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38370+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38371+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38372+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38373+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38374+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38375+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38376+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38377+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38378+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38379+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38382+4 4 4 4 4 4
38383+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38384+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38385+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38386+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38387+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38388+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38389+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38390+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38391+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38392+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38396+4 4 4 4 4 4
38397+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38398+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38399+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38400+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38401+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38402+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38403+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38404+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38405+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38406+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38410+4 4 4 4 4 4
38411+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38412+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38413+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38414+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38415+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38416+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38417+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38418+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38419+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38420+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38424+4 4 4 4 4 4
38425+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38426+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38427+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38428+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38429+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38430+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38431+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38432+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38433+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38434+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38438+4 4 4 4 4 4
38439+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38440+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38441+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38442+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38443+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38444+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38445+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38446+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38447+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38452+4 4 4 4 4 4
38453+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38454+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38455+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38456+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38457+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38458+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38459+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38460+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38461+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38466+4 4 4 4 4 4
38467+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38468+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38469+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38470+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38471+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38472+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38473+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38474+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38475+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38480+4 4 4 4 4 4
38481+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38482+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38483+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38484+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38485+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38486+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38487+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38488+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38494+4 4 4 4 4 4
38495+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38496+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38497+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38498+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38499+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38500+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38501+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38502+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38508+4 4 4 4 4 4
38509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38510+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38511+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38512+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38513+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38514+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38515+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38516+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38522+4 4 4 4 4 4
38523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38524+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38525+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38526+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38527+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38528+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38529+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38530+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38536+4 4 4 4 4 4
38537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38538+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38539+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38540+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38541+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38542+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38543+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38544+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38550+4 4 4 4 4 4
38551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38553+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38554+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38555+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38556+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38557+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38558+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38564+4 4 4 4 4 4
38565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38568+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38569+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38570+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38571+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38578+4 4 4 4 4 4
38579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38583+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38584+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38585+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38592+4 4 4 4 4 4
38593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38597+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38598+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38599+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38606+4 4 4 4 4 4
38607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38611+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38612+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
38613+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
38614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38620+4 4 4 4 4 4
38621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38625+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
38626+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38627+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38634+4 4 4 4 4 4
38635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
38640+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
38641+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38648+4 4 4 4 4 4
38649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
38654+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
38655+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38662+4 4 4 4 4 4
38663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
38668+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
38669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38676+4 4 4 4 4 4
38677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38682+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
38683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38690+4 4 4 4 4 4
38691diff -urNp linux-3.0.7/drivers/video/udlfb.c linux-3.0.7/drivers/video/udlfb.c
38692--- linux-3.0.7/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
38693+++ linux-3.0.7/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
38694@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
38695 dlfb_urb_completion(urb);
38696
38697 error:
38698- atomic_add(bytes_sent, &dev->bytes_sent);
38699- atomic_add(bytes_identical, &dev->bytes_identical);
38700- atomic_add(width*height*2, &dev->bytes_rendered);
38701+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38702+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38703+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
38704 end_cycles = get_cycles();
38705- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38706+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38707 >> 10)), /* Kcycles */
38708 &dev->cpu_kcycles_used);
38709
38710@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
38711 dlfb_urb_completion(urb);
38712
38713 error:
38714- atomic_add(bytes_sent, &dev->bytes_sent);
38715- atomic_add(bytes_identical, &dev->bytes_identical);
38716- atomic_add(bytes_rendered, &dev->bytes_rendered);
38717+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38718+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38719+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
38720 end_cycles = get_cycles();
38721- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38722+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38723 >> 10)), /* Kcycles */
38724 &dev->cpu_kcycles_used);
38725 }
38726@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
38727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38728 struct dlfb_data *dev = fb_info->par;
38729 return snprintf(buf, PAGE_SIZE, "%u\n",
38730- atomic_read(&dev->bytes_rendered));
38731+ atomic_read_unchecked(&dev->bytes_rendered));
38732 }
38733
38734 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
38735@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
38736 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38737 struct dlfb_data *dev = fb_info->par;
38738 return snprintf(buf, PAGE_SIZE, "%u\n",
38739- atomic_read(&dev->bytes_identical));
38740+ atomic_read_unchecked(&dev->bytes_identical));
38741 }
38742
38743 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
38744@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
38745 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38746 struct dlfb_data *dev = fb_info->par;
38747 return snprintf(buf, PAGE_SIZE, "%u\n",
38748- atomic_read(&dev->bytes_sent));
38749+ atomic_read_unchecked(&dev->bytes_sent));
38750 }
38751
38752 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
38753@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
38754 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38755 struct dlfb_data *dev = fb_info->par;
38756 return snprintf(buf, PAGE_SIZE, "%u\n",
38757- atomic_read(&dev->cpu_kcycles_used));
38758+ atomic_read_unchecked(&dev->cpu_kcycles_used));
38759 }
38760
38761 static ssize_t edid_show(
38762@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
38763 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38764 struct dlfb_data *dev = fb_info->par;
38765
38766- atomic_set(&dev->bytes_rendered, 0);
38767- atomic_set(&dev->bytes_identical, 0);
38768- atomic_set(&dev->bytes_sent, 0);
38769- atomic_set(&dev->cpu_kcycles_used, 0);
38770+ atomic_set_unchecked(&dev->bytes_rendered, 0);
38771+ atomic_set_unchecked(&dev->bytes_identical, 0);
38772+ atomic_set_unchecked(&dev->bytes_sent, 0);
38773+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
38774
38775 return count;
38776 }
38777diff -urNp linux-3.0.7/drivers/video/uvesafb.c linux-3.0.7/drivers/video/uvesafb.c
38778--- linux-3.0.7/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
38779+++ linux-3.0.7/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
38780@@ -19,6 +19,7 @@
38781 #include <linux/io.h>
38782 #include <linux/mutex.h>
38783 #include <linux/slab.h>
38784+#include <linux/moduleloader.h>
38785 #include <video/edid.h>
38786 #include <video/uvesafb.h>
38787 #ifdef CONFIG_X86
38788@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
38789 NULL,
38790 };
38791
38792- return call_usermodehelper(v86d_path, argv, envp, 1);
38793+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38794 }
38795
38796 /*
38797@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
38798 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38799 par->pmi_setpal = par->ypan = 0;
38800 } else {
38801+
38802+#ifdef CONFIG_PAX_KERNEXEC
38803+#ifdef CONFIG_MODULES
38804+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38805+#endif
38806+ if (!par->pmi_code) {
38807+ par->pmi_setpal = par->ypan = 0;
38808+ return 0;
38809+ }
38810+#endif
38811+
38812 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38813 + task->t.regs.edi);
38814+
38815+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38816+ pax_open_kernel();
38817+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38818+ pax_close_kernel();
38819+
38820+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38821+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38822+#else
38823 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38824 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38825+#endif
38826+
38827 printk(KERN_INFO "uvesafb: protected mode interface info at "
38828 "%04x:%04x\n",
38829 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38830@@ -1821,6 +1844,11 @@ out:
38831 if (par->vbe_modes)
38832 kfree(par->vbe_modes);
38833
38834+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38835+ if (par->pmi_code)
38836+ module_free_exec(NULL, par->pmi_code);
38837+#endif
38838+
38839 framebuffer_release(info);
38840 return err;
38841 }
38842@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
38843 kfree(par->vbe_state_orig);
38844 if (par->vbe_state_saved)
38845 kfree(par->vbe_state_saved);
38846+
38847+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38848+ if (par->pmi_code)
38849+ module_free_exec(NULL, par->pmi_code);
38850+#endif
38851+
38852 }
38853
38854 framebuffer_release(info);
38855diff -urNp linux-3.0.7/drivers/video/vesafb.c linux-3.0.7/drivers/video/vesafb.c
38856--- linux-3.0.7/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
38857+++ linux-3.0.7/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
38858@@ -9,6 +9,7 @@
38859 */
38860
38861 #include <linux/module.h>
38862+#include <linux/moduleloader.h>
38863 #include <linux/kernel.h>
38864 #include <linux/errno.h>
38865 #include <linux/string.h>
38866@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
38867 static int vram_total __initdata; /* Set total amount of memory */
38868 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38869 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38870-static void (*pmi_start)(void) __read_mostly;
38871-static void (*pmi_pal) (void) __read_mostly;
38872+static void (*pmi_start)(void) __read_only;
38873+static void (*pmi_pal) (void) __read_only;
38874 static int depth __read_mostly;
38875 static int vga_compat __read_mostly;
38876 /* --------------------------------------------------------------------- */
38877@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38878 unsigned int size_vmode;
38879 unsigned int size_remap;
38880 unsigned int size_total;
38881+ void *pmi_code = NULL;
38882
38883 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38884 return -ENODEV;
38885@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38886 size_remap = size_total;
38887 vesafb_fix.smem_len = size_remap;
38888
38889-#ifndef __i386__
38890- screen_info.vesapm_seg = 0;
38891-#endif
38892-
38893 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38894 printk(KERN_WARNING
38895 "vesafb: cannot reserve video memory at 0x%lx\n",
38896@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
38897 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38898 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38899
38900+#ifdef __i386__
38901+
38902+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38903+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38904+ if (!pmi_code)
38905+#elif !defined(CONFIG_PAX_KERNEXEC)
38906+ if (0)
38907+#endif
38908+
38909+#endif
38910+ screen_info.vesapm_seg = 0;
38911+
38912 if (screen_info.vesapm_seg) {
38913- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38914- screen_info.vesapm_seg,screen_info.vesapm_off);
38915+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38916+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38917 }
38918
38919 if (screen_info.vesapm_seg < 0xc000)
38920@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
38921
38922 if (ypan || pmi_setpal) {
38923 unsigned short *pmi_base;
38924+
38925 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38926- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38927- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38928+
38929+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38930+ pax_open_kernel();
38931+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38932+#else
38933+ pmi_code = pmi_base;
38934+#endif
38935+
38936+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38937+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38938+
38939+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38940+ pmi_start = ktva_ktla(pmi_start);
38941+ pmi_pal = ktva_ktla(pmi_pal);
38942+ pax_close_kernel();
38943+#endif
38944+
38945 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38946 if (pmi_base[3]) {
38947 printk(KERN_INFO "vesafb: pmi: ports = ");
38948@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
38949 info->node, info->fix.id);
38950 return 0;
38951 err:
38952+
38953+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38954+ module_free_exec(NULL, pmi_code);
38955+#endif
38956+
38957 if (info->screen_base)
38958 iounmap(info->screen_base);
38959 framebuffer_release(info);
38960diff -urNp linux-3.0.7/drivers/video/via/via_clock.h linux-3.0.7/drivers/video/via/via_clock.h
38961--- linux-3.0.7/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
38962+++ linux-3.0.7/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
38963@@ -56,7 +56,7 @@ struct via_clock {
38964
38965 void (*set_engine_pll_state)(u8 state);
38966 void (*set_engine_pll)(struct via_pll_config config);
38967-};
38968+} __no_const;
38969
38970
38971 static inline u32 get_pll_internal_frequency(u32 ref_freq,
38972diff -urNp linux-3.0.7/drivers/virtio/virtio_balloon.c linux-3.0.7/drivers/virtio/virtio_balloon.c
38973--- linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
38974+++ linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
38975@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
38976 struct sysinfo i;
38977 int idx = 0;
38978
38979+ pax_track_stack();
38980+
38981 all_vm_events(events);
38982 si_meminfo(&i);
38983
38984diff -urNp linux-3.0.7/fs/9p/vfs_inode.c linux-3.0.7/fs/9p/vfs_inode.c
38985--- linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:54:54.000000000 -0400
38986+++ linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:55:28.000000000 -0400
38987@@ -1264,7 +1264,7 @@ static void *v9fs_vfs_follow_link(struct
38988 void
38989 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38990 {
38991- char *s = nd_get_link(nd);
38992+ const char *s = nd_get_link(nd);
38993
38994 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38995 IS_ERR(s) ? "<error>" : s);
38996diff -urNp linux-3.0.7/fs/aio.c linux-3.0.7/fs/aio.c
38997--- linux-3.0.7/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
38998+++ linux-3.0.7/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
38999@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39000 size += sizeof(struct io_event) * nr_events;
39001 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39002
39003- if (nr_pages < 0)
39004+ if (nr_pages <= 0)
39005 return -EINVAL;
39006
39007 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39008@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39009 struct aio_timeout to;
39010 int retry = 0;
39011
39012+ pax_track_stack();
39013+
39014 /* needed to zero any padding within an entry (there shouldn't be
39015 * any, but C is fun!
39016 */
39017@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39018 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39019 {
39020 ssize_t ret;
39021+ struct iovec iovstack;
39022
39023 #ifdef CONFIG_COMPAT
39024 if (compat)
39025 ret = compat_rw_copy_check_uvector(type,
39026 (struct compat_iovec __user *)kiocb->ki_buf,
39027- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39028+ kiocb->ki_nbytes, 1, &iovstack,
39029 &kiocb->ki_iovec);
39030 else
39031 #endif
39032 ret = rw_copy_check_uvector(type,
39033 (struct iovec __user *)kiocb->ki_buf,
39034- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39035+ kiocb->ki_nbytes, 1, &iovstack,
39036 &kiocb->ki_iovec);
39037 if (ret < 0)
39038 goto out;
39039
39040+ if (kiocb->ki_iovec == &iovstack) {
39041+ kiocb->ki_inline_vec = iovstack;
39042+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39043+ }
39044 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39045 kiocb->ki_cur_seg = 0;
39046 /* ki_nbytes/left now reflect bytes instead of segs */
39047diff -urNp linux-3.0.7/fs/attr.c linux-3.0.7/fs/attr.c
39048--- linux-3.0.7/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
39049+++ linux-3.0.7/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
39050@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39051 unsigned long limit;
39052
39053 limit = rlimit(RLIMIT_FSIZE);
39054+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39055 if (limit != RLIM_INFINITY && offset > limit)
39056 goto out_sig;
39057 if (offset > inode->i_sb->s_maxbytes)
39058diff -urNp linux-3.0.7/fs/autofs4/waitq.c linux-3.0.7/fs/autofs4/waitq.c
39059--- linux-3.0.7/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
39060+++ linux-3.0.7/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
39061@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39062 {
39063 unsigned long sigpipe, flags;
39064 mm_segment_t fs;
39065- const char *data = (const char *)addr;
39066+ const char __user *data = (const char __force_user *)addr;
39067 ssize_t wr = 0;
39068
39069 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39070diff -urNp linux-3.0.7/fs/befs/linuxvfs.c linux-3.0.7/fs/befs/linuxvfs.c
39071--- linux-3.0.7/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
39072+++ linux-3.0.7/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
39073@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39074 {
39075 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39076 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39077- char *link = nd_get_link(nd);
39078+ const char *link = nd_get_link(nd);
39079 if (!IS_ERR(link))
39080 kfree(link);
39081 }
39082diff -urNp linux-3.0.7/fs/binfmt_aout.c linux-3.0.7/fs/binfmt_aout.c
39083--- linux-3.0.7/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
39084+++ linux-3.0.7/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
39085@@ -16,6 +16,7 @@
39086 #include <linux/string.h>
39087 #include <linux/fs.h>
39088 #include <linux/file.h>
39089+#include <linux/security.h>
39090 #include <linux/stat.h>
39091 #include <linux/fcntl.h>
39092 #include <linux/ptrace.h>
39093@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39094 #endif
39095 # define START_STACK(u) ((void __user *)u.start_stack)
39096
39097+ memset(&dump, 0, sizeof(dump));
39098+
39099 fs = get_fs();
39100 set_fs(KERNEL_DS);
39101 has_dumped = 1;
39102@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39103
39104 /* If the size of the dump file exceeds the rlimit, then see what would happen
39105 if we wrote the stack, but not the data area. */
39106+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39107 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39108 dump.u_dsize = 0;
39109
39110 /* Make sure we have enough room to write the stack and data areas. */
39111+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39112 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39113 dump.u_ssize = 0;
39114
39115@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39116 rlim = rlimit(RLIMIT_DATA);
39117 if (rlim >= RLIM_INFINITY)
39118 rlim = ~0;
39119+
39120+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39121 if (ex.a_data + ex.a_bss > rlim)
39122 return -ENOMEM;
39123
39124@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39125 install_exec_creds(bprm);
39126 current->flags &= ~PF_FORKNOEXEC;
39127
39128+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39129+ current->mm->pax_flags = 0UL;
39130+#endif
39131+
39132+#ifdef CONFIG_PAX_PAGEEXEC
39133+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39134+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39135+
39136+#ifdef CONFIG_PAX_EMUTRAMP
39137+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39138+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39139+#endif
39140+
39141+#ifdef CONFIG_PAX_MPROTECT
39142+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39143+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39144+#endif
39145+
39146+ }
39147+#endif
39148+
39149 if (N_MAGIC(ex) == OMAGIC) {
39150 unsigned long text_addr, map_size;
39151 loff_t pos;
39152@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39153
39154 down_write(&current->mm->mmap_sem);
39155 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39156- PROT_READ | PROT_WRITE | PROT_EXEC,
39157+ PROT_READ | PROT_WRITE,
39158 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39159 fd_offset + ex.a_text);
39160 up_write(&current->mm->mmap_sem);
39161diff -urNp linux-3.0.7/fs/binfmt_elf.c linux-3.0.7/fs/binfmt_elf.c
39162--- linux-3.0.7/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39163+++ linux-3.0.7/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
39164@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39165 #define elf_core_dump NULL
39166 #endif
39167
39168+#ifdef CONFIG_PAX_MPROTECT
39169+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39170+#endif
39171+
39172 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39173 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39174 #else
39175@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39176 .load_binary = load_elf_binary,
39177 .load_shlib = load_elf_library,
39178 .core_dump = elf_core_dump,
39179+
39180+#ifdef CONFIG_PAX_MPROTECT
39181+ .handle_mprotect= elf_handle_mprotect,
39182+#endif
39183+
39184 .min_coredump = ELF_EXEC_PAGESIZE,
39185 };
39186
39187@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39188
39189 static int set_brk(unsigned long start, unsigned long end)
39190 {
39191+ unsigned long e = end;
39192+
39193 start = ELF_PAGEALIGN(start);
39194 end = ELF_PAGEALIGN(end);
39195 if (end > start) {
39196@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39197 if (BAD_ADDR(addr))
39198 return addr;
39199 }
39200- current->mm->start_brk = current->mm->brk = end;
39201+ current->mm->start_brk = current->mm->brk = e;
39202 return 0;
39203 }
39204
39205@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39206 elf_addr_t __user *u_rand_bytes;
39207 const char *k_platform = ELF_PLATFORM;
39208 const char *k_base_platform = ELF_BASE_PLATFORM;
39209- unsigned char k_rand_bytes[16];
39210+ u32 k_rand_bytes[4];
39211 int items;
39212 elf_addr_t *elf_info;
39213 int ei_index = 0;
39214 const struct cred *cred = current_cred();
39215 struct vm_area_struct *vma;
39216+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39217+
39218+ pax_track_stack();
39219
39220 /*
39221 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39222@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39223 * Generate 16 random bytes for userspace PRNG seeding.
39224 */
39225 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39226- u_rand_bytes = (elf_addr_t __user *)
39227- STACK_ALLOC(p, sizeof(k_rand_bytes));
39228+ srandom32(k_rand_bytes[0] ^ random32());
39229+ srandom32(k_rand_bytes[1] ^ random32());
39230+ srandom32(k_rand_bytes[2] ^ random32());
39231+ srandom32(k_rand_bytes[3] ^ random32());
39232+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39233+ u_rand_bytes = (elf_addr_t __user *) p;
39234 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39235 return -EFAULT;
39236
39237@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39238 return -EFAULT;
39239 current->mm->env_end = p;
39240
39241+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39242+
39243 /* Put the elf_info on the stack in the right place. */
39244 sp = (elf_addr_t __user *)envp + 1;
39245- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39246+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39247 return -EFAULT;
39248 return 0;
39249 }
39250@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39251 {
39252 struct elf_phdr *elf_phdata;
39253 struct elf_phdr *eppnt;
39254- unsigned long load_addr = 0;
39255+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39256 int load_addr_set = 0;
39257 unsigned long last_bss = 0, elf_bss = 0;
39258- unsigned long error = ~0UL;
39259+ unsigned long error = -EINVAL;
39260 unsigned long total_size;
39261 int retval, i, size;
39262
39263@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39264 goto out_close;
39265 }
39266
39267+#ifdef CONFIG_PAX_SEGMEXEC
39268+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39269+ pax_task_size = SEGMEXEC_TASK_SIZE;
39270+#endif
39271+
39272 eppnt = elf_phdata;
39273 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39274 if (eppnt->p_type == PT_LOAD) {
39275@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39276 k = load_addr + eppnt->p_vaddr;
39277 if (BAD_ADDR(k) ||
39278 eppnt->p_filesz > eppnt->p_memsz ||
39279- eppnt->p_memsz > TASK_SIZE ||
39280- TASK_SIZE - eppnt->p_memsz < k) {
39281+ eppnt->p_memsz > pax_task_size ||
39282+ pax_task_size - eppnt->p_memsz < k) {
39283 error = -ENOMEM;
39284 goto out_close;
39285 }
39286@@ -528,6 +553,193 @@ out:
39287 return error;
39288 }
39289
39290+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39291+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39292+{
39293+ unsigned long pax_flags = 0UL;
39294+
39295+#ifdef CONFIG_PAX_PAGEEXEC
39296+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39297+ pax_flags |= MF_PAX_PAGEEXEC;
39298+#endif
39299+
39300+#ifdef CONFIG_PAX_SEGMEXEC
39301+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39302+ pax_flags |= MF_PAX_SEGMEXEC;
39303+#endif
39304+
39305+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39306+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39307+ if ((__supported_pte_mask & _PAGE_NX))
39308+ pax_flags &= ~MF_PAX_SEGMEXEC;
39309+ else
39310+ pax_flags &= ~MF_PAX_PAGEEXEC;
39311+ }
39312+#endif
39313+
39314+#ifdef CONFIG_PAX_EMUTRAMP
39315+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39316+ pax_flags |= MF_PAX_EMUTRAMP;
39317+#endif
39318+
39319+#ifdef CONFIG_PAX_MPROTECT
39320+ if (elf_phdata->p_flags & PF_MPROTECT)
39321+ pax_flags |= MF_PAX_MPROTECT;
39322+#endif
39323+
39324+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39325+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39326+ pax_flags |= MF_PAX_RANDMMAP;
39327+#endif
39328+
39329+ return pax_flags;
39330+}
39331+#endif
39332+
39333+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39334+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39335+{
39336+ unsigned long pax_flags = 0UL;
39337+
39338+#ifdef CONFIG_PAX_PAGEEXEC
39339+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39340+ pax_flags |= MF_PAX_PAGEEXEC;
39341+#endif
39342+
39343+#ifdef CONFIG_PAX_SEGMEXEC
39344+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39345+ pax_flags |= MF_PAX_SEGMEXEC;
39346+#endif
39347+
39348+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39349+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39350+ if ((__supported_pte_mask & _PAGE_NX))
39351+ pax_flags &= ~MF_PAX_SEGMEXEC;
39352+ else
39353+ pax_flags &= ~MF_PAX_PAGEEXEC;
39354+ }
39355+#endif
39356+
39357+#ifdef CONFIG_PAX_EMUTRAMP
39358+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39359+ pax_flags |= MF_PAX_EMUTRAMP;
39360+#endif
39361+
39362+#ifdef CONFIG_PAX_MPROTECT
39363+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39364+ pax_flags |= MF_PAX_MPROTECT;
39365+#endif
39366+
39367+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39368+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39369+ pax_flags |= MF_PAX_RANDMMAP;
39370+#endif
39371+
39372+ return pax_flags;
39373+}
39374+#endif
39375+
39376+#ifdef CONFIG_PAX_EI_PAX
39377+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39378+{
39379+ unsigned long pax_flags = 0UL;
39380+
39381+#ifdef CONFIG_PAX_PAGEEXEC
39382+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39383+ pax_flags |= MF_PAX_PAGEEXEC;
39384+#endif
39385+
39386+#ifdef CONFIG_PAX_SEGMEXEC
39387+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39388+ pax_flags |= MF_PAX_SEGMEXEC;
39389+#endif
39390+
39391+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39392+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39393+ if ((__supported_pte_mask & _PAGE_NX))
39394+ pax_flags &= ~MF_PAX_SEGMEXEC;
39395+ else
39396+ pax_flags &= ~MF_PAX_PAGEEXEC;
39397+ }
39398+#endif
39399+
39400+#ifdef CONFIG_PAX_EMUTRAMP
39401+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39402+ pax_flags |= MF_PAX_EMUTRAMP;
39403+#endif
39404+
39405+#ifdef CONFIG_PAX_MPROTECT
39406+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39407+ pax_flags |= MF_PAX_MPROTECT;
39408+#endif
39409+
39410+#ifdef CONFIG_PAX_ASLR
39411+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39412+ pax_flags |= MF_PAX_RANDMMAP;
39413+#endif
39414+
39415+ return pax_flags;
39416+}
39417+#endif
39418+
39419+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39420+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39421+{
39422+ unsigned long pax_flags = 0UL;
39423+
39424+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39425+ unsigned long i;
39426+ int found_flags = 0;
39427+#endif
39428+
39429+#ifdef CONFIG_PAX_EI_PAX
39430+ pax_flags = pax_parse_ei_pax(elf_ex);
39431+#endif
39432+
39433+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39434+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39435+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39436+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39437+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39438+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39439+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39440+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39441+ return -EINVAL;
39442+
39443+#ifdef CONFIG_PAX_SOFTMODE
39444+ if (pax_softmode)
39445+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39446+ else
39447+#endif
39448+
39449+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39450+ found_flags = 1;
39451+ break;
39452+ }
39453+#endif
39454+
39455+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39456+ if (found_flags == 0) {
39457+ struct elf_phdr phdr;
39458+ memset(&phdr, 0, sizeof(phdr));
39459+ phdr.p_flags = PF_NOEMUTRAMP;
39460+#ifdef CONFIG_PAX_SOFTMODE
39461+ if (pax_softmode)
39462+ pax_flags = pax_parse_softmode(&phdr);
39463+ else
39464+#endif
39465+ pax_flags = pax_parse_hardmode(&phdr);
39466+ }
39467+#endif
39468+
39469+ if (0 > pax_check_flags(&pax_flags))
39470+ return -EINVAL;
39471+
39472+ current->mm->pax_flags = pax_flags;
39473+ return 0;
39474+}
39475+#endif
39476+
39477 /*
39478 * These are the functions used to load ELF style executables and shared
39479 * libraries. There is no binary dependent code anywhere else.
39480@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
39481 {
39482 unsigned int random_variable = 0;
39483
39484+#ifdef CONFIG_PAX_RANDUSTACK
39485+ if (randomize_va_space)
39486+ return stack_top - current->mm->delta_stack;
39487+#endif
39488+
39489 if ((current->flags & PF_RANDOMIZE) &&
39490 !(current->personality & ADDR_NO_RANDOMIZE)) {
39491 random_variable = get_random_int() & STACK_RND_MASK;
39492@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
39493 unsigned long load_addr = 0, load_bias = 0;
39494 int load_addr_set = 0;
39495 char * elf_interpreter = NULL;
39496- unsigned long error;
39497+ unsigned long error = 0;
39498 struct elf_phdr *elf_ppnt, *elf_phdata;
39499 unsigned long elf_bss, elf_brk;
39500 int retval, i;
39501@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
39502 unsigned long start_code, end_code, start_data, end_data;
39503 unsigned long reloc_func_desc __maybe_unused = 0;
39504 int executable_stack = EXSTACK_DEFAULT;
39505- unsigned long def_flags = 0;
39506 struct {
39507 struct elfhdr elf_ex;
39508 struct elfhdr interp_elf_ex;
39509 } *loc;
39510+ unsigned long pax_task_size = TASK_SIZE;
39511
39512 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39513 if (!loc) {
39514@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
39515
39516 /* OK, This is the point of no return */
39517 current->flags &= ~PF_FORKNOEXEC;
39518- current->mm->def_flags = def_flags;
39519+
39520+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39521+ current->mm->pax_flags = 0UL;
39522+#endif
39523+
39524+#ifdef CONFIG_PAX_DLRESOLVE
39525+ current->mm->call_dl_resolve = 0UL;
39526+#endif
39527+
39528+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39529+ current->mm->call_syscall = 0UL;
39530+#endif
39531+
39532+#ifdef CONFIG_PAX_ASLR
39533+ current->mm->delta_mmap = 0UL;
39534+ current->mm->delta_stack = 0UL;
39535+#endif
39536+
39537+ current->mm->def_flags = 0;
39538+
39539+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39540+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39541+ send_sig(SIGKILL, current, 0);
39542+ goto out_free_dentry;
39543+ }
39544+#endif
39545+
39546+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39547+ pax_set_initial_flags(bprm);
39548+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39549+ if (pax_set_initial_flags_func)
39550+ (pax_set_initial_flags_func)(bprm);
39551+#endif
39552+
39553+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39554+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
39555+ current->mm->context.user_cs_limit = PAGE_SIZE;
39556+ current->mm->def_flags |= VM_PAGEEXEC;
39557+ }
39558+#endif
39559+
39560+#ifdef CONFIG_PAX_SEGMEXEC
39561+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39562+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39563+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39564+ pax_task_size = SEGMEXEC_TASK_SIZE;
39565+ current->mm->def_flags |= VM_NOHUGEPAGE;
39566+ }
39567+#endif
39568+
39569+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39570+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39571+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39572+ put_cpu();
39573+ }
39574+#endif
39575
39576 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39577 may depend on the personality. */
39578 SET_PERSONALITY(loc->elf_ex);
39579+
39580+#ifdef CONFIG_PAX_ASLR
39581+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39582+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39583+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39584+ }
39585+#endif
39586+
39587+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39588+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39589+ executable_stack = EXSTACK_DISABLE_X;
39590+ current->personality &= ~READ_IMPLIES_EXEC;
39591+ } else
39592+#endif
39593+
39594 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39595 current->personality |= READ_IMPLIES_EXEC;
39596
39597@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
39598 #else
39599 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39600 #endif
39601+
39602+#ifdef CONFIG_PAX_RANDMMAP
39603+ /* PaX: randomize base address at the default exe base if requested */
39604+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39605+#ifdef CONFIG_SPARC64
39606+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39607+#else
39608+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39609+#endif
39610+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39611+ elf_flags |= MAP_FIXED;
39612+ }
39613+#endif
39614+
39615 }
39616
39617 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39618@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
39619 * allowed task size. Note that p_filesz must always be
39620 * <= p_memsz so it is only necessary to check p_memsz.
39621 */
39622- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39623- elf_ppnt->p_memsz > TASK_SIZE ||
39624- TASK_SIZE - elf_ppnt->p_memsz < k) {
39625+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39626+ elf_ppnt->p_memsz > pax_task_size ||
39627+ pax_task_size - elf_ppnt->p_memsz < k) {
39628 /* set_brk can never work. Avoid overflows. */
39629 send_sig(SIGKILL, current, 0);
39630 retval = -EINVAL;
39631@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
39632 start_data += load_bias;
39633 end_data += load_bias;
39634
39635+#ifdef CONFIG_PAX_RANDMMAP
39636+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39637+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39638+#endif
39639+
39640 /* Calling set_brk effectively mmaps the pages that we need
39641 * for the bss and break sections. We must do this before
39642 * mapping in the interpreter, to make sure it doesn't wind
39643@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
39644 goto out_free_dentry;
39645 }
39646 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39647- send_sig(SIGSEGV, current, 0);
39648- retval = -EFAULT; /* Nobody gets to see this, but.. */
39649- goto out_free_dentry;
39650+ /*
39651+ * This bss-zeroing can fail if the ELF
39652+ * file specifies odd protections. So
39653+ * we don't check the return value
39654+ */
39655 }
39656
39657 if (elf_interpreter) {
39658@@ -1090,7 +1398,7 @@ out:
39659 * Decide what to dump of a segment, part, all or none.
39660 */
39661 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39662- unsigned long mm_flags)
39663+ unsigned long mm_flags, long signr)
39664 {
39665 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39666
39667@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
39668 if (vma->vm_file == NULL)
39669 return 0;
39670
39671- if (FILTER(MAPPED_PRIVATE))
39672+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39673 goto whole;
39674
39675 /*
39676@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
39677 {
39678 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39679 int i = 0;
39680- do
39681+ do {
39682 i += 2;
39683- while (auxv[i - 2] != AT_NULL);
39684+ } while (auxv[i - 2] != AT_NULL);
39685 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39686 }
39687
39688@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
39689 }
39690
39691 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
39692- unsigned long mm_flags)
39693+ struct coredump_params *cprm)
39694 {
39695 struct vm_area_struct *vma;
39696 size_t size = 0;
39697
39698 for (vma = first_vma(current, gate_vma); vma != NULL;
39699 vma = next_vma(vma, gate_vma))
39700- size += vma_dump_size(vma, mm_flags);
39701+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39702 return size;
39703 }
39704
39705@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
39706
39707 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
39708
39709- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
39710+ offset += elf_core_vma_data_size(gate_vma, cprm);
39711 offset += elf_core_extra_data_size();
39712 e_shoff = offset;
39713
39714@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
39715 offset = dataoff;
39716
39717 size += sizeof(*elf);
39718+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39719 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
39720 goto end_coredump;
39721
39722 size += sizeof(*phdr4note);
39723+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39724 if (size > cprm->limit
39725 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
39726 goto end_coredump;
39727@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
39728 phdr.p_offset = offset;
39729 phdr.p_vaddr = vma->vm_start;
39730 phdr.p_paddr = 0;
39731- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
39732+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39733 phdr.p_memsz = vma->vm_end - vma->vm_start;
39734 offset += phdr.p_filesz;
39735 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39736@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
39737 phdr.p_align = ELF_EXEC_PAGESIZE;
39738
39739 size += sizeof(phdr);
39740+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39741 if (size > cprm->limit
39742 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
39743 goto end_coredump;
39744@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
39745 unsigned long addr;
39746 unsigned long end;
39747
39748- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
39749+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39750
39751 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39752 struct page *page;
39753@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
39754 page = get_dump_page(addr);
39755 if (page) {
39756 void *kaddr = kmap(page);
39757+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39758 stop = ((size += PAGE_SIZE) > cprm->limit) ||
39759 !dump_write(cprm->file, kaddr,
39760 PAGE_SIZE);
39761@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
39762
39763 if (e_phnum == PN_XNUM) {
39764 size += sizeof(*shdr4extnum);
39765+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39766 if (size > cprm->limit
39767 || !dump_write(cprm->file, shdr4extnum,
39768 sizeof(*shdr4extnum)))
39769@@ -2067,6 +2380,97 @@ out:
39770
39771 #endif /* CONFIG_ELF_CORE */
39772
39773+#ifdef CONFIG_PAX_MPROTECT
39774+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39775+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39776+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39777+ *
39778+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39779+ * basis because we want to allow the common case and not the special ones.
39780+ */
39781+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39782+{
39783+ struct elfhdr elf_h;
39784+ struct elf_phdr elf_p;
39785+ unsigned long i;
39786+ unsigned long oldflags;
39787+ bool is_textrel_rw, is_textrel_rx, is_relro;
39788+
39789+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39790+ return;
39791+
39792+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39793+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39794+
39795+#ifdef CONFIG_PAX_ELFRELOCS
39796+ /* possible TEXTREL */
39797+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39798+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39799+#else
39800+ is_textrel_rw = false;
39801+ is_textrel_rx = false;
39802+#endif
39803+
39804+ /* possible RELRO */
39805+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39806+
39807+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39808+ return;
39809+
39810+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39811+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39812+
39813+#ifdef CONFIG_PAX_ETEXECRELOCS
39814+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39815+#else
39816+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39817+#endif
39818+
39819+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39820+ !elf_check_arch(&elf_h) ||
39821+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39822+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39823+ return;
39824+
39825+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39826+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39827+ return;
39828+ switch (elf_p.p_type) {
39829+ case PT_DYNAMIC:
39830+ if (!is_textrel_rw && !is_textrel_rx)
39831+ continue;
39832+ i = 0UL;
39833+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39834+ elf_dyn dyn;
39835+
39836+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39837+ return;
39838+ if (dyn.d_tag == DT_NULL)
39839+ return;
39840+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39841+ gr_log_textrel(vma);
39842+ if (is_textrel_rw)
39843+ vma->vm_flags |= VM_MAYWRITE;
39844+ else
39845+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39846+ vma->vm_flags &= ~VM_MAYWRITE;
39847+ return;
39848+ }
39849+ i++;
39850+ }
39851+ return;
39852+
39853+ case PT_GNU_RELRO:
39854+ if (!is_relro)
39855+ continue;
39856+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39857+ vma->vm_flags &= ~VM_MAYWRITE;
39858+ return;
39859+ }
39860+ }
39861+}
39862+#endif
39863+
39864 static int __init init_elf_binfmt(void)
39865 {
39866 return register_binfmt(&elf_format);
39867diff -urNp linux-3.0.7/fs/binfmt_flat.c linux-3.0.7/fs/binfmt_flat.c
39868--- linux-3.0.7/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
39869+++ linux-3.0.7/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
39870@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
39871 realdatastart = (unsigned long) -ENOMEM;
39872 printk("Unable to allocate RAM for process data, errno %d\n",
39873 (int)-realdatastart);
39874+ down_write(&current->mm->mmap_sem);
39875 do_munmap(current->mm, textpos, text_len);
39876+ up_write(&current->mm->mmap_sem);
39877 ret = realdatastart;
39878 goto err;
39879 }
39880@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
39881 }
39882 if (IS_ERR_VALUE(result)) {
39883 printk("Unable to read data+bss, errno %d\n", (int)-result);
39884+ down_write(&current->mm->mmap_sem);
39885 do_munmap(current->mm, textpos, text_len);
39886 do_munmap(current->mm, realdatastart, len);
39887+ up_write(&current->mm->mmap_sem);
39888 ret = result;
39889 goto err;
39890 }
39891@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
39892 }
39893 if (IS_ERR_VALUE(result)) {
39894 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39895+ down_write(&current->mm->mmap_sem);
39896 do_munmap(current->mm, textpos, text_len + data_len + extra +
39897 MAX_SHARED_LIBS * sizeof(unsigned long));
39898+ up_write(&current->mm->mmap_sem);
39899 ret = result;
39900 goto err;
39901 }
39902diff -urNp linux-3.0.7/fs/bio.c linux-3.0.7/fs/bio.c
39903--- linux-3.0.7/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
39904+++ linux-3.0.7/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
39905@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
39906 const int read = bio_data_dir(bio) == READ;
39907 struct bio_map_data *bmd = bio->bi_private;
39908 int i;
39909- char *p = bmd->sgvecs[0].iov_base;
39910+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
39911
39912 __bio_for_each_segment(bvec, bio, i, 0) {
39913 char *addr = page_address(bvec->bv_page);
39914diff -urNp linux-3.0.7/fs/block_dev.c linux-3.0.7/fs/block_dev.c
39915--- linux-3.0.7/fs/block_dev.c 2011-10-16 21:54:54.000000000 -0400
39916+++ linux-3.0.7/fs/block_dev.c 2011-10-16 21:55:28.000000000 -0400
39917@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
39918 else if (bdev->bd_contains == bdev)
39919 return true; /* is a whole device which isn't held */
39920
39921- else if (whole->bd_holder == bd_may_claim)
39922+ else if (whole->bd_holder == (void *)bd_may_claim)
39923 return true; /* is a partition of a device that is being partitioned */
39924 else if (whole->bd_holder != NULL)
39925 return false; /* is a partition of a held device */
39926diff -urNp linux-3.0.7/fs/btrfs/ctree.c linux-3.0.7/fs/btrfs/ctree.c
39927--- linux-3.0.7/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
39928+++ linux-3.0.7/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
39929@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
39930 free_extent_buffer(buf);
39931 add_root_to_dirty_list(root);
39932 } else {
39933- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39934- parent_start = parent->start;
39935- else
39936+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39937+ if (parent)
39938+ parent_start = parent->start;
39939+ else
39940+ parent_start = 0;
39941+ } else
39942 parent_start = 0;
39943
39944 WARN_ON(trans->transid != btrfs_header_generation(parent));
39945diff -urNp linux-3.0.7/fs/btrfs/inode.c linux-3.0.7/fs/btrfs/inode.c
39946--- linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:54:54.000000000 -0400
39947+++ linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:55:28.000000000 -0400
39948@@ -6896,7 +6896,7 @@ fail:
39949 return -ENOMEM;
39950 }
39951
39952-static int btrfs_getattr(struct vfsmount *mnt,
39953+int btrfs_getattr(struct vfsmount *mnt,
39954 struct dentry *dentry, struct kstat *stat)
39955 {
39956 struct inode *inode = dentry->d_inode;
39957@@ -6908,6 +6908,14 @@ static int btrfs_getattr(struct vfsmount
39958 return 0;
39959 }
39960
39961+EXPORT_SYMBOL(btrfs_getattr);
39962+
39963+dev_t get_btrfs_dev_from_inode(struct inode *inode)
39964+{
39965+ return BTRFS_I(inode)->root->anon_super.s_dev;
39966+}
39967+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39968+
39969 /*
39970 * If a file is moved, it will inherit the cow and compression flags of the new
39971 * directory.
39972diff -urNp linux-3.0.7/fs/btrfs/ioctl.c linux-3.0.7/fs/btrfs/ioctl.c
39973--- linux-3.0.7/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
39974+++ linux-3.0.7/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
39975@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
39976 for (i = 0; i < num_types; i++) {
39977 struct btrfs_space_info *tmp;
39978
39979+ /* Don't copy in more than we allocated */
39980 if (!slot_count)
39981 break;
39982
39983+ slot_count--;
39984+
39985 info = NULL;
39986 rcu_read_lock();
39987 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
39988@@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
39989 memcpy(dest, &space, sizeof(space));
39990 dest++;
39991 space_args.total_spaces++;
39992- slot_count--;
39993 }
39994- if (!slot_count)
39995- break;
39996 }
39997 up_read(&info->groups_sem);
39998 }
39999
40000- user_dest = (struct btrfs_ioctl_space_info *)
40001+ user_dest = (struct btrfs_ioctl_space_info __user *)
40002 (arg + sizeof(struct btrfs_ioctl_space_args));
40003
40004 if (copy_to_user(user_dest, dest_orig, alloc_size))
40005diff -urNp linux-3.0.7/fs/btrfs/relocation.c linux-3.0.7/fs/btrfs/relocation.c
40006--- linux-3.0.7/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
40007+++ linux-3.0.7/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
40008@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40009 }
40010 spin_unlock(&rc->reloc_root_tree.lock);
40011
40012- BUG_ON((struct btrfs_root *)node->data != root);
40013+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40014
40015 if (!del) {
40016 spin_lock(&rc->reloc_root_tree.lock);
40017diff -urNp linux-3.0.7/fs/cachefiles/bind.c linux-3.0.7/fs/cachefiles/bind.c
40018--- linux-3.0.7/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
40019+++ linux-3.0.7/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
40020@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40021 args);
40022
40023 /* start by checking things over */
40024- ASSERT(cache->fstop_percent >= 0 &&
40025- cache->fstop_percent < cache->fcull_percent &&
40026+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40027 cache->fcull_percent < cache->frun_percent &&
40028 cache->frun_percent < 100);
40029
40030- ASSERT(cache->bstop_percent >= 0 &&
40031- cache->bstop_percent < cache->bcull_percent &&
40032+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40033 cache->bcull_percent < cache->brun_percent &&
40034 cache->brun_percent < 100);
40035
40036diff -urNp linux-3.0.7/fs/cachefiles/daemon.c linux-3.0.7/fs/cachefiles/daemon.c
40037--- linux-3.0.7/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
40038+++ linux-3.0.7/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
40039@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40040 if (n > buflen)
40041 return -EMSGSIZE;
40042
40043- if (copy_to_user(_buffer, buffer, n) != 0)
40044+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40045 return -EFAULT;
40046
40047 return n;
40048@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40049 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40050 return -EIO;
40051
40052- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40053+ if (datalen > PAGE_SIZE - 1)
40054 return -EOPNOTSUPP;
40055
40056 /* drag the command string into the kernel so we can parse it */
40057@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40058 if (args[0] != '%' || args[1] != '\0')
40059 return -EINVAL;
40060
40061- if (fstop < 0 || fstop >= cache->fcull_percent)
40062+ if (fstop >= cache->fcull_percent)
40063 return cachefiles_daemon_range_error(cache, args);
40064
40065 cache->fstop_percent = fstop;
40066@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40067 if (args[0] != '%' || args[1] != '\0')
40068 return -EINVAL;
40069
40070- if (bstop < 0 || bstop >= cache->bcull_percent)
40071+ if (bstop >= cache->bcull_percent)
40072 return cachefiles_daemon_range_error(cache, args);
40073
40074 cache->bstop_percent = bstop;
40075diff -urNp linux-3.0.7/fs/cachefiles/internal.h linux-3.0.7/fs/cachefiles/internal.h
40076--- linux-3.0.7/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
40077+++ linux-3.0.7/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
40078@@ -57,7 +57,7 @@ struct cachefiles_cache {
40079 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40080 struct rb_root active_nodes; /* active nodes (can't be culled) */
40081 rwlock_t active_lock; /* lock for active_nodes */
40082- atomic_t gravecounter; /* graveyard uniquifier */
40083+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40084 unsigned frun_percent; /* when to stop culling (% files) */
40085 unsigned fcull_percent; /* when to start culling (% files) */
40086 unsigned fstop_percent; /* when to stop allocating (% files) */
40087@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40088 * proc.c
40089 */
40090 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40091-extern atomic_t cachefiles_lookup_histogram[HZ];
40092-extern atomic_t cachefiles_mkdir_histogram[HZ];
40093-extern atomic_t cachefiles_create_histogram[HZ];
40094+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40095+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40096+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40097
40098 extern int __init cachefiles_proc_init(void);
40099 extern void cachefiles_proc_cleanup(void);
40100 static inline
40101-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40102+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40103 {
40104 unsigned long jif = jiffies - start_jif;
40105 if (jif >= HZ)
40106 jif = HZ - 1;
40107- atomic_inc(&histogram[jif]);
40108+ atomic_inc_unchecked(&histogram[jif]);
40109 }
40110
40111 #else
40112diff -urNp linux-3.0.7/fs/cachefiles/namei.c linux-3.0.7/fs/cachefiles/namei.c
40113--- linux-3.0.7/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
40114+++ linux-3.0.7/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
40115@@ -318,7 +318,7 @@ try_again:
40116 /* first step is to make up a grave dentry in the graveyard */
40117 sprintf(nbuffer, "%08x%08x",
40118 (uint32_t) get_seconds(),
40119- (uint32_t) atomic_inc_return(&cache->gravecounter));
40120+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40121
40122 /* do the multiway lock magic */
40123 trap = lock_rename(cache->graveyard, dir);
40124diff -urNp linux-3.0.7/fs/cachefiles/proc.c linux-3.0.7/fs/cachefiles/proc.c
40125--- linux-3.0.7/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
40126+++ linux-3.0.7/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
40127@@ -14,9 +14,9 @@
40128 #include <linux/seq_file.h>
40129 #include "internal.h"
40130
40131-atomic_t cachefiles_lookup_histogram[HZ];
40132-atomic_t cachefiles_mkdir_histogram[HZ];
40133-atomic_t cachefiles_create_histogram[HZ];
40134+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40135+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40136+atomic_unchecked_t cachefiles_create_histogram[HZ];
40137
40138 /*
40139 * display the latency histogram
40140@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40141 return 0;
40142 default:
40143 index = (unsigned long) v - 3;
40144- x = atomic_read(&cachefiles_lookup_histogram[index]);
40145- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40146- z = atomic_read(&cachefiles_create_histogram[index]);
40147+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40148+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40149+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40150 if (x == 0 && y == 0 && z == 0)
40151 return 0;
40152
40153diff -urNp linux-3.0.7/fs/cachefiles/rdwr.c linux-3.0.7/fs/cachefiles/rdwr.c
40154--- linux-3.0.7/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
40155+++ linux-3.0.7/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
40156@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40157 old_fs = get_fs();
40158 set_fs(KERNEL_DS);
40159 ret = file->f_op->write(
40160- file, (const void __user *) data, len, &pos);
40161+ file, (const void __force_user *) data, len, &pos);
40162 set_fs(old_fs);
40163 kunmap(page);
40164 if (ret != len)
40165diff -urNp linux-3.0.7/fs/ceph/dir.c linux-3.0.7/fs/ceph/dir.c
40166--- linux-3.0.7/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
40167+++ linux-3.0.7/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
40168@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
40169 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40170 struct ceph_mds_client *mdsc = fsc->mdsc;
40171 unsigned frag = fpos_frag(filp->f_pos);
40172- int off = fpos_off(filp->f_pos);
40173+ unsigned int off = fpos_off(filp->f_pos);
40174 int err;
40175 u32 ftype;
40176 struct ceph_mds_reply_info_parsed *rinfo;
40177diff -urNp linux-3.0.7/fs/cifs/cifs_debug.c linux-3.0.7/fs/cifs/cifs_debug.c
40178--- linux-3.0.7/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
40179+++ linux-3.0.7/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
40180@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40181
40182 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40183 #ifdef CONFIG_CIFS_STATS2
40184- atomic_set(&totBufAllocCount, 0);
40185- atomic_set(&totSmBufAllocCount, 0);
40186+ atomic_set_unchecked(&totBufAllocCount, 0);
40187+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40188 #endif /* CONFIG_CIFS_STATS2 */
40189 spin_lock(&cifs_tcp_ses_lock);
40190 list_for_each(tmp1, &cifs_tcp_ses_list) {
40191@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40192 tcon = list_entry(tmp3,
40193 struct cifs_tcon,
40194 tcon_list);
40195- atomic_set(&tcon->num_smbs_sent, 0);
40196- atomic_set(&tcon->num_writes, 0);
40197- atomic_set(&tcon->num_reads, 0);
40198- atomic_set(&tcon->num_oplock_brks, 0);
40199- atomic_set(&tcon->num_opens, 0);
40200- atomic_set(&tcon->num_posixopens, 0);
40201- atomic_set(&tcon->num_posixmkdirs, 0);
40202- atomic_set(&tcon->num_closes, 0);
40203- atomic_set(&tcon->num_deletes, 0);
40204- atomic_set(&tcon->num_mkdirs, 0);
40205- atomic_set(&tcon->num_rmdirs, 0);
40206- atomic_set(&tcon->num_renames, 0);
40207- atomic_set(&tcon->num_t2renames, 0);
40208- atomic_set(&tcon->num_ffirst, 0);
40209- atomic_set(&tcon->num_fnext, 0);
40210- atomic_set(&tcon->num_fclose, 0);
40211- atomic_set(&tcon->num_hardlinks, 0);
40212- atomic_set(&tcon->num_symlinks, 0);
40213- atomic_set(&tcon->num_locks, 0);
40214+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40215+ atomic_set_unchecked(&tcon->num_writes, 0);
40216+ atomic_set_unchecked(&tcon->num_reads, 0);
40217+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40218+ atomic_set_unchecked(&tcon->num_opens, 0);
40219+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40220+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40221+ atomic_set_unchecked(&tcon->num_closes, 0);
40222+ atomic_set_unchecked(&tcon->num_deletes, 0);
40223+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40224+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40225+ atomic_set_unchecked(&tcon->num_renames, 0);
40226+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40227+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40228+ atomic_set_unchecked(&tcon->num_fnext, 0);
40229+ atomic_set_unchecked(&tcon->num_fclose, 0);
40230+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40231+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40232+ atomic_set_unchecked(&tcon->num_locks, 0);
40233 }
40234 }
40235 }
40236@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40237 smBufAllocCount.counter, cifs_min_small);
40238 #ifdef CONFIG_CIFS_STATS2
40239 seq_printf(m, "Total Large %d Small %d Allocations\n",
40240- atomic_read(&totBufAllocCount),
40241- atomic_read(&totSmBufAllocCount));
40242+ atomic_read_unchecked(&totBufAllocCount),
40243+ atomic_read_unchecked(&totSmBufAllocCount));
40244 #endif /* CONFIG_CIFS_STATS2 */
40245
40246 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40247@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40248 if (tcon->need_reconnect)
40249 seq_puts(m, "\tDISCONNECTED ");
40250 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40251- atomic_read(&tcon->num_smbs_sent),
40252- atomic_read(&tcon->num_oplock_brks));
40253+ atomic_read_unchecked(&tcon->num_smbs_sent),
40254+ atomic_read_unchecked(&tcon->num_oplock_brks));
40255 seq_printf(m, "\nReads: %d Bytes: %lld",
40256- atomic_read(&tcon->num_reads),
40257+ atomic_read_unchecked(&tcon->num_reads),
40258 (long long)(tcon->bytes_read));
40259 seq_printf(m, "\nWrites: %d Bytes: %lld",
40260- atomic_read(&tcon->num_writes),
40261+ atomic_read_unchecked(&tcon->num_writes),
40262 (long long)(tcon->bytes_written));
40263 seq_printf(m, "\nFlushes: %d",
40264- atomic_read(&tcon->num_flushes));
40265+ atomic_read_unchecked(&tcon->num_flushes));
40266 seq_printf(m, "\nLocks: %d HardLinks: %d "
40267 "Symlinks: %d",
40268- atomic_read(&tcon->num_locks),
40269- atomic_read(&tcon->num_hardlinks),
40270- atomic_read(&tcon->num_symlinks));
40271+ atomic_read_unchecked(&tcon->num_locks),
40272+ atomic_read_unchecked(&tcon->num_hardlinks),
40273+ atomic_read_unchecked(&tcon->num_symlinks));
40274 seq_printf(m, "\nOpens: %d Closes: %d "
40275 "Deletes: %d",
40276- atomic_read(&tcon->num_opens),
40277- atomic_read(&tcon->num_closes),
40278- atomic_read(&tcon->num_deletes));
40279+ atomic_read_unchecked(&tcon->num_opens),
40280+ atomic_read_unchecked(&tcon->num_closes),
40281+ atomic_read_unchecked(&tcon->num_deletes));
40282 seq_printf(m, "\nPosix Opens: %d "
40283 "Posix Mkdirs: %d",
40284- atomic_read(&tcon->num_posixopens),
40285- atomic_read(&tcon->num_posixmkdirs));
40286+ atomic_read_unchecked(&tcon->num_posixopens),
40287+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40288 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40289- atomic_read(&tcon->num_mkdirs),
40290- atomic_read(&tcon->num_rmdirs));
40291+ atomic_read_unchecked(&tcon->num_mkdirs),
40292+ atomic_read_unchecked(&tcon->num_rmdirs));
40293 seq_printf(m, "\nRenames: %d T2 Renames %d",
40294- atomic_read(&tcon->num_renames),
40295- atomic_read(&tcon->num_t2renames));
40296+ atomic_read_unchecked(&tcon->num_renames),
40297+ atomic_read_unchecked(&tcon->num_t2renames));
40298 seq_printf(m, "\nFindFirst: %d FNext %d "
40299 "FClose %d",
40300- atomic_read(&tcon->num_ffirst),
40301- atomic_read(&tcon->num_fnext),
40302- atomic_read(&tcon->num_fclose));
40303+ atomic_read_unchecked(&tcon->num_ffirst),
40304+ atomic_read_unchecked(&tcon->num_fnext),
40305+ atomic_read_unchecked(&tcon->num_fclose));
40306 }
40307 }
40308 }
40309diff -urNp linux-3.0.7/fs/cifs/cifsfs.c linux-3.0.7/fs/cifs/cifsfs.c
40310--- linux-3.0.7/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
40311+++ linux-3.0.7/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
40312@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
40313 cifs_req_cachep = kmem_cache_create("cifs_request",
40314 CIFSMaxBufSize +
40315 MAX_CIFS_HDR_SIZE, 0,
40316- SLAB_HWCACHE_ALIGN, NULL);
40317+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40318 if (cifs_req_cachep == NULL)
40319 return -ENOMEM;
40320
40321@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
40322 efficient to alloc 1 per page off the slab compared to 17K (5page)
40323 alloc of large cifs buffers even when page debugging is on */
40324 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40325- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40326+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40327 NULL);
40328 if (cifs_sm_req_cachep == NULL) {
40329 mempool_destroy(cifs_req_poolp);
40330@@ -1106,8 +1106,8 @@ init_cifs(void)
40331 atomic_set(&bufAllocCount, 0);
40332 atomic_set(&smBufAllocCount, 0);
40333 #ifdef CONFIG_CIFS_STATS2
40334- atomic_set(&totBufAllocCount, 0);
40335- atomic_set(&totSmBufAllocCount, 0);
40336+ atomic_set_unchecked(&totBufAllocCount, 0);
40337+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40338 #endif /* CONFIG_CIFS_STATS2 */
40339
40340 atomic_set(&midCount, 0);
40341diff -urNp linux-3.0.7/fs/cifs/cifsglob.h linux-3.0.7/fs/cifs/cifsglob.h
40342--- linux-3.0.7/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
40343+++ linux-3.0.7/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
40344@@ -381,28 +381,28 @@ struct cifs_tcon {
40345 __u16 Flags; /* optional support bits */
40346 enum statusEnum tidStatus;
40347 #ifdef CONFIG_CIFS_STATS
40348- atomic_t num_smbs_sent;
40349- atomic_t num_writes;
40350- atomic_t num_reads;
40351- atomic_t num_flushes;
40352- atomic_t num_oplock_brks;
40353- atomic_t num_opens;
40354- atomic_t num_closes;
40355- atomic_t num_deletes;
40356- atomic_t num_mkdirs;
40357- atomic_t num_posixopens;
40358- atomic_t num_posixmkdirs;
40359- atomic_t num_rmdirs;
40360- atomic_t num_renames;
40361- atomic_t num_t2renames;
40362- atomic_t num_ffirst;
40363- atomic_t num_fnext;
40364- atomic_t num_fclose;
40365- atomic_t num_hardlinks;
40366- atomic_t num_symlinks;
40367- atomic_t num_locks;
40368- atomic_t num_acl_get;
40369- atomic_t num_acl_set;
40370+ atomic_unchecked_t num_smbs_sent;
40371+ atomic_unchecked_t num_writes;
40372+ atomic_unchecked_t num_reads;
40373+ atomic_unchecked_t num_flushes;
40374+ atomic_unchecked_t num_oplock_brks;
40375+ atomic_unchecked_t num_opens;
40376+ atomic_unchecked_t num_closes;
40377+ atomic_unchecked_t num_deletes;
40378+ atomic_unchecked_t num_mkdirs;
40379+ atomic_unchecked_t num_posixopens;
40380+ atomic_unchecked_t num_posixmkdirs;
40381+ atomic_unchecked_t num_rmdirs;
40382+ atomic_unchecked_t num_renames;
40383+ atomic_unchecked_t num_t2renames;
40384+ atomic_unchecked_t num_ffirst;
40385+ atomic_unchecked_t num_fnext;
40386+ atomic_unchecked_t num_fclose;
40387+ atomic_unchecked_t num_hardlinks;
40388+ atomic_unchecked_t num_symlinks;
40389+ atomic_unchecked_t num_locks;
40390+ atomic_unchecked_t num_acl_get;
40391+ atomic_unchecked_t num_acl_set;
40392 #ifdef CONFIG_CIFS_STATS2
40393 unsigned long long time_writes;
40394 unsigned long long time_reads;
40395@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40396 }
40397
40398 #ifdef CONFIG_CIFS_STATS
40399-#define cifs_stats_inc atomic_inc
40400+#define cifs_stats_inc atomic_inc_unchecked
40401
40402 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40403 unsigned int bytes)
40404@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40405 /* Various Debug counters */
40406 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40407 #ifdef CONFIG_CIFS_STATS2
40408-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40409-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40410+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40411+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40412 #endif
40413 GLOBAL_EXTERN atomic_t smBufAllocCount;
40414 GLOBAL_EXTERN atomic_t midCount;
40415diff -urNp linux-3.0.7/fs/cifs/link.c linux-3.0.7/fs/cifs/link.c
40416--- linux-3.0.7/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
40417+++ linux-3.0.7/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
40418@@ -587,7 +587,7 @@ symlink_exit:
40419
40420 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40421 {
40422- char *p = nd_get_link(nd);
40423+ const char *p = nd_get_link(nd);
40424 if (!IS_ERR(p))
40425 kfree(p);
40426 }
40427diff -urNp linux-3.0.7/fs/cifs/misc.c linux-3.0.7/fs/cifs/misc.c
40428--- linux-3.0.7/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
40429+++ linux-3.0.7/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
40430@@ -156,7 +156,7 @@ cifs_buf_get(void)
40431 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40432 atomic_inc(&bufAllocCount);
40433 #ifdef CONFIG_CIFS_STATS2
40434- atomic_inc(&totBufAllocCount);
40435+ atomic_inc_unchecked(&totBufAllocCount);
40436 #endif /* CONFIG_CIFS_STATS2 */
40437 }
40438
40439@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40440 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40441 atomic_inc(&smBufAllocCount);
40442 #ifdef CONFIG_CIFS_STATS2
40443- atomic_inc(&totSmBufAllocCount);
40444+ atomic_inc_unchecked(&totSmBufAllocCount);
40445 #endif /* CONFIG_CIFS_STATS2 */
40446
40447 }
40448diff -urNp linux-3.0.7/fs/coda/cache.c linux-3.0.7/fs/coda/cache.c
40449--- linux-3.0.7/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
40450+++ linux-3.0.7/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
40451@@ -24,7 +24,7 @@
40452 #include "coda_linux.h"
40453 #include "coda_cache.h"
40454
40455-static atomic_t permission_epoch = ATOMIC_INIT(0);
40456+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40457
40458 /* replace or extend an acl cache hit */
40459 void coda_cache_enter(struct inode *inode, int mask)
40460@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
40461 struct coda_inode_info *cii = ITOC(inode);
40462
40463 spin_lock(&cii->c_lock);
40464- cii->c_cached_epoch = atomic_read(&permission_epoch);
40465+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40466 if (cii->c_uid != current_fsuid()) {
40467 cii->c_uid = current_fsuid();
40468 cii->c_cached_perm = mask;
40469@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
40470 {
40471 struct coda_inode_info *cii = ITOC(inode);
40472 spin_lock(&cii->c_lock);
40473- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40474+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40475 spin_unlock(&cii->c_lock);
40476 }
40477
40478 /* remove all acl caches */
40479 void coda_cache_clear_all(struct super_block *sb)
40480 {
40481- atomic_inc(&permission_epoch);
40482+ atomic_inc_unchecked(&permission_epoch);
40483 }
40484
40485
40486@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
40487 spin_lock(&cii->c_lock);
40488 hit = (mask & cii->c_cached_perm) == mask &&
40489 cii->c_uid == current_fsuid() &&
40490- cii->c_cached_epoch == atomic_read(&permission_epoch);
40491+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40492 spin_unlock(&cii->c_lock);
40493
40494 return hit;
40495diff -urNp linux-3.0.7/fs/compat_binfmt_elf.c linux-3.0.7/fs/compat_binfmt_elf.c
40496--- linux-3.0.7/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
40497+++ linux-3.0.7/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
40498@@ -30,11 +30,13 @@
40499 #undef elf_phdr
40500 #undef elf_shdr
40501 #undef elf_note
40502+#undef elf_dyn
40503 #undef elf_addr_t
40504 #define elfhdr elf32_hdr
40505 #define elf_phdr elf32_phdr
40506 #define elf_shdr elf32_shdr
40507 #define elf_note elf32_note
40508+#define elf_dyn Elf32_Dyn
40509 #define elf_addr_t Elf32_Addr
40510
40511 /*
40512diff -urNp linux-3.0.7/fs/compat.c linux-3.0.7/fs/compat.c
40513--- linux-3.0.7/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
40514+++ linux-3.0.7/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
40515@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
40516 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
40517 {
40518 compat_ino_t ino = stat->ino;
40519- typeof(ubuf->st_uid) uid = 0;
40520- typeof(ubuf->st_gid) gid = 0;
40521+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
40522+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
40523 int err;
40524
40525 SET_UID(uid, stat->uid);
40526@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
40527
40528 set_fs(KERNEL_DS);
40529 /* The __user pointer cast is valid because of the set_fs() */
40530- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
40531+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
40532 set_fs(oldfs);
40533 /* truncating is ok because it's a user address */
40534 if (!ret)
40535@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
40536 goto out;
40537
40538 ret = -EINVAL;
40539- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
40540+ if (nr_segs > UIO_MAXIOV)
40541 goto out;
40542 if (nr_segs > fast_segs) {
40543 ret = -ENOMEM;
40544@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
40545
40546 struct compat_readdir_callback {
40547 struct compat_old_linux_dirent __user *dirent;
40548+ struct file * file;
40549 int result;
40550 };
40551
40552@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
40553 buf->result = -EOVERFLOW;
40554 return -EOVERFLOW;
40555 }
40556+
40557+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40558+ return 0;
40559+
40560 buf->result++;
40561 dirent = buf->dirent;
40562 if (!access_ok(VERIFY_WRITE, dirent,
40563@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
40564
40565 buf.result = 0;
40566 buf.dirent = dirent;
40567+ buf.file = file;
40568
40569 error = vfs_readdir(file, compat_fillonedir, &buf);
40570 if (buf.result)
40571@@ -917,6 +923,7 @@ struct compat_linux_dirent {
40572 struct compat_getdents_callback {
40573 struct compat_linux_dirent __user *current_dir;
40574 struct compat_linux_dirent __user *previous;
40575+ struct file * file;
40576 int count;
40577 int error;
40578 };
40579@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
40580 buf->error = -EOVERFLOW;
40581 return -EOVERFLOW;
40582 }
40583+
40584+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40585+ return 0;
40586+
40587 dirent = buf->previous;
40588 if (dirent) {
40589 if (__put_user(offset, &dirent->d_off))
40590@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
40591 buf.previous = NULL;
40592 buf.count = count;
40593 buf.error = 0;
40594+ buf.file = file;
40595
40596 error = vfs_readdir(file, compat_filldir, &buf);
40597 if (error >= 0)
40598@@ -1006,6 +1018,7 @@ out:
40599 struct compat_getdents_callback64 {
40600 struct linux_dirent64 __user *current_dir;
40601 struct linux_dirent64 __user *previous;
40602+ struct file * file;
40603 int count;
40604 int error;
40605 };
40606@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
40607 buf->error = -EINVAL; /* only used if we fail.. */
40608 if (reclen > buf->count)
40609 return -EINVAL;
40610+
40611+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40612+ return 0;
40613+
40614 dirent = buf->previous;
40615
40616 if (dirent) {
40617@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
40618 buf.previous = NULL;
40619 buf.count = count;
40620 buf.error = 0;
40621+ buf.file = file;
40622
40623 error = vfs_readdir(file, compat_filldir64, &buf);
40624 if (error >= 0)
40625 error = buf.error;
40626 lastdirent = buf.previous;
40627 if (lastdirent) {
40628- typeof(lastdirent->d_off) d_off = file->f_pos;
40629+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
40630 if (__put_user_unaligned(d_off, &lastdirent->d_off))
40631 error = -EFAULT;
40632 else
40633@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
40634 struct fdtable *fdt;
40635 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40636
40637+ pax_track_stack();
40638+
40639 if (n < 0)
40640 goto out_nofds;
40641
40642@@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
40643 oldfs = get_fs();
40644 set_fs(KERNEL_DS);
40645 /* The __user pointer casts are valid because of the set_fs() */
40646- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
40647+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
40648 set_fs(oldfs);
40649
40650 if (err)
40651diff -urNp linux-3.0.7/fs/compat_ioctl.c linux-3.0.7/fs/compat_ioctl.c
40652--- linux-3.0.7/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
40653+++ linux-3.0.7/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
40654@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
40655
40656 err = get_user(palp, &up->palette);
40657 err |= get_user(length, &up->length);
40658+ if (err)
40659+ return -EFAULT;
40660
40661 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40662 err = put_user(compat_ptr(palp), &up_native->palette);
40663@@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
40664 return -EFAULT;
40665 if (__get_user(udata, &ss32->iomem_base))
40666 return -EFAULT;
40667- ss.iomem_base = compat_ptr(udata);
40668+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
40669 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
40670 __get_user(ss.port_high, &ss32->port_high))
40671 return -EFAULT;
40672@@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
40673 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
40674 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
40675 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
40676- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40677+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40678 return -EFAULT;
40679
40680 return ioctl_preallocate(file, p);
40681@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
40682 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
40683 {
40684 unsigned int a, b;
40685- a = *(unsigned int *)p;
40686- b = *(unsigned int *)q;
40687+ a = *(const unsigned int *)p;
40688+ b = *(const unsigned int *)q;
40689 if (a > b)
40690 return 1;
40691 if (a < b)
40692diff -urNp linux-3.0.7/fs/configfs/dir.c linux-3.0.7/fs/configfs/dir.c
40693--- linux-3.0.7/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40694+++ linux-3.0.7/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
40695@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
40696 }
40697 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40698 struct configfs_dirent *next;
40699- const char * name;
40700+ const unsigned char * name;
40701+ char d_name[sizeof(next->s_dentry->d_iname)];
40702 int len;
40703 struct inode *inode = NULL;
40704
40705@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
40706 continue;
40707
40708 name = configfs_get_name(next);
40709- len = strlen(name);
40710+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40711+ len = next->s_dentry->d_name.len;
40712+ memcpy(d_name, name, len);
40713+ name = d_name;
40714+ } else
40715+ len = strlen(name);
40716
40717 /*
40718 * We'll have a dentry and an inode for
40719diff -urNp linux-3.0.7/fs/dcache.c linux-3.0.7/fs/dcache.c
40720--- linux-3.0.7/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
40721+++ linux-3.0.7/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
40722@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
40723 mempages -= reserve;
40724
40725 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40726- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40727+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40728
40729 dcache_init();
40730 inode_init();
40731diff -urNp linux-3.0.7/fs/ecryptfs/inode.c linux-3.0.7/fs/ecryptfs/inode.c
40732--- linux-3.0.7/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
40733+++ linux-3.0.7/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
40734@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
40735 old_fs = get_fs();
40736 set_fs(get_ds());
40737 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40738- (char __user *)lower_buf,
40739+ (char __force_user *)lower_buf,
40740 lower_bufsiz);
40741 set_fs(old_fs);
40742 if (rc < 0)
40743@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
40744 }
40745 old_fs = get_fs();
40746 set_fs(get_ds());
40747- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40748+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
40749 set_fs(old_fs);
40750 if (rc < 0) {
40751 kfree(buf);
40752@@ -765,7 +765,7 @@ out:
40753 static void
40754 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
40755 {
40756- char *buf = nd_get_link(nd);
40757+ const char *buf = nd_get_link(nd);
40758 if (!IS_ERR(buf)) {
40759 /* Free the char* */
40760 kfree(buf);
40761diff -urNp linux-3.0.7/fs/ecryptfs/miscdev.c linux-3.0.7/fs/ecryptfs/miscdev.c
40762--- linux-3.0.7/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
40763+++ linux-3.0.7/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
40764@@ -328,7 +328,7 @@ check_list:
40765 goto out_unlock_msg_ctx;
40766 i = 5;
40767 if (msg_ctx->msg) {
40768- if (copy_to_user(&buf[i], packet_length, packet_length_size))
40769+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
40770 goto out_unlock_msg_ctx;
40771 i += packet_length_size;
40772 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
40773diff -urNp linux-3.0.7/fs/ecryptfs/read_write.c linux-3.0.7/fs/ecryptfs/read_write.c
40774--- linux-3.0.7/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
40775+++ linux-3.0.7/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
40776@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
40777 return -EIO;
40778 fs_save = get_fs();
40779 set_fs(get_ds());
40780- rc = vfs_write(lower_file, data, size, &offset);
40781+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
40782 set_fs(fs_save);
40783 mark_inode_dirty_sync(ecryptfs_inode);
40784 return rc;
40785@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
40786 return -EIO;
40787 fs_save = get_fs();
40788 set_fs(get_ds());
40789- rc = vfs_read(lower_file, data, size, &offset);
40790+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
40791 set_fs(fs_save);
40792 return rc;
40793 }
40794diff -urNp linux-3.0.7/fs/exec.c linux-3.0.7/fs/exec.c
40795--- linux-3.0.7/fs/exec.c 2011-10-17 23:17:09.000000000 -0400
40796+++ linux-3.0.7/fs/exec.c 2011-10-17 23:17:19.000000000 -0400
40797@@ -55,12 +55,24 @@
40798 #include <linux/pipe_fs_i.h>
40799 #include <linux/oom.h>
40800 #include <linux/compat.h>
40801+#include <linux/random.h>
40802+#include <linux/seq_file.h>
40803+
40804+#ifdef CONFIG_PAX_REFCOUNT
40805+#include <linux/kallsyms.h>
40806+#include <linux/kdebug.h>
40807+#endif
40808
40809 #include <asm/uaccess.h>
40810 #include <asm/mmu_context.h>
40811 #include <asm/tlb.h>
40812 #include "internal.h"
40813
40814+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40815+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40816+EXPORT_SYMBOL(pax_set_initial_flags_func);
40817+#endif
40818+
40819 int core_uses_pid;
40820 char core_pattern[CORENAME_MAX_SIZE] = "core";
40821 unsigned int core_pipe_limit;
40822@@ -70,7 +82,7 @@ struct core_name {
40823 char *corename;
40824 int used, size;
40825 };
40826-static atomic_t call_count = ATOMIC_INIT(1);
40827+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
40828
40829 /* The maximal length of core_pattern is also specified in sysctl.c */
40830
40831@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40832 char *tmp = getname(library);
40833 int error = PTR_ERR(tmp);
40834 static const struct open_flags uselib_flags = {
40835- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
40836+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
40837 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
40838 .intent = LOOKUP_OPEN
40839 };
40840@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
40841 int write)
40842 {
40843 struct page *page;
40844- int ret;
40845
40846-#ifdef CONFIG_STACK_GROWSUP
40847- if (write) {
40848- ret = expand_downwards(bprm->vma, pos);
40849- if (ret < 0)
40850- return NULL;
40851- }
40852-#endif
40853- ret = get_user_pages(current, bprm->mm, pos,
40854- 1, write, 1, &page, NULL);
40855- if (ret <= 0)
40856+ if (0 > expand_downwards(bprm->vma, pos))
40857+ return NULL;
40858+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40859 return NULL;
40860
40861 if (write) {
40862@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
40863 vma->vm_end = STACK_TOP_MAX;
40864 vma->vm_start = vma->vm_end - PAGE_SIZE;
40865 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
40866+
40867+#ifdef CONFIG_PAX_SEGMEXEC
40868+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40869+#endif
40870+
40871 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40872 INIT_LIST_HEAD(&vma->anon_vma_chain);
40873
40874@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
40875 mm->stack_vm = mm->total_vm = 1;
40876 up_write(&mm->mmap_sem);
40877 bprm->p = vma->vm_end - sizeof(void *);
40878+
40879+#ifdef CONFIG_PAX_RANDUSTACK
40880+ if (randomize_va_space)
40881+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40882+#endif
40883+
40884 return 0;
40885 err:
40886 up_write(&mm->mmap_sem);
40887@@ -403,19 +418,7 @@ err:
40888 return err;
40889 }
40890
40891-struct user_arg_ptr {
40892-#ifdef CONFIG_COMPAT
40893- bool is_compat;
40894-#endif
40895- union {
40896- const char __user *const __user *native;
40897-#ifdef CONFIG_COMPAT
40898- compat_uptr_t __user *compat;
40899-#endif
40900- } ptr;
40901-};
40902-
40903-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40904+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40905 {
40906 const char __user *native;
40907
40908@@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
40909 compat_uptr_t compat;
40910
40911 if (get_user(compat, argv.ptr.compat + nr))
40912- return ERR_PTR(-EFAULT);
40913+ return (const char __force_user *)ERR_PTR(-EFAULT);
40914
40915 return compat_ptr(compat);
40916 }
40917 #endif
40918
40919 if (get_user(native, argv.ptr.native + nr))
40920- return ERR_PTR(-EFAULT);
40921+ return (const char __force_user *)ERR_PTR(-EFAULT);
40922
40923 return native;
40924 }
40925@@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
40926 if (!p)
40927 break;
40928
40929- if (IS_ERR(p))
40930+ if (IS_ERR((const char __force_kernel *)p))
40931 return -EFAULT;
40932
40933 if (i++ >= max)
40934@@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
40935
40936 ret = -EFAULT;
40937 str = get_user_arg_ptr(argv, argc);
40938- if (IS_ERR(str))
40939+ if (IS_ERR((const char __force_kernel *)str))
40940 goto out;
40941
40942 len = strnlen_user(str, MAX_ARG_STRLEN);
40943@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
40944 int r;
40945 mm_segment_t oldfs = get_fs();
40946 struct user_arg_ptr argv = {
40947- .ptr.native = (const char __user *const __user *)__argv,
40948+ .ptr.native = (const char __force_user *const __force_user *)__argv,
40949 };
40950
40951 set_fs(KERNEL_DS);
40952@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
40953 unsigned long new_end = old_end - shift;
40954 struct mmu_gather tlb;
40955
40956- BUG_ON(new_start > new_end);
40957+ if (new_start >= new_end || new_start < mmap_min_addr)
40958+ return -ENOMEM;
40959
40960 /*
40961 * ensure there are no vmas between where we want to go
40962@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
40963 if (vma != find_vma(mm, new_start))
40964 return -EFAULT;
40965
40966+#ifdef CONFIG_PAX_SEGMEXEC
40967+ BUG_ON(pax_find_mirror_vma(vma));
40968+#endif
40969+
40970 /*
40971 * cover the whole range: [new_start, old_end)
40972 */
40973@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
40974 stack_top = arch_align_stack(stack_top);
40975 stack_top = PAGE_ALIGN(stack_top);
40976
40977- if (unlikely(stack_top < mmap_min_addr) ||
40978- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40979- return -ENOMEM;
40980-
40981 stack_shift = vma->vm_end - stack_top;
40982
40983 bprm->p -= stack_shift;
40984@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
40985 bprm->exec -= stack_shift;
40986
40987 down_write(&mm->mmap_sem);
40988+
40989+ /* Move stack pages down in memory. */
40990+ if (stack_shift) {
40991+ ret = shift_arg_pages(vma, stack_shift);
40992+ if (ret)
40993+ goto out_unlock;
40994+ }
40995+
40996 vm_flags = VM_STACK_FLAGS;
40997
40998+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40999+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41000+ vm_flags &= ~VM_EXEC;
41001+
41002+#ifdef CONFIG_PAX_MPROTECT
41003+ if (mm->pax_flags & MF_PAX_MPROTECT)
41004+ vm_flags &= ~VM_MAYEXEC;
41005+#endif
41006+
41007+ }
41008+#endif
41009+
41010 /*
41011 * Adjust stack execute permissions; explicitly enable for
41012 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41013@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
41014 goto out_unlock;
41015 BUG_ON(prev != vma);
41016
41017- /* Move stack pages down in memory. */
41018- if (stack_shift) {
41019- ret = shift_arg_pages(vma, stack_shift);
41020- if (ret)
41021- goto out_unlock;
41022- }
41023-
41024 /* mprotect_fixup is overkill to remove the temporary stack flags */
41025 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41026
41027@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
41028 struct file *file;
41029 int err;
41030 static const struct open_flags open_exec_flags = {
41031- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41032+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41033 .acc_mode = MAY_EXEC | MAY_OPEN,
41034 .intent = LOOKUP_OPEN
41035 };
41036@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
41037 old_fs = get_fs();
41038 set_fs(get_ds());
41039 /* The cast to a user pointer is valid due to the set_fs() */
41040- result = vfs_read(file, (void __user *)addr, count, &pos);
41041+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41042 set_fs(old_fs);
41043 return result;
41044 }
41045@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
41046 }
41047 rcu_read_unlock();
41048
41049- if (p->fs->users > n_fs) {
41050+ if (atomic_read(&p->fs->users) > n_fs) {
41051 bprm->unsafe |= LSM_UNSAFE_SHARE;
41052 } else {
41053 res = -EAGAIN;
41054@@ -1430,11 +1447,35 @@ static int do_execve_common(const char *
41055 struct user_arg_ptr envp,
41056 struct pt_regs *regs)
41057 {
41058+#ifdef CONFIG_GRKERNSEC
41059+ struct file *old_exec_file;
41060+ struct acl_subject_label *old_acl;
41061+ struct rlimit old_rlim[RLIM_NLIMITS];
41062+#endif
41063 struct linux_binprm *bprm;
41064 struct file *file;
41065 struct files_struct *displaced;
41066 bool clear_in_exec;
41067 int retval;
41068+ const struct cred *cred = current_cred();
41069+
41070+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41071+
41072+ /*
41073+ * We move the actual failure in case of RLIMIT_NPROC excess from
41074+ * set*uid() to execve() because too many poorly written programs
41075+ * don't check setuid() return code. Here we additionally recheck
41076+ * whether NPROC limit is still exceeded.
41077+ */
41078+ if ((current->flags & PF_NPROC_EXCEEDED) &&
41079+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
41080+ retval = -EAGAIN;
41081+ goto out_ret;
41082+ }
41083+
41084+ /* We're below the limit (still or again), so we don't want to make
41085+ * further execve() calls fail. */
41086+ current->flags &= ~PF_NPROC_EXCEEDED;
41087
41088 retval = unshare_files(&displaced);
41089 if (retval)
41090@@ -1466,6 +1507,16 @@ static int do_execve_common(const char *
41091 bprm->filename = filename;
41092 bprm->interp = filename;
41093
41094+ if (gr_process_user_ban()) {
41095+ retval = -EPERM;
41096+ goto out_file;
41097+ }
41098+
41099+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41100+ retval = -EACCES;
41101+ goto out_file;
41102+ }
41103+
41104 retval = bprm_mm_init(bprm);
41105 if (retval)
41106 goto out_file;
41107@@ -1495,9 +1546,40 @@ static int do_execve_common(const char *
41108 if (retval < 0)
41109 goto out;
41110
41111+ if (!gr_tpe_allow(file)) {
41112+ retval = -EACCES;
41113+ goto out;
41114+ }
41115+
41116+ if (gr_check_crash_exec(file)) {
41117+ retval = -EACCES;
41118+ goto out;
41119+ }
41120+
41121+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41122+
41123+ gr_handle_exec_args(bprm, argv);
41124+
41125+#ifdef CONFIG_GRKERNSEC
41126+ old_acl = current->acl;
41127+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41128+ old_exec_file = current->exec_file;
41129+ get_file(file);
41130+ current->exec_file = file;
41131+#endif
41132+
41133+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41134+ bprm->unsafe & LSM_UNSAFE_SHARE);
41135+ if (retval < 0)
41136+ goto out_fail;
41137+
41138 retval = search_binary_handler(bprm,regs);
41139 if (retval < 0)
41140- goto out;
41141+ goto out_fail;
41142+#ifdef CONFIG_GRKERNSEC
41143+ if (old_exec_file)
41144+ fput(old_exec_file);
41145+#endif
41146
41147 /* execve succeeded */
41148 current->fs->in_exec = 0;
41149@@ -1508,6 +1590,14 @@ static int do_execve_common(const char *
41150 put_files_struct(displaced);
41151 return retval;
41152
41153+out_fail:
41154+#ifdef CONFIG_GRKERNSEC
41155+ current->acl = old_acl;
41156+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41157+ fput(current->exec_file);
41158+ current->exec_file = old_exec_file;
41159+#endif
41160+
41161 out:
41162 if (bprm->mm) {
41163 acct_arg_size(bprm, 0);
41164@@ -1581,7 +1671,7 @@ static int expand_corename(struct core_n
41165 {
41166 char *old_corename = cn->corename;
41167
41168- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41169+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41170 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41171
41172 if (!cn->corename) {
41173@@ -1669,7 +1759,7 @@ static int format_corename(struct core_n
41174 int pid_in_pattern = 0;
41175 int err = 0;
41176
41177- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41178+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41179 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41180 cn->used = 0;
41181
41182@@ -1760,6 +1850,219 @@ out:
41183 return ispipe;
41184 }
41185
41186+int pax_check_flags(unsigned long *flags)
41187+{
41188+ int retval = 0;
41189+
41190+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41191+ if (*flags & MF_PAX_SEGMEXEC)
41192+ {
41193+ *flags &= ~MF_PAX_SEGMEXEC;
41194+ retval = -EINVAL;
41195+ }
41196+#endif
41197+
41198+ if ((*flags & MF_PAX_PAGEEXEC)
41199+
41200+#ifdef CONFIG_PAX_PAGEEXEC
41201+ && (*flags & MF_PAX_SEGMEXEC)
41202+#endif
41203+
41204+ )
41205+ {
41206+ *flags &= ~MF_PAX_PAGEEXEC;
41207+ retval = -EINVAL;
41208+ }
41209+
41210+ if ((*flags & MF_PAX_MPROTECT)
41211+
41212+#ifdef CONFIG_PAX_MPROTECT
41213+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41214+#endif
41215+
41216+ )
41217+ {
41218+ *flags &= ~MF_PAX_MPROTECT;
41219+ retval = -EINVAL;
41220+ }
41221+
41222+ if ((*flags & MF_PAX_EMUTRAMP)
41223+
41224+#ifdef CONFIG_PAX_EMUTRAMP
41225+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41226+#endif
41227+
41228+ )
41229+ {
41230+ *flags &= ~MF_PAX_EMUTRAMP;
41231+ retval = -EINVAL;
41232+ }
41233+
41234+ return retval;
41235+}
41236+
41237+EXPORT_SYMBOL(pax_check_flags);
41238+
41239+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41240+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41241+{
41242+ struct task_struct *tsk = current;
41243+ struct mm_struct *mm = current->mm;
41244+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41245+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41246+ char *path_exec = NULL;
41247+ char *path_fault = NULL;
41248+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41249+
41250+ if (buffer_exec && buffer_fault) {
41251+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41252+
41253+ down_read(&mm->mmap_sem);
41254+ vma = mm->mmap;
41255+ while (vma && (!vma_exec || !vma_fault)) {
41256+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41257+ vma_exec = vma;
41258+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41259+ vma_fault = vma;
41260+ vma = vma->vm_next;
41261+ }
41262+ if (vma_exec) {
41263+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41264+ if (IS_ERR(path_exec))
41265+ path_exec = "<path too long>";
41266+ else {
41267+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41268+ if (path_exec) {
41269+ *path_exec = 0;
41270+ path_exec = buffer_exec;
41271+ } else
41272+ path_exec = "<path too long>";
41273+ }
41274+ }
41275+ if (vma_fault) {
41276+ start = vma_fault->vm_start;
41277+ end = vma_fault->vm_end;
41278+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41279+ if (vma_fault->vm_file) {
41280+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41281+ if (IS_ERR(path_fault))
41282+ path_fault = "<path too long>";
41283+ else {
41284+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41285+ if (path_fault) {
41286+ *path_fault = 0;
41287+ path_fault = buffer_fault;
41288+ } else
41289+ path_fault = "<path too long>";
41290+ }
41291+ } else
41292+ path_fault = "<anonymous mapping>";
41293+ }
41294+ up_read(&mm->mmap_sem);
41295+ }
41296+ if (tsk->signal->curr_ip)
41297+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41298+ else
41299+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41300+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41301+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41302+ task_uid(tsk), task_euid(tsk), pc, sp);
41303+ free_page((unsigned long)buffer_exec);
41304+ free_page((unsigned long)buffer_fault);
41305+ pax_report_insns(pc, sp);
41306+ do_coredump(SIGKILL, SIGKILL, regs);
41307+}
41308+#endif
41309+
41310+#ifdef CONFIG_PAX_REFCOUNT
41311+void pax_report_refcount_overflow(struct pt_regs *regs)
41312+{
41313+ if (current->signal->curr_ip)
41314+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41315+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41316+ else
41317+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41318+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41319+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41320+ show_regs(regs);
41321+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41322+}
41323+#endif
41324+
41325+#ifdef CONFIG_PAX_USERCOPY
41326+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41327+int object_is_on_stack(const void *obj, unsigned long len)
41328+{
41329+ const void * const stack = task_stack_page(current);
41330+ const void * const stackend = stack + THREAD_SIZE;
41331+
41332+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41333+ const void *frame = NULL;
41334+ const void *oldframe;
41335+#endif
41336+
41337+ if (obj + len < obj)
41338+ return -1;
41339+
41340+ if (obj + len <= stack || stackend <= obj)
41341+ return 0;
41342+
41343+ if (obj < stack || stackend < obj + len)
41344+ return -1;
41345+
41346+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41347+ oldframe = __builtin_frame_address(1);
41348+ if (oldframe)
41349+ frame = __builtin_frame_address(2);
41350+ /*
41351+ low ----------------------------------------------> high
41352+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41353+ ^----------------^
41354+ allow copies only within here
41355+ */
41356+ while (stack <= frame && frame < stackend) {
41357+ /* if obj + len extends past the last frame, this
41358+ check won't pass and the next frame will be 0,
41359+ causing us to bail out and correctly report
41360+ the copy as invalid
41361+ */
41362+ if (obj + len <= frame)
41363+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41364+ oldframe = frame;
41365+ frame = *(const void * const *)frame;
41366+ }
41367+ return -1;
41368+#else
41369+ return 1;
41370+#endif
41371+}
41372+
41373+
41374+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41375+{
41376+ if (current->signal->curr_ip)
41377+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41378+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41379+ else
41380+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41381+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41382+ dump_stack();
41383+ gr_handle_kernel_exploit();
41384+ do_group_exit(SIGKILL);
41385+}
41386+#endif
41387+
41388+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41389+void pax_track_stack(void)
41390+{
41391+ unsigned long sp = (unsigned long)&sp;
41392+ if (sp < current_thread_info()->lowest_stack &&
41393+ sp > (unsigned long)task_stack_page(current))
41394+ current_thread_info()->lowest_stack = sp;
41395+}
41396+EXPORT_SYMBOL(pax_track_stack);
41397+#endif
41398+
41399 static int zap_process(struct task_struct *start, int exit_code)
41400 {
41401 struct task_struct *t;
41402@@ -1971,17 +2274,17 @@ static void wait_for_dump_helpers(struct
41403 pipe = file->f_path.dentry->d_inode->i_pipe;
41404
41405 pipe_lock(pipe);
41406- pipe->readers++;
41407- pipe->writers--;
41408+ atomic_inc(&pipe->readers);
41409+ atomic_dec(&pipe->writers);
41410
41411- while ((pipe->readers > 1) && (!signal_pending(current))) {
41412+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41413 wake_up_interruptible_sync(&pipe->wait);
41414 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41415 pipe_wait(pipe);
41416 }
41417
41418- pipe->readers--;
41419- pipe->writers++;
41420+ atomic_dec(&pipe->readers);
41421+ atomic_inc(&pipe->writers);
41422 pipe_unlock(pipe);
41423
41424 }
41425@@ -2042,7 +2345,7 @@ void do_coredump(long signr, int exit_co
41426 int retval = 0;
41427 int flag = 0;
41428 int ispipe;
41429- static atomic_t core_dump_count = ATOMIC_INIT(0);
41430+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41431 struct coredump_params cprm = {
41432 .signr = signr,
41433 .regs = regs,
41434@@ -2057,6 +2360,9 @@ void do_coredump(long signr, int exit_co
41435
41436 audit_core_dumps(signr);
41437
41438+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41439+ gr_handle_brute_attach(current, cprm.mm_flags);
41440+
41441 binfmt = mm->binfmt;
41442 if (!binfmt || !binfmt->core_dump)
41443 goto fail;
41444@@ -2097,6 +2403,8 @@ void do_coredump(long signr, int exit_co
41445 goto fail_corename;
41446 }
41447
41448+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41449+
41450 if (ispipe) {
41451 int dump_count;
41452 char **helper_argv;
41453@@ -2124,7 +2432,7 @@ void do_coredump(long signr, int exit_co
41454 }
41455 cprm.limit = RLIM_INFINITY;
41456
41457- dump_count = atomic_inc_return(&core_dump_count);
41458+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41459 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41460 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41461 task_tgid_vnr(current), current->comm);
41462@@ -2194,7 +2502,7 @@ close_fail:
41463 filp_close(cprm.file, NULL);
41464 fail_dropcount:
41465 if (ispipe)
41466- atomic_dec(&core_dump_count);
41467+ atomic_dec_unchecked(&core_dump_count);
41468 fail_unlock:
41469 kfree(cn.corename);
41470 fail_corename:
41471@@ -2213,7 +2521,7 @@ fail:
41472 */
41473 int dump_write(struct file *file, const void *addr, int nr)
41474 {
41475- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41476+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41477 }
41478 EXPORT_SYMBOL(dump_write);
41479
41480diff -urNp linux-3.0.7/fs/ext2/balloc.c linux-3.0.7/fs/ext2/balloc.c
41481--- linux-3.0.7/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
41482+++ linux-3.0.7/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
41483@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41484
41485 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41486 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41487- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41488+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41489 sbi->s_resuid != current_fsuid() &&
41490 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41491 return 0;
41492diff -urNp linux-3.0.7/fs/ext3/balloc.c linux-3.0.7/fs/ext3/balloc.c
41493--- linux-3.0.7/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
41494+++ linux-3.0.7/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
41495@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
41496
41497 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41498 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41499- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41500+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41501 sbi->s_resuid != current_fsuid() &&
41502 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41503 return 0;
41504diff -urNp linux-3.0.7/fs/ext3/ioctl.c linux-3.0.7/fs/ext3/ioctl.c
41505--- linux-3.0.7/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41506+++ linux-3.0.7/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41507@@ -285,7 +285,7 @@ group_add_out:
41508 if (!capable(CAP_SYS_ADMIN))
41509 return -EPERM;
41510
41511- if (copy_from_user(&range, (struct fstrim_range *)arg,
41512+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41513 sizeof(range)))
41514 return -EFAULT;
41515
41516@@ -293,7 +293,7 @@ group_add_out:
41517 if (ret < 0)
41518 return ret;
41519
41520- if (copy_to_user((struct fstrim_range *)arg, &range,
41521+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41522 sizeof(range)))
41523 return -EFAULT;
41524
41525diff -urNp linux-3.0.7/fs/ext4/balloc.c linux-3.0.7/fs/ext4/balloc.c
41526--- linux-3.0.7/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
41527+++ linux-3.0.7/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
41528@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
41529 /* Hm, nope. Are (enough) root reserved blocks available? */
41530 if (sbi->s_resuid == current_fsuid() ||
41531 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41532- capable(CAP_SYS_RESOURCE) ||
41533- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
41534+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
41535+ capable_nolog(CAP_SYS_RESOURCE)) {
41536
41537 if (free_blocks >= (nblocks + dirty_blocks))
41538 return 1;
41539diff -urNp linux-3.0.7/fs/ext4/ext4.h linux-3.0.7/fs/ext4/ext4.h
41540--- linux-3.0.7/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
41541+++ linux-3.0.7/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
41542@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
41543 unsigned long s_mb_last_start;
41544
41545 /* stats for buddy allocator */
41546- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41547- atomic_t s_bal_success; /* we found long enough chunks */
41548- atomic_t s_bal_allocated; /* in blocks */
41549- atomic_t s_bal_ex_scanned; /* total extents scanned */
41550- atomic_t s_bal_goals; /* goal hits */
41551- atomic_t s_bal_breaks; /* too long searches */
41552- atomic_t s_bal_2orders; /* 2^order hits */
41553+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41554+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41555+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41556+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41557+ atomic_unchecked_t s_bal_goals; /* goal hits */
41558+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41559+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41560 spinlock_t s_bal_lock;
41561 unsigned long s_mb_buddies_generated;
41562 unsigned long long s_mb_generation_time;
41563- atomic_t s_mb_lost_chunks;
41564- atomic_t s_mb_preallocated;
41565- atomic_t s_mb_discarded;
41566+ atomic_unchecked_t s_mb_lost_chunks;
41567+ atomic_unchecked_t s_mb_preallocated;
41568+ atomic_unchecked_t s_mb_discarded;
41569 atomic_t s_lock_busy;
41570
41571 /* locality groups */
41572diff -urNp linux-3.0.7/fs/ext4/file.c linux-3.0.7/fs/ext4/file.c
41573--- linux-3.0.7/fs/ext4/file.c 2011-07-21 22:17:23.000000000 -0400
41574+++ linux-3.0.7/fs/ext4/file.c 2011-10-17 02:30:30.000000000 -0400
41575@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
41576 path.dentry = mnt->mnt_root;
41577 cp = d_path(&path, buf, sizeof(buf));
41578 if (!IS_ERR(cp)) {
41579- memcpy(sbi->s_es->s_last_mounted, cp,
41580- sizeof(sbi->s_es->s_last_mounted));
41581+ strlcpy(sbi->s_es->s_last_mounted, cp,
41582+ sizeof(sbi->s_es->s_last_mounted));
41583 ext4_mark_super_dirty(sb);
41584 }
41585 }
41586diff -urNp linux-3.0.7/fs/ext4/ioctl.c linux-3.0.7/fs/ext4/ioctl.c
41587--- linux-3.0.7/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41588+++ linux-3.0.7/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41589@@ -344,7 +344,7 @@ mext_out:
41590 if (!blk_queue_discard(q))
41591 return -EOPNOTSUPP;
41592
41593- if (copy_from_user(&range, (struct fstrim_range *)arg,
41594+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41595 sizeof(range)))
41596 return -EFAULT;
41597
41598@@ -354,7 +354,7 @@ mext_out:
41599 if (ret < 0)
41600 return ret;
41601
41602- if (copy_to_user((struct fstrim_range *)arg, &range,
41603+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41604 sizeof(range)))
41605 return -EFAULT;
41606
41607diff -urNp linux-3.0.7/fs/ext4/mballoc.c linux-3.0.7/fs/ext4/mballoc.c
41608--- linux-3.0.7/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
41609+++ linux-3.0.7/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
41610@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
41611 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41612
41613 if (EXT4_SB(sb)->s_mb_stats)
41614- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41615+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41616
41617 break;
41618 }
41619@@ -2087,7 +2087,7 @@ repeat:
41620 ac->ac_status = AC_STATUS_CONTINUE;
41621 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41622 cr = 3;
41623- atomic_inc(&sbi->s_mb_lost_chunks);
41624+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41625 goto repeat;
41626 }
41627 }
41628@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
41629 ext4_grpblk_t counters[16];
41630 } sg;
41631
41632+ pax_track_stack();
41633+
41634 group--;
41635 if (group == 0)
41636 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41637@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
41638 if (sbi->s_mb_stats) {
41639 printk(KERN_INFO
41640 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41641- atomic_read(&sbi->s_bal_allocated),
41642- atomic_read(&sbi->s_bal_reqs),
41643- atomic_read(&sbi->s_bal_success));
41644+ atomic_read_unchecked(&sbi->s_bal_allocated),
41645+ atomic_read_unchecked(&sbi->s_bal_reqs),
41646+ atomic_read_unchecked(&sbi->s_bal_success));
41647 printk(KERN_INFO
41648 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41649 "%u 2^N hits, %u breaks, %u lost\n",
41650- atomic_read(&sbi->s_bal_ex_scanned),
41651- atomic_read(&sbi->s_bal_goals),
41652- atomic_read(&sbi->s_bal_2orders),
41653- atomic_read(&sbi->s_bal_breaks),
41654- atomic_read(&sbi->s_mb_lost_chunks));
41655+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41656+ atomic_read_unchecked(&sbi->s_bal_goals),
41657+ atomic_read_unchecked(&sbi->s_bal_2orders),
41658+ atomic_read_unchecked(&sbi->s_bal_breaks),
41659+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41660 printk(KERN_INFO
41661 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41662 sbi->s_mb_buddies_generated++,
41663 sbi->s_mb_generation_time);
41664 printk(KERN_INFO
41665 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41666- atomic_read(&sbi->s_mb_preallocated),
41667- atomic_read(&sbi->s_mb_discarded));
41668+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41669+ atomic_read_unchecked(&sbi->s_mb_discarded));
41670 }
41671
41672 free_percpu(sbi->s_locality_groups);
41673@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
41674 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41675
41676 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41677- atomic_inc(&sbi->s_bal_reqs);
41678- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41679+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41680+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41681 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
41682- atomic_inc(&sbi->s_bal_success);
41683- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41684+ atomic_inc_unchecked(&sbi->s_bal_success);
41685+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41686 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41687 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41688- atomic_inc(&sbi->s_bal_goals);
41689+ atomic_inc_unchecked(&sbi->s_bal_goals);
41690 if (ac->ac_found > sbi->s_mb_max_to_scan)
41691- atomic_inc(&sbi->s_bal_breaks);
41692+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41693 }
41694
41695 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41696@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41697 trace_ext4_mb_new_inode_pa(ac, pa);
41698
41699 ext4_mb_use_inode_pa(ac, pa);
41700- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41701+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41702
41703 ei = EXT4_I(ac->ac_inode);
41704 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41705@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41706 trace_ext4_mb_new_group_pa(ac, pa);
41707
41708 ext4_mb_use_group_pa(ac, pa);
41709- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41710+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41711
41712 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41713 lg = ac->ac_lg;
41714@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41715 * from the bitmap and continue.
41716 */
41717 }
41718- atomic_add(free, &sbi->s_mb_discarded);
41719+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41720
41721 return err;
41722 }
41723@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41724 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41725 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41726 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41727- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41728+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41729 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
41730
41731 return 0;
41732diff -urNp linux-3.0.7/fs/fcntl.c linux-3.0.7/fs/fcntl.c
41733--- linux-3.0.7/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
41734+++ linux-3.0.7/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
41735@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
41736 if (err)
41737 return err;
41738
41739+ if (gr_handle_chroot_fowner(pid, type))
41740+ return -ENOENT;
41741+ if (gr_check_protected_task_fowner(pid, type))
41742+ return -EACCES;
41743+
41744 f_modown(filp, pid, type, force);
41745 return 0;
41746 }
41747@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
41748
41749 static int f_setown_ex(struct file *filp, unsigned long arg)
41750 {
41751- struct f_owner_ex * __user owner_p = (void * __user)arg;
41752+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41753 struct f_owner_ex owner;
41754 struct pid *pid;
41755 int type;
41756@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
41757
41758 static int f_getown_ex(struct file *filp, unsigned long arg)
41759 {
41760- struct f_owner_ex * __user owner_p = (void * __user)arg;
41761+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41762 struct f_owner_ex owner;
41763 int ret = 0;
41764
41765@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
41766 switch (cmd) {
41767 case F_DUPFD:
41768 case F_DUPFD_CLOEXEC:
41769+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41770 if (arg >= rlimit(RLIMIT_NOFILE))
41771 break;
41772 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41773@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
41774 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
41775 * is defined as O_NONBLOCK on some platforms and not on others.
41776 */
41777- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41778+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41779 O_RDONLY | O_WRONLY | O_RDWR |
41780 O_CREAT | O_EXCL | O_NOCTTY |
41781 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
41782 __O_SYNC | O_DSYNC | FASYNC |
41783 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
41784 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
41785- __FMODE_EXEC | O_PATH
41786+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
41787 ));
41788
41789 fasync_cache = kmem_cache_create("fasync_cache",
41790diff -urNp linux-3.0.7/fs/fifo.c linux-3.0.7/fs/fifo.c
41791--- linux-3.0.7/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
41792+++ linux-3.0.7/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
41793@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
41794 */
41795 filp->f_op = &read_pipefifo_fops;
41796 pipe->r_counter++;
41797- if (pipe->readers++ == 0)
41798+ if (atomic_inc_return(&pipe->readers) == 1)
41799 wake_up_partner(inode);
41800
41801- if (!pipe->writers) {
41802+ if (!atomic_read(&pipe->writers)) {
41803 if ((filp->f_flags & O_NONBLOCK)) {
41804 /* suppress POLLHUP until we have
41805 * seen a writer */
41806@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
41807 * errno=ENXIO when there is no process reading the FIFO.
41808 */
41809 ret = -ENXIO;
41810- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41811+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41812 goto err;
41813
41814 filp->f_op = &write_pipefifo_fops;
41815 pipe->w_counter++;
41816- if (!pipe->writers++)
41817+ if (atomic_inc_return(&pipe->writers) == 1)
41818 wake_up_partner(inode);
41819
41820- if (!pipe->readers) {
41821+ if (!atomic_read(&pipe->readers)) {
41822 wait_for_partner(inode, &pipe->r_counter);
41823 if (signal_pending(current))
41824 goto err_wr;
41825@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
41826 */
41827 filp->f_op = &rdwr_pipefifo_fops;
41828
41829- pipe->readers++;
41830- pipe->writers++;
41831+ atomic_inc(&pipe->readers);
41832+ atomic_inc(&pipe->writers);
41833 pipe->r_counter++;
41834 pipe->w_counter++;
41835- if (pipe->readers == 1 || pipe->writers == 1)
41836+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41837 wake_up_partner(inode);
41838 break;
41839
41840@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
41841 return 0;
41842
41843 err_rd:
41844- if (!--pipe->readers)
41845+ if (atomic_dec_and_test(&pipe->readers))
41846 wake_up_interruptible(&pipe->wait);
41847 ret = -ERESTARTSYS;
41848 goto err;
41849
41850 err_wr:
41851- if (!--pipe->writers)
41852+ if (atomic_dec_and_test(&pipe->writers))
41853 wake_up_interruptible(&pipe->wait);
41854 ret = -ERESTARTSYS;
41855 goto err;
41856
41857 err:
41858- if (!pipe->readers && !pipe->writers)
41859+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41860 free_pipe_info(inode);
41861
41862 err_nocleanup:
41863diff -urNp linux-3.0.7/fs/file.c linux-3.0.7/fs/file.c
41864--- linux-3.0.7/fs/file.c 2011-07-21 22:17:23.000000000 -0400
41865+++ linux-3.0.7/fs/file.c 2011-08-23 21:48:14.000000000 -0400
41866@@ -15,6 +15,7 @@
41867 #include <linux/slab.h>
41868 #include <linux/vmalloc.h>
41869 #include <linux/file.h>
41870+#include <linux/security.h>
41871 #include <linux/fdtable.h>
41872 #include <linux/bitops.h>
41873 #include <linux/interrupt.h>
41874@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
41875 * N.B. For clone tasks sharing a files structure, this test
41876 * will limit the total number of files that can be opened.
41877 */
41878+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41879 if (nr >= rlimit(RLIMIT_NOFILE))
41880 return -EMFILE;
41881
41882diff -urNp linux-3.0.7/fs/filesystems.c linux-3.0.7/fs/filesystems.c
41883--- linux-3.0.7/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
41884+++ linux-3.0.7/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
41885@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
41886 int len = dot ? dot - name : strlen(name);
41887
41888 fs = __get_fs_type(name, len);
41889+
41890+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41891+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41892+#else
41893 if (!fs && (request_module("%.*s", len, name) == 0))
41894+#endif
41895 fs = __get_fs_type(name, len);
41896
41897 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41898diff -urNp linux-3.0.7/fs/fscache/cookie.c linux-3.0.7/fs/fscache/cookie.c
41899--- linux-3.0.7/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
41900+++ linux-3.0.7/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
41901@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41902 parent ? (char *) parent->def->name : "<no-parent>",
41903 def->name, netfs_data);
41904
41905- fscache_stat(&fscache_n_acquires);
41906+ fscache_stat_unchecked(&fscache_n_acquires);
41907
41908 /* if there's no parent cookie, then we don't create one here either */
41909 if (!parent) {
41910- fscache_stat(&fscache_n_acquires_null);
41911+ fscache_stat_unchecked(&fscache_n_acquires_null);
41912 _leave(" [no parent]");
41913 return NULL;
41914 }
41915@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41916 /* allocate and initialise a cookie */
41917 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41918 if (!cookie) {
41919- fscache_stat(&fscache_n_acquires_oom);
41920+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41921 _leave(" [ENOMEM]");
41922 return NULL;
41923 }
41924@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41925
41926 switch (cookie->def->type) {
41927 case FSCACHE_COOKIE_TYPE_INDEX:
41928- fscache_stat(&fscache_n_cookie_index);
41929+ fscache_stat_unchecked(&fscache_n_cookie_index);
41930 break;
41931 case FSCACHE_COOKIE_TYPE_DATAFILE:
41932- fscache_stat(&fscache_n_cookie_data);
41933+ fscache_stat_unchecked(&fscache_n_cookie_data);
41934 break;
41935 default:
41936- fscache_stat(&fscache_n_cookie_special);
41937+ fscache_stat_unchecked(&fscache_n_cookie_special);
41938 break;
41939 }
41940
41941@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41942 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41943 atomic_dec(&parent->n_children);
41944 __fscache_cookie_put(cookie);
41945- fscache_stat(&fscache_n_acquires_nobufs);
41946+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41947 _leave(" = NULL");
41948 return NULL;
41949 }
41950 }
41951
41952- fscache_stat(&fscache_n_acquires_ok);
41953+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41954 _leave(" = %p", cookie);
41955 return cookie;
41956 }
41957@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41958 cache = fscache_select_cache_for_object(cookie->parent);
41959 if (!cache) {
41960 up_read(&fscache_addremove_sem);
41961- fscache_stat(&fscache_n_acquires_no_cache);
41962+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41963 _leave(" = -ENOMEDIUM [no cache]");
41964 return -ENOMEDIUM;
41965 }
41966@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41967 object = cache->ops->alloc_object(cache, cookie);
41968 fscache_stat_d(&fscache_n_cop_alloc_object);
41969 if (IS_ERR(object)) {
41970- fscache_stat(&fscache_n_object_no_alloc);
41971+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41972 ret = PTR_ERR(object);
41973 goto error;
41974 }
41975
41976- fscache_stat(&fscache_n_object_alloc);
41977+ fscache_stat_unchecked(&fscache_n_object_alloc);
41978
41979 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41980
41981@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41982 struct fscache_object *object;
41983 struct hlist_node *_p;
41984
41985- fscache_stat(&fscache_n_updates);
41986+ fscache_stat_unchecked(&fscache_n_updates);
41987
41988 if (!cookie) {
41989- fscache_stat(&fscache_n_updates_null);
41990+ fscache_stat_unchecked(&fscache_n_updates_null);
41991 _leave(" [no cookie]");
41992 return;
41993 }
41994@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41995 struct fscache_object *object;
41996 unsigned long event;
41997
41998- fscache_stat(&fscache_n_relinquishes);
41999+ fscache_stat_unchecked(&fscache_n_relinquishes);
42000 if (retire)
42001- fscache_stat(&fscache_n_relinquishes_retire);
42002+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42003
42004 if (!cookie) {
42005- fscache_stat(&fscache_n_relinquishes_null);
42006+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42007 _leave(" [no cookie]");
42008 return;
42009 }
42010@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42011
42012 /* wait for the cookie to finish being instantiated (or to fail) */
42013 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42014- fscache_stat(&fscache_n_relinquishes_waitcrt);
42015+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42016 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42017 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42018 }
42019diff -urNp linux-3.0.7/fs/fscache/internal.h linux-3.0.7/fs/fscache/internal.h
42020--- linux-3.0.7/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
42021+++ linux-3.0.7/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
42022@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42023 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42024 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42025
42026-extern atomic_t fscache_n_op_pend;
42027-extern atomic_t fscache_n_op_run;
42028-extern atomic_t fscache_n_op_enqueue;
42029-extern atomic_t fscache_n_op_deferred_release;
42030-extern atomic_t fscache_n_op_release;
42031-extern atomic_t fscache_n_op_gc;
42032-extern atomic_t fscache_n_op_cancelled;
42033-extern atomic_t fscache_n_op_rejected;
42034-
42035-extern atomic_t fscache_n_attr_changed;
42036-extern atomic_t fscache_n_attr_changed_ok;
42037-extern atomic_t fscache_n_attr_changed_nobufs;
42038-extern atomic_t fscache_n_attr_changed_nomem;
42039-extern atomic_t fscache_n_attr_changed_calls;
42040-
42041-extern atomic_t fscache_n_allocs;
42042-extern atomic_t fscache_n_allocs_ok;
42043-extern atomic_t fscache_n_allocs_wait;
42044-extern atomic_t fscache_n_allocs_nobufs;
42045-extern atomic_t fscache_n_allocs_intr;
42046-extern atomic_t fscache_n_allocs_object_dead;
42047-extern atomic_t fscache_n_alloc_ops;
42048-extern atomic_t fscache_n_alloc_op_waits;
42049-
42050-extern atomic_t fscache_n_retrievals;
42051-extern atomic_t fscache_n_retrievals_ok;
42052-extern atomic_t fscache_n_retrievals_wait;
42053-extern atomic_t fscache_n_retrievals_nodata;
42054-extern atomic_t fscache_n_retrievals_nobufs;
42055-extern atomic_t fscache_n_retrievals_intr;
42056-extern atomic_t fscache_n_retrievals_nomem;
42057-extern atomic_t fscache_n_retrievals_object_dead;
42058-extern atomic_t fscache_n_retrieval_ops;
42059-extern atomic_t fscache_n_retrieval_op_waits;
42060-
42061-extern atomic_t fscache_n_stores;
42062-extern atomic_t fscache_n_stores_ok;
42063-extern atomic_t fscache_n_stores_again;
42064-extern atomic_t fscache_n_stores_nobufs;
42065-extern atomic_t fscache_n_stores_oom;
42066-extern atomic_t fscache_n_store_ops;
42067-extern atomic_t fscache_n_store_calls;
42068-extern atomic_t fscache_n_store_pages;
42069-extern atomic_t fscache_n_store_radix_deletes;
42070-extern atomic_t fscache_n_store_pages_over_limit;
42071-
42072-extern atomic_t fscache_n_store_vmscan_not_storing;
42073-extern atomic_t fscache_n_store_vmscan_gone;
42074-extern atomic_t fscache_n_store_vmscan_busy;
42075-extern atomic_t fscache_n_store_vmscan_cancelled;
42076-
42077-extern atomic_t fscache_n_marks;
42078-extern atomic_t fscache_n_uncaches;
42079-
42080-extern atomic_t fscache_n_acquires;
42081-extern atomic_t fscache_n_acquires_null;
42082-extern atomic_t fscache_n_acquires_no_cache;
42083-extern atomic_t fscache_n_acquires_ok;
42084-extern atomic_t fscache_n_acquires_nobufs;
42085-extern atomic_t fscache_n_acquires_oom;
42086-
42087-extern atomic_t fscache_n_updates;
42088-extern atomic_t fscache_n_updates_null;
42089-extern atomic_t fscache_n_updates_run;
42090-
42091-extern atomic_t fscache_n_relinquishes;
42092-extern atomic_t fscache_n_relinquishes_null;
42093-extern atomic_t fscache_n_relinquishes_waitcrt;
42094-extern atomic_t fscache_n_relinquishes_retire;
42095-
42096-extern atomic_t fscache_n_cookie_index;
42097-extern atomic_t fscache_n_cookie_data;
42098-extern atomic_t fscache_n_cookie_special;
42099-
42100-extern atomic_t fscache_n_object_alloc;
42101-extern atomic_t fscache_n_object_no_alloc;
42102-extern atomic_t fscache_n_object_lookups;
42103-extern atomic_t fscache_n_object_lookups_negative;
42104-extern atomic_t fscache_n_object_lookups_positive;
42105-extern atomic_t fscache_n_object_lookups_timed_out;
42106-extern atomic_t fscache_n_object_created;
42107-extern atomic_t fscache_n_object_avail;
42108-extern atomic_t fscache_n_object_dead;
42109-
42110-extern atomic_t fscache_n_checkaux_none;
42111-extern atomic_t fscache_n_checkaux_okay;
42112-extern atomic_t fscache_n_checkaux_update;
42113-extern atomic_t fscache_n_checkaux_obsolete;
42114+extern atomic_unchecked_t fscache_n_op_pend;
42115+extern atomic_unchecked_t fscache_n_op_run;
42116+extern atomic_unchecked_t fscache_n_op_enqueue;
42117+extern atomic_unchecked_t fscache_n_op_deferred_release;
42118+extern atomic_unchecked_t fscache_n_op_release;
42119+extern atomic_unchecked_t fscache_n_op_gc;
42120+extern atomic_unchecked_t fscache_n_op_cancelled;
42121+extern atomic_unchecked_t fscache_n_op_rejected;
42122+
42123+extern atomic_unchecked_t fscache_n_attr_changed;
42124+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42125+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42126+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42127+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42128+
42129+extern atomic_unchecked_t fscache_n_allocs;
42130+extern atomic_unchecked_t fscache_n_allocs_ok;
42131+extern atomic_unchecked_t fscache_n_allocs_wait;
42132+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42133+extern atomic_unchecked_t fscache_n_allocs_intr;
42134+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42135+extern atomic_unchecked_t fscache_n_alloc_ops;
42136+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42137+
42138+extern atomic_unchecked_t fscache_n_retrievals;
42139+extern atomic_unchecked_t fscache_n_retrievals_ok;
42140+extern atomic_unchecked_t fscache_n_retrievals_wait;
42141+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42142+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42143+extern atomic_unchecked_t fscache_n_retrievals_intr;
42144+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42145+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42146+extern atomic_unchecked_t fscache_n_retrieval_ops;
42147+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42148+
42149+extern atomic_unchecked_t fscache_n_stores;
42150+extern atomic_unchecked_t fscache_n_stores_ok;
42151+extern atomic_unchecked_t fscache_n_stores_again;
42152+extern atomic_unchecked_t fscache_n_stores_nobufs;
42153+extern atomic_unchecked_t fscache_n_stores_oom;
42154+extern atomic_unchecked_t fscache_n_store_ops;
42155+extern atomic_unchecked_t fscache_n_store_calls;
42156+extern atomic_unchecked_t fscache_n_store_pages;
42157+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42158+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42159+
42160+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42161+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42162+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42163+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42164+
42165+extern atomic_unchecked_t fscache_n_marks;
42166+extern atomic_unchecked_t fscache_n_uncaches;
42167+
42168+extern atomic_unchecked_t fscache_n_acquires;
42169+extern atomic_unchecked_t fscache_n_acquires_null;
42170+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42171+extern atomic_unchecked_t fscache_n_acquires_ok;
42172+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42173+extern atomic_unchecked_t fscache_n_acquires_oom;
42174+
42175+extern atomic_unchecked_t fscache_n_updates;
42176+extern atomic_unchecked_t fscache_n_updates_null;
42177+extern atomic_unchecked_t fscache_n_updates_run;
42178+
42179+extern atomic_unchecked_t fscache_n_relinquishes;
42180+extern atomic_unchecked_t fscache_n_relinquishes_null;
42181+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42182+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42183+
42184+extern atomic_unchecked_t fscache_n_cookie_index;
42185+extern atomic_unchecked_t fscache_n_cookie_data;
42186+extern atomic_unchecked_t fscache_n_cookie_special;
42187+
42188+extern atomic_unchecked_t fscache_n_object_alloc;
42189+extern atomic_unchecked_t fscache_n_object_no_alloc;
42190+extern atomic_unchecked_t fscache_n_object_lookups;
42191+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42192+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42193+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42194+extern atomic_unchecked_t fscache_n_object_created;
42195+extern atomic_unchecked_t fscache_n_object_avail;
42196+extern atomic_unchecked_t fscache_n_object_dead;
42197+
42198+extern atomic_unchecked_t fscache_n_checkaux_none;
42199+extern atomic_unchecked_t fscache_n_checkaux_okay;
42200+extern atomic_unchecked_t fscache_n_checkaux_update;
42201+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42202
42203 extern atomic_t fscache_n_cop_alloc_object;
42204 extern atomic_t fscache_n_cop_lookup_object;
42205@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42206 atomic_inc(stat);
42207 }
42208
42209+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42210+{
42211+ atomic_inc_unchecked(stat);
42212+}
42213+
42214 static inline void fscache_stat_d(atomic_t *stat)
42215 {
42216 atomic_dec(stat);
42217@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42218
42219 #define __fscache_stat(stat) (NULL)
42220 #define fscache_stat(stat) do {} while (0)
42221+#define fscache_stat_unchecked(stat) do {} while (0)
42222 #define fscache_stat_d(stat) do {} while (0)
42223 #endif
42224
42225diff -urNp linux-3.0.7/fs/fscache/object.c linux-3.0.7/fs/fscache/object.c
42226--- linux-3.0.7/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
42227+++ linux-3.0.7/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
42228@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42229 /* update the object metadata on disk */
42230 case FSCACHE_OBJECT_UPDATING:
42231 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42232- fscache_stat(&fscache_n_updates_run);
42233+ fscache_stat_unchecked(&fscache_n_updates_run);
42234 fscache_stat(&fscache_n_cop_update_object);
42235 object->cache->ops->update_object(object);
42236 fscache_stat_d(&fscache_n_cop_update_object);
42237@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42238 spin_lock(&object->lock);
42239 object->state = FSCACHE_OBJECT_DEAD;
42240 spin_unlock(&object->lock);
42241- fscache_stat(&fscache_n_object_dead);
42242+ fscache_stat_unchecked(&fscache_n_object_dead);
42243 goto terminal_transit;
42244
42245 /* handle the parent cache of this object being withdrawn from
42246@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42247 spin_lock(&object->lock);
42248 object->state = FSCACHE_OBJECT_DEAD;
42249 spin_unlock(&object->lock);
42250- fscache_stat(&fscache_n_object_dead);
42251+ fscache_stat_unchecked(&fscache_n_object_dead);
42252 goto terminal_transit;
42253
42254 /* complain about the object being woken up once it is
42255@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42256 parent->cookie->def->name, cookie->def->name,
42257 object->cache->tag->name);
42258
42259- fscache_stat(&fscache_n_object_lookups);
42260+ fscache_stat_unchecked(&fscache_n_object_lookups);
42261 fscache_stat(&fscache_n_cop_lookup_object);
42262 ret = object->cache->ops->lookup_object(object);
42263 fscache_stat_d(&fscache_n_cop_lookup_object);
42264@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42265 if (ret == -ETIMEDOUT) {
42266 /* probably stuck behind another object, so move this one to
42267 * the back of the queue */
42268- fscache_stat(&fscache_n_object_lookups_timed_out);
42269+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42270 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42271 }
42272
42273@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42274
42275 spin_lock(&object->lock);
42276 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42277- fscache_stat(&fscache_n_object_lookups_negative);
42278+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42279
42280 /* transit here to allow write requests to begin stacking up
42281 * and read requests to begin returning ENODATA */
42282@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42283 * result, in which case there may be data available */
42284 spin_lock(&object->lock);
42285 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42286- fscache_stat(&fscache_n_object_lookups_positive);
42287+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42288
42289 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42290
42291@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42292 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42293 } else {
42294 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42295- fscache_stat(&fscache_n_object_created);
42296+ fscache_stat_unchecked(&fscache_n_object_created);
42297
42298 object->state = FSCACHE_OBJECT_AVAILABLE;
42299 spin_unlock(&object->lock);
42300@@ -602,7 +602,7 @@ static void fscache_object_available(str
42301 fscache_enqueue_dependents(object);
42302
42303 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42304- fscache_stat(&fscache_n_object_avail);
42305+ fscache_stat_unchecked(&fscache_n_object_avail);
42306
42307 _leave("");
42308 }
42309@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42310 enum fscache_checkaux result;
42311
42312 if (!object->cookie->def->check_aux) {
42313- fscache_stat(&fscache_n_checkaux_none);
42314+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42315 return FSCACHE_CHECKAUX_OKAY;
42316 }
42317
42318@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42319 switch (result) {
42320 /* entry okay as is */
42321 case FSCACHE_CHECKAUX_OKAY:
42322- fscache_stat(&fscache_n_checkaux_okay);
42323+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42324 break;
42325
42326 /* entry requires update */
42327 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42328- fscache_stat(&fscache_n_checkaux_update);
42329+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42330 break;
42331
42332 /* entry requires deletion */
42333 case FSCACHE_CHECKAUX_OBSOLETE:
42334- fscache_stat(&fscache_n_checkaux_obsolete);
42335+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42336 break;
42337
42338 default:
42339diff -urNp linux-3.0.7/fs/fscache/operation.c linux-3.0.7/fs/fscache/operation.c
42340--- linux-3.0.7/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
42341+++ linux-3.0.7/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
42342@@ -17,7 +17,7 @@
42343 #include <linux/slab.h>
42344 #include "internal.h"
42345
42346-atomic_t fscache_op_debug_id;
42347+atomic_unchecked_t fscache_op_debug_id;
42348 EXPORT_SYMBOL(fscache_op_debug_id);
42349
42350 /**
42351@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42352 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42353 ASSERTCMP(atomic_read(&op->usage), >, 0);
42354
42355- fscache_stat(&fscache_n_op_enqueue);
42356+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42357 switch (op->flags & FSCACHE_OP_TYPE) {
42358 case FSCACHE_OP_ASYNC:
42359 _debug("queue async");
42360@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42361 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42362 if (op->processor)
42363 fscache_enqueue_operation(op);
42364- fscache_stat(&fscache_n_op_run);
42365+ fscache_stat_unchecked(&fscache_n_op_run);
42366 }
42367
42368 /*
42369@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42370 if (object->n_ops > 1) {
42371 atomic_inc(&op->usage);
42372 list_add_tail(&op->pend_link, &object->pending_ops);
42373- fscache_stat(&fscache_n_op_pend);
42374+ fscache_stat_unchecked(&fscache_n_op_pend);
42375 } else if (!list_empty(&object->pending_ops)) {
42376 atomic_inc(&op->usage);
42377 list_add_tail(&op->pend_link, &object->pending_ops);
42378- fscache_stat(&fscache_n_op_pend);
42379+ fscache_stat_unchecked(&fscache_n_op_pend);
42380 fscache_start_operations(object);
42381 } else {
42382 ASSERTCMP(object->n_in_progress, ==, 0);
42383@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42384 object->n_exclusive++; /* reads and writes must wait */
42385 atomic_inc(&op->usage);
42386 list_add_tail(&op->pend_link, &object->pending_ops);
42387- fscache_stat(&fscache_n_op_pend);
42388+ fscache_stat_unchecked(&fscache_n_op_pend);
42389 ret = 0;
42390 } else {
42391 /* not allowed to submit ops in any other state */
42392@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42393 if (object->n_exclusive > 0) {
42394 atomic_inc(&op->usage);
42395 list_add_tail(&op->pend_link, &object->pending_ops);
42396- fscache_stat(&fscache_n_op_pend);
42397+ fscache_stat_unchecked(&fscache_n_op_pend);
42398 } else if (!list_empty(&object->pending_ops)) {
42399 atomic_inc(&op->usage);
42400 list_add_tail(&op->pend_link, &object->pending_ops);
42401- fscache_stat(&fscache_n_op_pend);
42402+ fscache_stat_unchecked(&fscache_n_op_pend);
42403 fscache_start_operations(object);
42404 } else {
42405 ASSERTCMP(object->n_exclusive, ==, 0);
42406@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42407 object->n_ops++;
42408 atomic_inc(&op->usage);
42409 list_add_tail(&op->pend_link, &object->pending_ops);
42410- fscache_stat(&fscache_n_op_pend);
42411+ fscache_stat_unchecked(&fscache_n_op_pend);
42412 ret = 0;
42413 } else if (object->state == FSCACHE_OBJECT_DYING ||
42414 object->state == FSCACHE_OBJECT_LC_DYING ||
42415 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42416- fscache_stat(&fscache_n_op_rejected);
42417+ fscache_stat_unchecked(&fscache_n_op_rejected);
42418 ret = -ENOBUFS;
42419 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42420 fscache_report_unexpected_submission(object, op, ostate);
42421@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42422
42423 ret = -EBUSY;
42424 if (!list_empty(&op->pend_link)) {
42425- fscache_stat(&fscache_n_op_cancelled);
42426+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42427 list_del_init(&op->pend_link);
42428 object->n_ops--;
42429 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42430@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42431 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42432 BUG();
42433
42434- fscache_stat(&fscache_n_op_release);
42435+ fscache_stat_unchecked(&fscache_n_op_release);
42436
42437 if (op->release) {
42438 op->release(op);
42439@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42440 * lock, and defer it otherwise */
42441 if (!spin_trylock(&object->lock)) {
42442 _debug("defer put");
42443- fscache_stat(&fscache_n_op_deferred_release);
42444+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42445
42446 cache = object->cache;
42447 spin_lock(&cache->op_gc_list_lock);
42448@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42449
42450 _debug("GC DEFERRED REL OBJ%x OP%x",
42451 object->debug_id, op->debug_id);
42452- fscache_stat(&fscache_n_op_gc);
42453+ fscache_stat_unchecked(&fscache_n_op_gc);
42454
42455 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42456
42457diff -urNp linux-3.0.7/fs/fscache/page.c linux-3.0.7/fs/fscache/page.c
42458--- linux-3.0.7/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
42459+++ linux-3.0.7/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
42460@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42461 val = radix_tree_lookup(&cookie->stores, page->index);
42462 if (!val) {
42463 rcu_read_unlock();
42464- fscache_stat(&fscache_n_store_vmscan_not_storing);
42465+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42466 __fscache_uncache_page(cookie, page);
42467 return true;
42468 }
42469@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42470 spin_unlock(&cookie->stores_lock);
42471
42472 if (xpage) {
42473- fscache_stat(&fscache_n_store_vmscan_cancelled);
42474- fscache_stat(&fscache_n_store_radix_deletes);
42475+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42476+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42477 ASSERTCMP(xpage, ==, page);
42478 } else {
42479- fscache_stat(&fscache_n_store_vmscan_gone);
42480+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42481 }
42482
42483 wake_up_bit(&cookie->flags, 0);
42484@@ -107,7 +107,7 @@ page_busy:
42485 /* we might want to wait here, but that could deadlock the allocator as
42486 * the work threads writing to the cache may all end up sleeping
42487 * on memory allocation */
42488- fscache_stat(&fscache_n_store_vmscan_busy);
42489+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42490 return false;
42491 }
42492 EXPORT_SYMBOL(__fscache_maybe_release_page);
42493@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42494 FSCACHE_COOKIE_STORING_TAG);
42495 if (!radix_tree_tag_get(&cookie->stores, page->index,
42496 FSCACHE_COOKIE_PENDING_TAG)) {
42497- fscache_stat(&fscache_n_store_radix_deletes);
42498+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42499 xpage = radix_tree_delete(&cookie->stores, page->index);
42500 }
42501 spin_unlock(&cookie->stores_lock);
42502@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42503
42504 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42505
42506- fscache_stat(&fscache_n_attr_changed_calls);
42507+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42508
42509 if (fscache_object_is_active(object)) {
42510 fscache_stat(&fscache_n_cop_attr_changed);
42511@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42512
42513 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42514
42515- fscache_stat(&fscache_n_attr_changed);
42516+ fscache_stat_unchecked(&fscache_n_attr_changed);
42517
42518 op = kzalloc(sizeof(*op), GFP_KERNEL);
42519 if (!op) {
42520- fscache_stat(&fscache_n_attr_changed_nomem);
42521+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42522 _leave(" = -ENOMEM");
42523 return -ENOMEM;
42524 }
42525@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
42526 if (fscache_submit_exclusive_op(object, op) < 0)
42527 goto nobufs;
42528 spin_unlock(&cookie->lock);
42529- fscache_stat(&fscache_n_attr_changed_ok);
42530+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42531 fscache_put_operation(op);
42532 _leave(" = 0");
42533 return 0;
42534@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
42535 nobufs:
42536 spin_unlock(&cookie->lock);
42537 kfree(op);
42538- fscache_stat(&fscache_n_attr_changed_nobufs);
42539+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42540 _leave(" = %d", -ENOBUFS);
42541 return -ENOBUFS;
42542 }
42543@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
42544 /* allocate a retrieval operation and attempt to submit it */
42545 op = kzalloc(sizeof(*op), GFP_NOIO);
42546 if (!op) {
42547- fscache_stat(&fscache_n_retrievals_nomem);
42548+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42549 return NULL;
42550 }
42551
42552@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
42553 return 0;
42554 }
42555
42556- fscache_stat(&fscache_n_retrievals_wait);
42557+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42558
42559 jif = jiffies;
42560 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42561 fscache_wait_bit_interruptible,
42562 TASK_INTERRUPTIBLE) != 0) {
42563- fscache_stat(&fscache_n_retrievals_intr);
42564+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42565 _leave(" = -ERESTARTSYS");
42566 return -ERESTARTSYS;
42567 }
42568@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
42569 */
42570 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42571 struct fscache_retrieval *op,
42572- atomic_t *stat_op_waits,
42573- atomic_t *stat_object_dead)
42574+ atomic_unchecked_t *stat_op_waits,
42575+ atomic_unchecked_t *stat_object_dead)
42576 {
42577 int ret;
42578
42579@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
42580 goto check_if_dead;
42581
42582 _debug(">>> WT");
42583- fscache_stat(stat_op_waits);
42584+ fscache_stat_unchecked(stat_op_waits);
42585 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42586 fscache_wait_bit_interruptible,
42587 TASK_INTERRUPTIBLE) < 0) {
42588@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
42589
42590 check_if_dead:
42591 if (unlikely(fscache_object_is_dead(object))) {
42592- fscache_stat(stat_object_dead);
42593+ fscache_stat_unchecked(stat_object_dead);
42594 return -ENOBUFS;
42595 }
42596 return 0;
42597@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
42598
42599 _enter("%p,%p,,,", cookie, page);
42600
42601- fscache_stat(&fscache_n_retrievals);
42602+ fscache_stat_unchecked(&fscache_n_retrievals);
42603
42604 if (hlist_empty(&cookie->backing_objects))
42605 goto nobufs;
42606@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
42607 goto nobufs_unlock;
42608 spin_unlock(&cookie->lock);
42609
42610- fscache_stat(&fscache_n_retrieval_ops);
42611+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42612
42613 /* pin the netfs read context in case we need to do the actual netfs
42614 * read because we've encountered a cache read failure */
42615@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
42616
42617 error:
42618 if (ret == -ENOMEM)
42619- fscache_stat(&fscache_n_retrievals_nomem);
42620+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42621 else if (ret == -ERESTARTSYS)
42622- fscache_stat(&fscache_n_retrievals_intr);
42623+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42624 else if (ret == -ENODATA)
42625- fscache_stat(&fscache_n_retrievals_nodata);
42626+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42627 else if (ret < 0)
42628- fscache_stat(&fscache_n_retrievals_nobufs);
42629+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42630 else
42631- fscache_stat(&fscache_n_retrievals_ok);
42632+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42633
42634 fscache_put_retrieval(op);
42635 _leave(" = %d", ret);
42636@@ -429,7 +429,7 @@ nobufs_unlock:
42637 spin_unlock(&cookie->lock);
42638 kfree(op);
42639 nobufs:
42640- fscache_stat(&fscache_n_retrievals_nobufs);
42641+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42642 _leave(" = -ENOBUFS");
42643 return -ENOBUFS;
42644 }
42645@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
42646
42647 _enter("%p,,%d,,,", cookie, *nr_pages);
42648
42649- fscache_stat(&fscache_n_retrievals);
42650+ fscache_stat_unchecked(&fscache_n_retrievals);
42651
42652 if (hlist_empty(&cookie->backing_objects))
42653 goto nobufs;
42654@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
42655 goto nobufs_unlock;
42656 spin_unlock(&cookie->lock);
42657
42658- fscache_stat(&fscache_n_retrieval_ops);
42659+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42660
42661 /* pin the netfs read context in case we need to do the actual netfs
42662 * read because we've encountered a cache read failure */
42663@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
42664
42665 error:
42666 if (ret == -ENOMEM)
42667- fscache_stat(&fscache_n_retrievals_nomem);
42668+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42669 else if (ret == -ERESTARTSYS)
42670- fscache_stat(&fscache_n_retrievals_intr);
42671+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42672 else if (ret == -ENODATA)
42673- fscache_stat(&fscache_n_retrievals_nodata);
42674+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42675 else if (ret < 0)
42676- fscache_stat(&fscache_n_retrievals_nobufs);
42677+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42678 else
42679- fscache_stat(&fscache_n_retrievals_ok);
42680+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42681
42682 fscache_put_retrieval(op);
42683 _leave(" = %d", ret);
42684@@ -545,7 +545,7 @@ nobufs_unlock:
42685 spin_unlock(&cookie->lock);
42686 kfree(op);
42687 nobufs:
42688- fscache_stat(&fscache_n_retrievals_nobufs);
42689+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42690 _leave(" = -ENOBUFS");
42691 return -ENOBUFS;
42692 }
42693@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
42694
42695 _enter("%p,%p,,,", cookie, page);
42696
42697- fscache_stat(&fscache_n_allocs);
42698+ fscache_stat_unchecked(&fscache_n_allocs);
42699
42700 if (hlist_empty(&cookie->backing_objects))
42701 goto nobufs;
42702@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
42703 goto nobufs_unlock;
42704 spin_unlock(&cookie->lock);
42705
42706- fscache_stat(&fscache_n_alloc_ops);
42707+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42708
42709 ret = fscache_wait_for_retrieval_activation(
42710 object, op,
42711@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
42712
42713 error:
42714 if (ret == -ERESTARTSYS)
42715- fscache_stat(&fscache_n_allocs_intr);
42716+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42717 else if (ret < 0)
42718- fscache_stat(&fscache_n_allocs_nobufs);
42719+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42720 else
42721- fscache_stat(&fscache_n_allocs_ok);
42722+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42723
42724 fscache_put_retrieval(op);
42725 _leave(" = %d", ret);
42726@@ -625,7 +625,7 @@ nobufs_unlock:
42727 spin_unlock(&cookie->lock);
42728 kfree(op);
42729 nobufs:
42730- fscache_stat(&fscache_n_allocs_nobufs);
42731+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42732 _leave(" = -ENOBUFS");
42733 return -ENOBUFS;
42734 }
42735@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
42736
42737 spin_lock(&cookie->stores_lock);
42738
42739- fscache_stat(&fscache_n_store_calls);
42740+ fscache_stat_unchecked(&fscache_n_store_calls);
42741
42742 /* find a page to store */
42743 page = NULL;
42744@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
42745 page = results[0];
42746 _debug("gang %d [%lx]", n, page->index);
42747 if (page->index > op->store_limit) {
42748- fscache_stat(&fscache_n_store_pages_over_limit);
42749+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42750 goto superseded;
42751 }
42752
42753@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
42754 spin_unlock(&cookie->stores_lock);
42755 spin_unlock(&object->lock);
42756
42757- fscache_stat(&fscache_n_store_pages);
42758+ fscache_stat_unchecked(&fscache_n_store_pages);
42759 fscache_stat(&fscache_n_cop_write_page);
42760 ret = object->cache->ops->write_page(op, page);
42761 fscache_stat_d(&fscache_n_cop_write_page);
42762@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
42763 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42764 ASSERT(PageFsCache(page));
42765
42766- fscache_stat(&fscache_n_stores);
42767+ fscache_stat_unchecked(&fscache_n_stores);
42768
42769 op = kzalloc(sizeof(*op), GFP_NOIO);
42770 if (!op)
42771@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
42772 spin_unlock(&cookie->stores_lock);
42773 spin_unlock(&object->lock);
42774
42775- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42776+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42777 op->store_limit = object->store_limit;
42778
42779 if (fscache_submit_op(object, &op->op) < 0)
42780@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
42781
42782 spin_unlock(&cookie->lock);
42783 radix_tree_preload_end();
42784- fscache_stat(&fscache_n_store_ops);
42785- fscache_stat(&fscache_n_stores_ok);
42786+ fscache_stat_unchecked(&fscache_n_store_ops);
42787+ fscache_stat_unchecked(&fscache_n_stores_ok);
42788
42789 /* the work queue now carries its own ref on the object */
42790 fscache_put_operation(&op->op);
42791@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
42792 return 0;
42793
42794 already_queued:
42795- fscache_stat(&fscache_n_stores_again);
42796+ fscache_stat_unchecked(&fscache_n_stores_again);
42797 already_pending:
42798 spin_unlock(&cookie->stores_lock);
42799 spin_unlock(&object->lock);
42800 spin_unlock(&cookie->lock);
42801 radix_tree_preload_end();
42802 kfree(op);
42803- fscache_stat(&fscache_n_stores_ok);
42804+ fscache_stat_unchecked(&fscache_n_stores_ok);
42805 _leave(" = 0");
42806 return 0;
42807
42808@@ -851,14 +851,14 @@ nobufs:
42809 spin_unlock(&cookie->lock);
42810 radix_tree_preload_end();
42811 kfree(op);
42812- fscache_stat(&fscache_n_stores_nobufs);
42813+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42814 _leave(" = -ENOBUFS");
42815 return -ENOBUFS;
42816
42817 nomem_free:
42818 kfree(op);
42819 nomem:
42820- fscache_stat(&fscache_n_stores_oom);
42821+ fscache_stat_unchecked(&fscache_n_stores_oom);
42822 _leave(" = -ENOMEM");
42823 return -ENOMEM;
42824 }
42825@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
42826 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42827 ASSERTCMP(page, !=, NULL);
42828
42829- fscache_stat(&fscache_n_uncaches);
42830+ fscache_stat_unchecked(&fscache_n_uncaches);
42831
42832 /* cache withdrawal may beat us to it */
42833 if (!PageFsCache(page))
42834@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
42835 unsigned long loop;
42836
42837 #ifdef CONFIG_FSCACHE_STATS
42838- atomic_add(pagevec->nr, &fscache_n_marks);
42839+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42840 #endif
42841
42842 for (loop = 0; loop < pagevec->nr; loop++) {
42843diff -urNp linux-3.0.7/fs/fscache/stats.c linux-3.0.7/fs/fscache/stats.c
42844--- linux-3.0.7/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
42845+++ linux-3.0.7/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
42846@@ -18,95 +18,95 @@
42847 /*
42848 * operation counters
42849 */
42850-atomic_t fscache_n_op_pend;
42851-atomic_t fscache_n_op_run;
42852-atomic_t fscache_n_op_enqueue;
42853-atomic_t fscache_n_op_requeue;
42854-atomic_t fscache_n_op_deferred_release;
42855-atomic_t fscache_n_op_release;
42856-atomic_t fscache_n_op_gc;
42857-atomic_t fscache_n_op_cancelled;
42858-atomic_t fscache_n_op_rejected;
42859-
42860-atomic_t fscache_n_attr_changed;
42861-atomic_t fscache_n_attr_changed_ok;
42862-atomic_t fscache_n_attr_changed_nobufs;
42863-atomic_t fscache_n_attr_changed_nomem;
42864-atomic_t fscache_n_attr_changed_calls;
42865-
42866-atomic_t fscache_n_allocs;
42867-atomic_t fscache_n_allocs_ok;
42868-atomic_t fscache_n_allocs_wait;
42869-atomic_t fscache_n_allocs_nobufs;
42870-atomic_t fscache_n_allocs_intr;
42871-atomic_t fscache_n_allocs_object_dead;
42872-atomic_t fscache_n_alloc_ops;
42873-atomic_t fscache_n_alloc_op_waits;
42874-
42875-atomic_t fscache_n_retrievals;
42876-atomic_t fscache_n_retrievals_ok;
42877-atomic_t fscache_n_retrievals_wait;
42878-atomic_t fscache_n_retrievals_nodata;
42879-atomic_t fscache_n_retrievals_nobufs;
42880-atomic_t fscache_n_retrievals_intr;
42881-atomic_t fscache_n_retrievals_nomem;
42882-atomic_t fscache_n_retrievals_object_dead;
42883-atomic_t fscache_n_retrieval_ops;
42884-atomic_t fscache_n_retrieval_op_waits;
42885-
42886-atomic_t fscache_n_stores;
42887-atomic_t fscache_n_stores_ok;
42888-atomic_t fscache_n_stores_again;
42889-atomic_t fscache_n_stores_nobufs;
42890-atomic_t fscache_n_stores_oom;
42891-atomic_t fscache_n_store_ops;
42892-atomic_t fscache_n_store_calls;
42893-atomic_t fscache_n_store_pages;
42894-atomic_t fscache_n_store_radix_deletes;
42895-atomic_t fscache_n_store_pages_over_limit;
42896-
42897-atomic_t fscache_n_store_vmscan_not_storing;
42898-atomic_t fscache_n_store_vmscan_gone;
42899-atomic_t fscache_n_store_vmscan_busy;
42900-atomic_t fscache_n_store_vmscan_cancelled;
42901-
42902-atomic_t fscache_n_marks;
42903-atomic_t fscache_n_uncaches;
42904-
42905-atomic_t fscache_n_acquires;
42906-atomic_t fscache_n_acquires_null;
42907-atomic_t fscache_n_acquires_no_cache;
42908-atomic_t fscache_n_acquires_ok;
42909-atomic_t fscache_n_acquires_nobufs;
42910-atomic_t fscache_n_acquires_oom;
42911-
42912-atomic_t fscache_n_updates;
42913-atomic_t fscache_n_updates_null;
42914-atomic_t fscache_n_updates_run;
42915-
42916-atomic_t fscache_n_relinquishes;
42917-atomic_t fscache_n_relinquishes_null;
42918-atomic_t fscache_n_relinquishes_waitcrt;
42919-atomic_t fscache_n_relinquishes_retire;
42920-
42921-atomic_t fscache_n_cookie_index;
42922-atomic_t fscache_n_cookie_data;
42923-atomic_t fscache_n_cookie_special;
42924-
42925-atomic_t fscache_n_object_alloc;
42926-atomic_t fscache_n_object_no_alloc;
42927-atomic_t fscache_n_object_lookups;
42928-atomic_t fscache_n_object_lookups_negative;
42929-atomic_t fscache_n_object_lookups_positive;
42930-atomic_t fscache_n_object_lookups_timed_out;
42931-atomic_t fscache_n_object_created;
42932-atomic_t fscache_n_object_avail;
42933-atomic_t fscache_n_object_dead;
42934-
42935-atomic_t fscache_n_checkaux_none;
42936-atomic_t fscache_n_checkaux_okay;
42937-atomic_t fscache_n_checkaux_update;
42938-atomic_t fscache_n_checkaux_obsolete;
42939+atomic_unchecked_t fscache_n_op_pend;
42940+atomic_unchecked_t fscache_n_op_run;
42941+atomic_unchecked_t fscache_n_op_enqueue;
42942+atomic_unchecked_t fscache_n_op_requeue;
42943+atomic_unchecked_t fscache_n_op_deferred_release;
42944+atomic_unchecked_t fscache_n_op_release;
42945+atomic_unchecked_t fscache_n_op_gc;
42946+atomic_unchecked_t fscache_n_op_cancelled;
42947+atomic_unchecked_t fscache_n_op_rejected;
42948+
42949+atomic_unchecked_t fscache_n_attr_changed;
42950+atomic_unchecked_t fscache_n_attr_changed_ok;
42951+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42952+atomic_unchecked_t fscache_n_attr_changed_nomem;
42953+atomic_unchecked_t fscache_n_attr_changed_calls;
42954+
42955+atomic_unchecked_t fscache_n_allocs;
42956+atomic_unchecked_t fscache_n_allocs_ok;
42957+atomic_unchecked_t fscache_n_allocs_wait;
42958+atomic_unchecked_t fscache_n_allocs_nobufs;
42959+atomic_unchecked_t fscache_n_allocs_intr;
42960+atomic_unchecked_t fscache_n_allocs_object_dead;
42961+atomic_unchecked_t fscache_n_alloc_ops;
42962+atomic_unchecked_t fscache_n_alloc_op_waits;
42963+
42964+atomic_unchecked_t fscache_n_retrievals;
42965+atomic_unchecked_t fscache_n_retrievals_ok;
42966+atomic_unchecked_t fscache_n_retrievals_wait;
42967+atomic_unchecked_t fscache_n_retrievals_nodata;
42968+atomic_unchecked_t fscache_n_retrievals_nobufs;
42969+atomic_unchecked_t fscache_n_retrievals_intr;
42970+atomic_unchecked_t fscache_n_retrievals_nomem;
42971+atomic_unchecked_t fscache_n_retrievals_object_dead;
42972+atomic_unchecked_t fscache_n_retrieval_ops;
42973+atomic_unchecked_t fscache_n_retrieval_op_waits;
42974+
42975+atomic_unchecked_t fscache_n_stores;
42976+atomic_unchecked_t fscache_n_stores_ok;
42977+atomic_unchecked_t fscache_n_stores_again;
42978+atomic_unchecked_t fscache_n_stores_nobufs;
42979+atomic_unchecked_t fscache_n_stores_oom;
42980+atomic_unchecked_t fscache_n_store_ops;
42981+atomic_unchecked_t fscache_n_store_calls;
42982+atomic_unchecked_t fscache_n_store_pages;
42983+atomic_unchecked_t fscache_n_store_radix_deletes;
42984+atomic_unchecked_t fscache_n_store_pages_over_limit;
42985+
42986+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42987+atomic_unchecked_t fscache_n_store_vmscan_gone;
42988+atomic_unchecked_t fscache_n_store_vmscan_busy;
42989+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42990+
42991+atomic_unchecked_t fscache_n_marks;
42992+atomic_unchecked_t fscache_n_uncaches;
42993+
42994+atomic_unchecked_t fscache_n_acquires;
42995+atomic_unchecked_t fscache_n_acquires_null;
42996+atomic_unchecked_t fscache_n_acquires_no_cache;
42997+atomic_unchecked_t fscache_n_acquires_ok;
42998+atomic_unchecked_t fscache_n_acquires_nobufs;
42999+atomic_unchecked_t fscache_n_acquires_oom;
43000+
43001+atomic_unchecked_t fscache_n_updates;
43002+atomic_unchecked_t fscache_n_updates_null;
43003+atomic_unchecked_t fscache_n_updates_run;
43004+
43005+atomic_unchecked_t fscache_n_relinquishes;
43006+atomic_unchecked_t fscache_n_relinquishes_null;
43007+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43008+atomic_unchecked_t fscache_n_relinquishes_retire;
43009+
43010+atomic_unchecked_t fscache_n_cookie_index;
43011+atomic_unchecked_t fscache_n_cookie_data;
43012+atomic_unchecked_t fscache_n_cookie_special;
43013+
43014+atomic_unchecked_t fscache_n_object_alloc;
43015+atomic_unchecked_t fscache_n_object_no_alloc;
43016+atomic_unchecked_t fscache_n_object_lookups;
43017+atomic_unchecked_t fscache_n_object_lookups_negative;
43018+atomic_unchecked_t fscache_n_object_lookups_positive;
43019+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43020+atomic_unchecked_t fscache_n_object_created;
43021+atomic_unchecked_t fscache_n_object_avail;
43022+atomic_unchecked_t fscache_n_object_dead;
43023+
43024+atomic_unchecked_t fscache_n_checkaux_none;
43025+atomic_unchecked_t fscache_n_checkaux_okay;
43026+atomic_unchecked_t fscache_n_checkaux_update;
43027+atomic_unchecked_t fscache_n_checkaux_obsolete;
43028
43029 atomic_t fscache_n_cop_alloc_object;
43030 atomic_t fscache_n_cop_lookup_object;
43031@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43032 seq_puts(m, "FS-Cache statistics\n");
43033
43034 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43035- atomic_read(&fscache_n_cookie_index),
43036- atomic_read(&fscache_n_cookie_data),
43037- atomic_read(&fscache_n_cookie_special));
43038+ atomic_read_unchecked(&fscache_n_cookie_index),
43039+ atomic_read_unchecked(&fscache_n_cookie_data),
43040+ atomic_read_unchecked(&fscache_n_cookie_special));
43041
43042 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43043- atomic_read(&fscache_n_object_alloc),
43044- atomic_read(&fscache_n_object_no_alloc),
43045- atomic_read(&fscache_n_object_avail),
43046- atomic_read(&fscache_n_object_dead));
43047+ atomic_read_unchecked(&fscache_n_object_alloc),
43048+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43049+ atomic_read_unchecked(&fscache_n_object_avail),
43050+ atomic_read_unchecked(&fscache_n_object_dead));
43051 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43052- atomic_read(&fscache_n_checkaux_none),
43053- atomic_read(&fscache_n_checkaux_okay),
43054- atomic_read(&fscache_n_checkaux_update),
43055- atomic_read(&fscache_n_checkaux_obsolete));
43056+ atomic_read_unchecked(&fscache_n_checkaux_none),
43057+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43058+ atomic_read_unchecked(&fscache_n_checkaux_update),
43059+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43060
43061 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43062- atomic_read(&fscache_n_marks),
43063- atomic_read(&fscache_n_uncaches));
43064+ atomic_read_unchecked(&fscache_n_marks),
43065+ atomic_read_unchecked(&fscache_n_uncaches));
43066
43067 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43068 " oom=%u\n",
43069- atomic_read(&fscache_n_acquires),
43070- atomic_read(&fscache_n_acquires_null),
43071- atomic_read(&fscache_n_acquires_no_cache),
43072- atomic_read(&fscache_n_acquires_ok),
43073- atomic_read(&fscache_n_acquires_nobufs),
43074- atomic_read(&fscache_n_acquires_oom));
43075+ atomic_read_unchecked(&fscache_n_acquires),
43076+ atomic_read_unchecked(&fscache_n_acquires_null),
43077+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43078+ atomic_read_unchecked(&fscache_n_acquires_ok),
43079+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43080+ atomic_read_unchecked(&fscache_n_acquires_oom));
43081
43082 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43083- atomic_read(&fscache_n_object_lookups),
43084- atomic_read(&fscache_n_object_lookups_negative),
43085- atomic_read(&fscache_n_object_lookups_positive),
43086- atomic_read(&fscache_n_object_created),
43087- atomic_read(&fscache_n_object_lookups_timed_out));
43088+ atomic_read_unchecked(&fscache_n_object_lookups),
43089+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43090+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43091+ atomic_read_unchecked(&fscache_n_object_created),
43092+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43093
43094 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43095- atomic_read(&fscache_n_updates),
43096- atomic_read(&fscache_n_updates_null),
43097- atomic_read(&fscache_n_updates_run));
43098+ atomic_read_unchecked(&fscache_n_updates),
43099+ atomic_read_unchecked(&fscache_n_updates_null),
43100+ atomic_read_unchecked(&fscache_n_updates_run));
43101
43102 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43103- atomic_read(&fscache_n_relinquishes),
43104- atomic_read(&fscache_n_relinquishes_null),
43105- atomic_read(&fscache_n_relinquishes_waitcrt),
43106- atomic_read(&fscache_n_relinquishes_retire));
43107+ atomic_read_unchecked(&fscache_n_relinquishes),
43108+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43109+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43110+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43111
43112 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43113- atomic_read(&fscache_n_attr_changed),
43114- atomic_read(&fscache_n_attr_changed_ok),
43115- atomic_read(&fscache_n_attr_changed_nobufs),
43116- atomic_read(&fscache_n_attr_changed_nomem),
43117- atomic_read(&fscache_n_attr_changed_calls));
43118+ atomic_read_unchecked(&fscache_n_attr_changed),
43119+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43120+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43121+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43122+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43123
43124 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43125- atomic_read(&fscache_n_allocs),
43126- atomic_read(&fscache_n_allocs_ok),
43127- atomic_read(&fscache_n_allocs_wait),
43128- atomic_read(&fscache_n_allocs_nobufs),
43129- atomic_read(&fscache_n_allocs_intr));
43130+ atomic_read_unchecked(&fscache_n_allocs),
43131+ atomic_read_unchecked(&fscache_n_allocs_ok),
43132+ atomic_read_unchecked(&fscache_n_allocs_wait),
43133+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43134+ atomic_read_unchecked(&fscache_n_allocs_intr));
43135 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43136- atomic_read(&fscache_n_alloc_ops),
43137- atomic_read(&fscache_n_alloc_op_waits),
43138- atomic_read(&fscache_n_allocs_object_dead));
43139+ atomic_read_unchecked(&fscache_n_alloc_ops),
43140+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43141+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43142
43143 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43144 " int=%u oom=%u\n",
43145- atomic_read(&fscache_n_retrievals),
43146- atomic_read(&fscache_n_retrievals_ok),
43147- atomic_read(&fscache_n_retrievals_wait),
43148- atomic_read(&fscache_n_retrievals_nodata),
43149- atomic_read(&fscache_n_retrievals_nobufs),
43150- atomic_read(&fscache_n_retrievals_intr),
43151- atomic_read(&fscache_n_retrievals_nomem));
43152+ atomic_read_unchecked(&fscache_n_retrievals),
43153+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43154+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43155+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43156+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43157+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43158+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43159 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43160- atomic_read(&fscache_n_retrieval_ops),
43161- atomic_read(&fscache_n_retrieval_op_waits),
43162- atomic_read(&fscache_n_retrievals_object_dead));
43163+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43164+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43165+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43166
43167 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43168- atomic_read(&fscache_n_stores),
43169- atomic_read(&fscache_n_stores_ok),
43170- atomic_read(&fscache_n_stores_again),
43171- atomic_read(&fscache_n_stores_nobufs),
43172- atomic_read(&fscache_n_stores_oom));
43173+ atomic_read_unchecked(&fscache_n_stores),
43174+ atomic_read_unchecked(&fscache_n_stores_ok),
43175+ atomic_read_unchecked(&fscache_n_stores_again),
43176+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43177+ atomic_read_unchecked(&fscache_n_stores_oom));
43178 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43179- atomic_read(&fscache_n_store_ops),
43180- atomic_read(&fscache_n_store_calls),
43181- atomic_read(&fscache_n_store_pages),
43182- atomic_read(&fscache_n_store_radix_deletes),
43183- atomic_read(&fscache_n_store_pages_over_limit));
43184+ atomic_read_unchecked(&fscache_n_store_ops),
43185+ atomic_read_unchecked(&fscache_n_store_calls),
43186+ atomic_read_unchecked(&fscache_n_store_pages),
43187+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43188+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43189
43190 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43191- atomic_read(&fscache_n_store_vmscan_not_storing),
43192- atomic_read(&fscache_n_store_vmscan_gone),
43193- atomic_read(&fscache_n_store_vmscan_busy),
43194- atomic_read(&fscache_n_store_vmscan_cancelled));
43195+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43196+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43197+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43198+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43199
43200 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43201- atomic_read(&fscache_n_op_pend),
43202- atomic_read(&fscache_n_op_run),
43203- atomic_read(&fscache_n_op_enqueue),
43204- atomic_read(&fscache_n_op_cancelled),
43205- atomic_read(&fscache_n_op_rejected));
43206+ atomic_read_unchecked(&fscache_n_op_pend),
43207+ atomic_read_unchecked(&fscache_n_op_run),
43208+ atomic_read_unchecked(&fscache_n_op_enqueue),
43209+ atomic_read_unchecked(&fscache_n_op_cancelled),
43210+ atomic_read_unchecked(&fscache_n_op_rejected));
43211 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43212- atomic_read(&fscache_n_op_deferred_release),
43213- atomic_read(&fscache_n_op_release),
43214- atomic_read(&fscache_n_op_gc));
43215+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43216+ atomic_read_unchecked(&fscache_n_op_release),
43217+ atomic_read_unchecked(&fscache_n_op_gc));
43218
43219 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43220 atomic_read(&fscache_n_cop_alloc_object),
43221diff -urNp linux-3.0.7/fs/fs_struct.c linux-3.0.7/fs/fs_struct.c
43222--- linux-3.0.7/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
43223+++ linux-3.0.7/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
43224@@ -4,6 +4,7 @@
43225 #include <linux/path.h>
43226 #include <linux/slab.h>
43227 #include <linux/fs_struct.h>
43228+#include <linux/grsecurity.h>
43229 #include "internal.h"
43230
43231 static inline void path_get_longterm(struct path *path)
43232@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43233 old_root = fs->root;
43234 fs->root = *path;
43235 path_get_longterm(path);
43236+ gr_set_chroot_entries(current, path);
43237 write_seqcount_end(&fs->seq);
43238 spin_unlock(&fs->lock);
43239 if (old_root.dentry)
43240@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43241 && fs->root.mnt == old_root->mnt) {
43242 path_get_longterm(new_root);
43243 fs->root = *new_root;
43244+ gr_set_chroot_entries(p, new_root);
43245 count++;
43246 }
43247 if (fs->pwd.dentry == old_root->dentry
43248@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43249 spin_lock(&fs->lock);
43250 write_seqcount_begin(&fs->seq);
43251 tsk->fs = NULL;
43252- kill = !--fs->users;
43253+ gr_clear_chroot_entries(tsk);
43254+ kill = !atomic_dec_return(&fs->users);
43255 write_seqcount_end(&fs->seq);
43256 spin_unlock(&fs->lock);
43257 task_unlock(tsk);
43258@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43259 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43260 /* We don't need to lock fs - think why ;-) */
43261 if (fs) {
43262- fs->users = 1;
43263+ atomic_set(&fs->users, 1);
43264 fs->in_exec = 0;
43265 spin_lock_init(&fs->lock);
43266 seqcount_init(&fs->seq);
43267@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43268 spin_lock(&old->lock);
43269 fs->root = old->root;
43270 path_get_longterm(&fs->root);
43271+ /* instead of calling gr_set_chroot_entries here,
43272+ we call it from every caller of this function
43273+ */
43274 fs->pwd = old->pwd;
43275 path_get_longterm(&fs->pwd);
43276 spin_unlock(&old->lock);
43277@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43278
43279 task_lock(current);
43280 spin_lock(&fs->lock);
43281- kill = !--fs->users;
43282+ kill = !atomic_dec_return(&fs->users);
43283 current->fs = new_fs;
43284+ gr_set_chroot_entries(current, &new_fs->root);
43285 spin_unlock(&fs->lock);
43286 task_unlock(current);
43287
43288@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43289
43290 /* to be mentioned only in INIT_TASK */
43291 struct fs_struct init_fs = {
43292- .users = 1,
43293+ .users = ATOMIC_INIT(1),
43294 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43295 .seq = SEQCNT_ZERO,
43296 .umask = 0022,
43297@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43298 task_lock(current);
43299
43300 spin_lock(&init_fs.lock);
43301- init_fs.users++;
43302+ atomic_inc(&init_fs.users);
43303 spin_unlock(&init_fs.lock);
43304
43305 spin_lock(&fs->lock);
43306 current->fs = &init_fs;
43307- kill = !--fs->users;
43308+ gr_set_chroot_entries(current, &current->fs->root);
43309+ kill = !atomic_dec_return(&fs->users);
43310 spin_unlock(&fs->lock);
43311
43312 task_unlock(current);
43313diff -urNp linux-3.0.7/fs/fuse/cuse.c linux-3.0.7/fs/fuse/cuse.c
43314--- linux-3.0.7/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
43315+++ linux-3.0.7/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
43316@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43317 INIT_LIST_HEAD(&cuse_conntbl[i]);
43318
43319 /* inherit and extend fuse_dev_operations */
43320- cuse_channel_fops = fuse_dev_operations;
43321- cuse_channel_fops.owner = THIS_MODULE;
43322- cuse_channel_fops.open = cuse_channel_open;
43323- cuse_channel_fops.release = cuse_channel_release;
43324+ pax_open_kernel();
43325+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43326+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43327+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43328+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43329+ pax_close_kernel();
43330
43331 cuse_class = class_create(THIS_MODULE, "cuse");
43332 if (IS_ERR(cuse_class))
43333diff -urNp linux-3.0.7/fs/fuse/dev.c linux-3.0.7/fs/fuse/dev.c
43334--- linux-3.0.7/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
43335+++ linux-3.0.7/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
43336@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
43337 ret = 0;
43338 pipe_lock(pipe);
43339
43340- if (!pipe->readers) {
43341+ if (!atomic_read(&pipe->readers)) {
43342 send_sig(SIGPIPE, current, 0);
43343 if (!ret)
43344 ret = -EPIPE;
43345diff -urNp linux-3.0.7/fs/fuse/dir.c linux-3.0.7/fs/fuse/dir.c
43346--- linux-3.0.7/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
43347+++ linux-3.0.7/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
43348@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
43349 return link;
43350 }
43351
43352-static void free_link(char *link)
43353+static void free_link(const char *link)
43354 {
43355 if (!IS_ERR(link))
43356 free_page((unsigned long) link);
43357diff -urNp linux-3.0.7/fs/gfs2/inode.c linux-3.0.7/fs/gfs2/inode.c
43358--- linux-3.0.7/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
43359+++ linux-3.0.7/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
43360@@ -1525,7 +1525,7 @@ out:
43361
43362 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43363 {
43364- char *s = nd_get_link(nd);
43365+ const char *s = nd_get_link(nd);
43366 if (!IS_ERR(s))
43367 kfree(s);
43368 }
43369diff -urNp linux-3.0.7/fs/hfsplus/catalog.c linux-3.0.7/fs/hfsplus/catalog.c
43370--- linux-3.0.7/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
43371+++ linux-3.0.7/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
43372@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43373 int err;
43374 u16 type;
43375
43376+ pax_track_stack();
43377+
43378 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43379 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43380 if (err)
43381@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43382 int entry_size;
43383 int err;
43384
43385+ pax_track_stack();
43386+
43387 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43388 str->name, cnid, inode->i_nlink);
43389 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43390@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
43391 int entry_size, type;
43392 int err = 0;
43393
43394+ pax_track_stack();
43395+
43396 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43397 cnid, src_dir->i_ino, src_name->name,
43398 dst_dir->i_ino, dst_name->name);
43399diff -urNp linux-3.0.7/fs/hfsplus/dir.c linux-3.0.7/fs/hfsplus/dir.c
43400--- linux-3.0.7/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
43401+++ linux-3.0.7/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
43402@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
43403 struct hfsplus_readdir_data *rd;
43404 u16 type;
43405
43406+ pax_track_stack();
43407+
43408 if (filp->f_pos >= inode->i_size)
43409 return 0;
43410
43411diff -urNp linux-3.0.7/fs/hfsplus/inode.c linux-3.0.7/fs/hfsplus/inode.c
43412--- linux-3.0.7/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
43413+++ linux-3.0.7/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
43414@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
43415 int res = 0;
43416 u16 type;
43417
43418+ pax_track_stack();
43419+
43420 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43421
43422 HFSPLUS_I(inode)->linkid = 0;
43423@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
43424 struct hfs_find_data fd;
43425 hfsplus_cat_entry entry;
43426
43427+ pax_track_stack();
43428+
43429 if (HFSPLUS_IS_RSRC(inode))
43430 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43431
43432diff -urNp linux-3.0.7/fs/hfsplus/ioctl.c linux-3.0.7/fs/hfsplus/ioctl.c
43433--- linux-3.0.7/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
43434+++ linux-3.0.7/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
43435@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43436 struct hfsplus_cat_file *file;
43437 int res;
43438
43439+ pax_track_stack();
43440+
43441 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43442 return -EOPNOTSUPP;
43443
43444@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43445 struct hfsplus_cat_file *file;
43446 ssize_t res = 0;
43447
43448+ pax_track_stack();
43449+
43450 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43451 return -EOPNOTSUPP;
43452
43453diff -urNp linux-3.0.7/fs/hfsplus/super.c linux-3.0.7/fs/hfsplus/super.c
43454--- linux-3.0.7/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
43455+++ linux-3.0.7/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
43456@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
43457 struct nls_table *nls = NULL;
43458 int err;
43459
43460+ pax_track_stack();
43461+
43462 err = -EINVAL;
43463 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43464 if (!sbi)
43465diff -urNp linux-3.0.7/fs/hugetlbfs/inode.c linux-3.0.7/fs/hugetlbfs/inode.c
43466--- linux-3.0.7/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43467+++ linux-3.0.7/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
43468@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
43469 .kill_sb = kill_litter_super,
43470 };
43471
43472-static struct vfsmount *hugetlbfs_vfsmount;
43473+struct vfsmount *hugetlbfs_vfsmount;
43474
43475 static int can_do_hugetlb_shm(void)
43476 {
43477diff -urNp linux-3.0.7/fs/inode.c linux-3.0.7/fs/inode.c
43478--- linux-3.0.7/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
43479+++ linux-3.0.7/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
43480@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
43481
43482 #ifdef CONFIG_SMP
43483 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43484- static atomic_t shared_last_ino;
43485- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43486+ static atomic_unchecked_t shared_last_ino;
43487+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43488
43489 res = next - LAST_INO_BATCH;
43490 }
43491diff -urNp linux-3.0.7/fs/jbd/checkpoint.c linux-3.0.7/fs/jbd/checkpoint.c
43492--- linux-3.0.7/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
43493+++ linux-3.0.7/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
43494@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
43495 tid_t this_tid;
43496 int result;
43497
43498+ pax_track_stack();
43499+
43500 jbd_debug(1, "Start checkpoint\n");
43501
43502 /*
43503diff -urNp linux-3.0.7/fs/jffs2/compr_rtime.c linux-3.0.7/fs/jffs2/compr_rtime.c
43504--- linux-3.0.7/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
43505+++ linux-3.0.7/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
43506@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43507 int outpos = 0;
43508 int pos=0;
43509
43510+ pax_track_stack();
43511+
43512 memset(positions,0,sizeof(positions));
43513
43514 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43515@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43516 int outpos = 0;
43517 int pos=0;
43518
43519+ pax_track_stack();
43520+
43521 memset(positions,0,sizeof(positions));
43522
43523 while (outpos<destlen) {
43524diff -urNp linux-3.0.7/fs/jffs2/compr_rubin.c linux-3.0.7/fs/jffs2/compr_rubin.c
43525--- linux-3.0.7/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
43526+++ linux-3.0.7/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
43527@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43528 int ret;
43529 uint32_t mysrclen, mydstlen;
43530
43531+ pax_track_stack();
43532+
43533 mysrclen = *sourcelen;
43534 mydstlen = *dstlen - 8;
43535
43536diff -urNp linux-3.0.7/fs/jffs2/erase.c linux-3.0.7/fs/jffs2/erase.c
43537--- linux-3.0.7/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
43538+++ linux-3.0.7/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
43539@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
43540 struct jffs2_unknown_node marker = {
43541 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43542 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43543- .totlen = cpu_to_je32(c->cleanmarker_size)
43544+ .totlen = cpu_to_je32(c->cleanmarker_size),
43545+ .hdr_crc = cpu_to_je32(0)
43546 };
43547
43548 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43549diff -urNp linux-3.0.7/fs/jffs2/wbuf.c linux-3.0.7/fs/jffs2/wbuf.c
43550--- linux-3.0.7/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
43551+++ linux-3.0.7/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
43552@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43553 {
43554 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43555 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43556- .totlen = constant_cpu_to_je32(8)
43557+ .totlen = constant_cpu_to_je32(8),
43558+ .hdr_crc = constant_cpu_to_je32(0)
43559 };
43560
43561 /*
43562diff -urNp linux-3.0.7/fs/jffs2/xattr.c linux-3.0.7/fs/jffs2/xattr.c
43563--- linux-3.0.7/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
43564+++ linux-3.0.7/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
43565@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43566
43567 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43568
43569+ pax_track_stack();
43570+
43571 /* Phase.1 : Merge same xref */
43572 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43573 xref_tmphash[i] = NULL;
43574diff -urNp linux-3.0.7/fs/jfs/super.c linux-3.0.7/fs/jfs/super.c
43575--- linux-3.0.7/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
43576+++ linux-3.0.7/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
43577@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
43578
43579 jfs_inode_cachep =
43580 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43581- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43582+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43583 init_once);
43584 if (jfs_inode_cachep == NULL)
43585 return -ENOMEM;
43586diff -urNp linux-3.0.7/fs/Kconfig.binfmt linux-3.0.7/fs/Kconfig.binfmt
43587--- linux-3.0.7/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
43588+++ linux-3.0.7/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
43589@@ -86,7 +86,7 @@ config HAVE_AOUT
43590
43591 config BINFMT_AOUT
43592 tristate "Kernel support for a.out and ECOFF binaries"
43593- depends on HAVE_AOUT
43594+ depends on HAVE_AOUT && BROKEN
43595 ---help---
43596 A.out (Assembler.OUTput) is a set of formats for libraries and
43597 executables used in the earliest versions of UNIX. Linux used
43598diff -urNp linux-3.0.7/fs/libfs.c linux-3.0.7/fs/libfs.c
43599--- linux-3.0.7/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
43600+++ linux-3.0.7/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
43601@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
43602
43603 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43604 struct dentry *next;
43605+ char d_name[sizeof(next->d_iname)];
43606+ const unsigned char *name;
43607+
43608 next = list_entry(p, struct dentry, d_u.d_child);
43609 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43610 if (!simple_positive(next)) {
43611@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
43612
43613 spin_unlock(&next->d_lock);
43614 spin_unlock(&dentry->d_lock);
43615- if (filldir(dirent, next->d_name.name,
43616+ name = next->d_name.name;
43617+ if (name == next->d_iname) {
43618+ memcpy(d_name, name, next->d_name.len);
43619+ name = d_name;
43620+ }
43621+ if (filldir(dirent, name,
43622 next->d_name.len, filp->f_pos,
43623 next->d_inode->i_ino,
43624 dt_type(next->d_inode)) < 0)
43625diff -urNp linux-3.0.7/fs/lockd/clntproc.c linux-3.0.7/fs/lockd/clntproc.c
43626--- linux-3.0.7/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
43627+++ linux-3.0.7/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
43628@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43629 /*
43630 * Cookie counter for NLM requests
43631 */
43632-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43633+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43634
43635 void nlmclnt_next_cookie(struct nlm_cookie *c)
43636 {
43637- u32 cookie = atomic_inc_return(&nlm_cookie);
43638+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43639
43640 memcpy(c->data, &cookie, 4);
43641 c->len=4;
43642@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43643 struct nlm_rqst reqst, *req;
43644 int status;
43645
43646+ pax_track_stack();
43647+
43648 req = &reqst;
43649 memset(req, 0, sizeof(*req));
43650 locks_init_lock(&req->a_args.lock.fl);
43651diff -urNp linux-3.0.7/fs/locks.c linux-3.0.7/fs/locks.c
43652--- linux-3.0.7/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
43653+++ linux-3.0.7/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
43654@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
43655 return;
43656
43657 if (filp->f_op && filp->f_op->flock) {
43658- struct file_lock fl = {
43659+ struct file_lock flock = {
43660 .fl_pid = current->tgid,
43661 .fl_file = filp,
43662 .fl_flags = FL_FLOCK,
43663 .fl_type = F_UNLCK,
43664 .fl_end = OFFSET_MAX,
43665 };
43666- filp->f_op->flock(filp, F_SETLKW, &fl);
43667- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43668- fl.fl_ops->fl_release_private(&fl);
43669+ filp->f_op->flock(filp, F_SETLKW, &flock);
43670+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43671+ flock.fl_ops->fl_release_private(&flock);
43672 }
43673
43674 lock_flocks();
43675diff -urNp linux-3.0.7/fs/logfs/super.c linux-3.0.7/fs/logfs/super.c
43676--- linux-3.0.7/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
43677+++ linux-3.0.7/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
43678@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
43679 struct logfs_disk_super _ds1, *ds1 = &_ds1;
43680 int err, valid0, valid1;
43681
43682+ pax_track_stack();
43683+
43684 /* read first superblock */
43685 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
43686 if (err)
43687diff -urNp linux-3.0.7/fs/namei.c linux-3.0.7/fs/namei.c
43688--- linux-3.0.7/fs/namei.c 2011-10-16 21:54:54.000000000 -0400
43689+++ linux-3.0.7/fs/namei.c 2011-10-16 21:55:28.000000000 -0400
43690@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
43691 return ret;
43692
43693 /*
43694- * Read/write DACs are always overridable.
43695- * Executable DACs are overridable for all directories and
43696- * for non-directories that have least one exec bit set.
43697+ * Searching includes executable on directories, else just read.
43698 */
43699- if (!(mask & MAY_EXEC) || execute_ok(inode))
43700- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43701+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43702+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
43703+#ifdef CONFIG_GRKERNSEC
43704+ if (flags & IPERM_FLAG_RCU)
43705+ return -ECHILD;
43706+#endif
43707+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43708 return 0;
43709+ }
43710
43711 /*
43712- * Searching includes executable on directories, else just read.
43713+ * Read/write DACs are always overridable.
43714+ * Executable DACs are overridable for all directories and
43715+ * for non-directories that have least one exec bit set.
43716 */
43717- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43718- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
43719- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43720+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
43721+#ifdef CONFIG_GRKERNSEC
43722+ if (flags & IPERM_FLAG_RCU)
43723+ return -ECHILD;
43724+#endif
43725+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43726 return 0;
43727+ }
43728
43729 return -EACCES;
43730 }
43731@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
43732 br_read_unlock(vfsmount_lock);
43733 }
43734
43735+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43736+ return -ENOENT;
43737+
43738 if (likely(!(nd->flags & LOOKUP_JUMPED)))
43739 return 0;
43740
43741@@ -593,9 +606,16 @@ static inline int exec_permission(struct
43742 if (ret == -ECHILD)
43743 return ret;
43744
43745- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
43746- ns_capable(ns, CAP_DAC_READ_SEARCH))
43747+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
43748 goto ok;
43749+ else {
43750+#ifdef CONFIG_GRKERNSEC
43751+ if (flags & IPERM_FLAG_RCU)
43752+ return -ECHILD;
43753+#endif
43754+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
43755+ goto ok;
43756+ }
43757
43758 return ret;
43759 ok:
43760@@ -703,11 +723,26 @@ follow_link(struct path *link, struct na
43761 return error;
43762 }
43763
43764+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
43765+ dentry->d_inode, dentry, nd->path.mnt)) {
43766+ error = -EACCES;
43767+ *p = ERR_PTR(error); /* no ->put_link(), please */
43768+ path_put(&nd->path);
43769+ return error;
43770+ }
43771+
43772+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
43773+ error = -ENOENT;
43774+ *p = ERR_PTR(error); /* no ->put_link(), please */
43775+ path_put(&nd->path);
43776+ return error;
43777+ }
43778+
43779 nd->last_type = LAST_BIND;
43780 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
43781 error = PTR_ERR(*p);
43782 if (!IS_ERR(*p)) {
43783- char *s = nd_get_link(nd);
43784+ const char *s = nd_get_link(nd);
43785 error = 0;
43786 if (s)
43787 error = __vfs_follow_link(nd, s);
43788@@ -1625,6 +1660,9 @@ static int do_path_lookup(int dfd, const
43789 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
43790
43791 if (likely(!retval)) {
43792+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43793+ return -ENOENT;
43794+
43795 if (unlikely(!audit_dummy_context())) {
43796 if (nd->path.dentry && nd->inode)
43797 audit_inode(name, nd->path.dentry);
43798@@ -1935,6 +1973,30 @@ int vfs_create(struct inode *dir, struct
43799 return error;
43800 }
43801
43802+/*
43803+ * Note that while the flag value (low two bits) for sys_open means:
43804+ * 00 - read-only
43805+ * 01 - write-only
43806+ * 10 - read-write
43807+ * 11 - special
43808+ * it is changed into
43809+ * 00 - no permissions needed
43810+ * 01 - read-permission
43811+ * 10 - write-permission
43812+ * 11 - read-write
43813+ * for the internal routines (ie open_namei()/follow_link() etc)
43814+ * This is more logical, and also allows the 00 "no perm needed"
43815+ * to be used for symlinks (where the permissions are checked
43816+ * later).
43817+ *
43818+*/
43819+static inline int open_to_namei_flags(int flag)
43820+{
43821+ if ((flag+1) & O_ACCMODE)
43822+ flag++;
43823+ return flag;
43824+}
43825+
43826 static int may_open(struct path *path, int acc_mode, int flag)
43827 {
43828 struct dentry *dentry = path->dentry;
43829@@ -1987,7 +2049,27 @@ static int may_open(struct path *path, i
43830 /*
43831 * Ensure there are no outstanding leases on the file.
43832 */
43833- return break_lease(inode, flag);
43834+ error = break_lease(inode, flag);
43835+
43836+ if (error)
43837+ return error;
43838+
43839+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43840+ error = -EPERM;
43841+ goto exit;
43842+ }
43843+
43844+ if (gr_handle_rawio(inode)) {
43845+ error = -EPERM;
43846+ goto exit;
43847+ }
43848+
43849+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
43850+ error = -EACCES;
43851+ goto exit;
43852+ }
43853+exit:
43854+ return error;
43855 }
43856
43857 static int handle_truncate(struct file *filp)
43858@@ -2013,30 +2095,6 @@ static int handle_truncate(struct file *
43859 }
43860
43861 /*
43862- * Note that while the flag value (low two bits) for sys_open means:
43863- * 00 - read-only
43864- * 01 - write-only
43865- * 10 - read-write
43866- * 11 - special
43867- * it is changed into
43868- * 00 - no permissions needed
43869- * 01 - read-permission
43870- * 10 - write-permission
43871- * 11 - read-write
43872- * for the internal routines (ie open_namei()/follow_link() etc)
43873- * This is more logical, and also allows the 00 "no perm needed"
43874- * to be used for symlinks (where the permissions are checked
43875- * later).
43876- *
43877-*/
43878-static inline int open_to_namei_flags(int flag)
43879-{
43880- if ((flag+1) & O_ACCMODE)
43881- flag++;
43882- return flag;
43883-}
43884-
43885-/*
43886 * Handle the last step of open()
43887 */
43888 static struct file *do_last(struct nameidata *nd, struct path *path,
43889@@ -2045,6 +2103,7 @@ static struct file *do_last(struct namei
43890 struct dentry *dir = nd->path.dentry;
43891 struct dentry *dentry;
43892 int open_flag = op->open_flag;
43893+ int flag = open_to_namei_flags(open_flag);
43894 int will_truncate = open_flag & O_TRUNC;
43895 int want_write = 0;
43896 int acc_mode = op->acc_mode;
43897@@ -2132,6 +2191,12 @@ static struct file *do_last(struct namei
43898 /* Negative dentry, just create the file */
43899 if (!dentry->d_inode) {
43900 int mode = op->mode;
43901+
43902+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
43903+ error = -EACCES;
43904+ goto exit_mutex_unlock;
43905+ }
43906+
43907 if (!IS_POSIXACL(dir->d_inode))
43908 mode &= ~current_umask();
43909 /*
43910@@ -2155,6 +2220,8 @@ static struct file *do_last(struct namei
43911 error = vfs_create(dir->d_inode, dentry, mode, nd);
43912 if (error)
43913 goto exit_mutex_unlock;
43914+ else
43915+ gr_handle_create(path->dentry, path->mnt);
43916 mutex_unlock(&dir->d_inode->i_mutex);
43917 dput(nd->path.dentry);
43918 nd->path.dentry = dentry;
43919@@ -2164,6 +2231,14 @@ static struct file *do_last(struct namei
43920 /*
43921 * It already exists.
43922 */
43923+
43924+ /* only check if O_CREAT is specified, all other checks need to go
43925+ into may_open */
43926+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
43927+ error = -EACCES;
43928+ goto exit_mutex_unlock;
43929+ }
43930+
43931 mutex_unlock(&dir->d_inode->i_mutex);
43932 audit_inode(pathname, path->dentry);
43933
43934@@ -2450,6 +2525,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43935 error = may_mknod(mode);
43936 if (error)
43937 goto out_dput;
43938+
43939+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43940+ error = -EPERM;
43941+ goto out_dput;
43942+ }
43943+
43944+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43945+ error = -EACCES;
43946+ goto out_dput;
43947+ }
43948+
43949 error = mnt_want_write(nd.path.mnt);
43950 if (error)
43951 goto out_dput;
43952@@ -2470,6 +2556,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43953 }
43954 out_drop_write:
43955 mnt_drop_write(nd.path.mnt);
43956+
43957+ if (!error)
43958+ gr_handle_create(dentry, nd.path.mnt);
43959 out_dput:
43960 dput(dentry);
43961 out_unlock:
43962@@ -2522,6 +2611,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43963 if (IS_ERR(dentry))
43964 goto out_unlock;
43965
43966+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43967+ error = -EACCES;
43968+ goto out_dput;
43969+ }
43970+
43971 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43972 mode &= ~current_umask();
43973 error = mnt_want_write(nd.path.mnt);
43974@@ -2533,6 +2627,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43975 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43976 out_drop_write:
43977 mnt_drop_write(nd.path.mnt);
43978+
43979+ if (!error)
43980+ gr_handle_create(dentry, nd.path.mnt);
43981+
43982 out_dput:
43983 dput(dentry);
43984 out_unlock:
43985@@ -2615,6 +2713,8 @@ static long do_rmdir(int dfd, const char
43986 char * name;
43987 struct dentry *dentry;
43988 struct nameidata nd;
43989+ ino_t saved_ino = 0;
43990+ dev_t saved_dev = 0;
43991
43992 error = user_path_parent(dfd, pathname, &nd, &name);
43993 if (error)
43994@@ -2643,6 +2743,17 @@ static long do_rmdir(int dfd, const char
43995 error = -ENOENT;
43996 goto exit3;
43997 }
43998+
43999+ if (dentry->d_inode->i_nlink <= 1) {
44000+ saved_ino = dentry->d_inode->i_ino;
44001+ saved_dev = gr_get_dev_from_dentry(dentry);
44002+ }
44003+
44004+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44005+ error = -EACCES;
44006+ goto exit3;
44007+ }
44008+
44009 error = mnt_want_write(nd.path.mnt);
44010 if (error)
44011 goto exit3;
44012@@ -2650,6 +2761,8 @@ static long do_rmdir(int dfd, const char
44013 if (error)
44014 goto exit4;
44015 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44016+ if (!error && (saved_dev || saved_ino))
44017+ gr_handle_delete(saved_ino, saved_dev);
44018 exit4:
44019 mnt_drop_write(nd.path.mnt);
44020 exit3:
44021@@ -2712,6 +2825,8 @@ static long do_unlinkat(int dfd, const c
44022 struct dentry *dentry;
44023 struct nameidata nd;
44024 struct inode *inode = NULL;
44025+ ino_t saved_ino = 0;
44026+ dev_t saved_dev = 0;
44027
44028 error = user_path_parent(dfd, pathname, &nd, &name);
44029 if (error)
44030@@ -2734,6 +2849,16 @@ static long do_unlinkat(int dfd, const c
44031 if (!inode)
44032 goto slashes;
44033 ihold(inode);
44034+
44035+ if (inode->i_nlink <= 1) {
44036+ saved_ino = inode->i_ino;
44037+ saved_dev = gr_get_dev_from_dentry(dentry);
44038+ }
44039+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44040+ error = -EACCES;
44041+ goto exit2;
44042+ }
44043+
44044 error = mnt_want_write(nd.path.mnt);
44045 if (error)
44046 goto exit2;
44047@@ -2741,6 +2866,8 @@ static long do_unlinkat(int dfd, const c
44048 if (error)
44049 goto exit3;
44050 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44051+ if (!error && (saved_ino || saved_dev))
44052+ gr_handle_delete(saved_ino, saved_dev);
44053 exit3:
44054 mnt_drop_write(nd.path.mnt);
44055 exit2:
44056@@ -2818,6 +2945,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44057 if (IS_ERR(dentry))
44058 goto out_unlock;
44059
44060+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44061+ error = -EACCES;
44062+ goto out_dput;
44063+ }
44064+
44065 error = mnt_want_write(nd.path.mnt);
44066 if (error)
44067 goto out_dput;
44068@@ -2825,6 +2957,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44069 if (error)
44070 goto out_drop_write;
44071 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44072+ if (!error)
44073+ gr_handle_create(dentry, nd.path.mnt);
44074 out_drop_write:
44075 mnt_drop_write(nd.path.mnt);
44076 out_dput:
44077@@ -2933,6 +3067,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44078 error = PTR_ERR(new_dentry);
44079 if (IS_ERR(new_dentry))
44080 goto out_unlock;
44081+
44082+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44083+ old_path.dentry->d_inode,
44084+ old_path.dentry->d_inode->i_mode, to)) {
44085+ error = -EACCES;
44086+ goto out_dput;
44087+ }
44088+
44089+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44090+ old_path.dentry, old_path.mnt, to)) {
44091+ error = -EACCES;
44092+ goto out_dput;
44093+ }
44094+
44095 error = mnt_want_write(nd.path.mnt);
44096 if (error)
44097 goto out_dput;
44098@@ -2940,6 +3088,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44099 if (error)
44100 goto out_drop_write;
44101 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44102+ if (!error)
44103+ gr_handle_create(new_dentry, nd.path.mnt);
44104 out_drop_write:
44105 mnt_drop_write(nd.path.mnt);
44106 out_dput:
44107@@ -3117,6 +3267,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44108 char *to;
44109 int error;
44110
44111+ pax_track_stack();
44112+
44113 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44114 if (error)
44115 goto exit;
44116@@ -3173,6 +3325,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44117 if (new_dentry == trap)
44118 goto exit5;
44119
44120+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44121+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44122+ to);
44123+ if (error)
44124+ goto exit5;
44125+
44126 error = mnt_want_write(oldnd.path.mnt);
44127 if (error)
44128 goto exit5;
44129@@ -3182,6 +3340,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44130 goto exit6;
44131 error = vfs_rename(old_dir->d_inode, old_dentry,
44132 new_dir->d_inode, new_dentry);
44133+ if (!error)
44134+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44135+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44136 exit6:
44137 mnt_drop_write(oldnd.path.mnt);
44138 exit5:
44139@@ -3207,6 +3368,8 @@ SYSCALL_DEFINE2(rename, const char __use
44140
44141 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44142 {
44143+ char tmpbuf[64];
44144+ const char *newlink;
44145 int len;
44146
44147 len = PTR_ERR(link);
44148@@ -3216,7 +3379,14 @@ int vfs_readlink(struct dentry *dentry,
44149 len = strlen(link);
44150 if (len > (unsigned) buflen)
44151 len = buflen;
44152- if (copy_to_user(buffer, link, len))
44153+
44154+ if (len < sizeof(tmpbuf)) {
44155+ memcpy(tmpbuf, link, len);
44156+ newlink = tmpbuf;
44157+ } else
44158+ newlink = link;
44159+
44160+ if (copy_to_user(buffer, newlink, len))
44161 len = -EFAULT;
44162 out:
44163 return len;
44164diff -urNp linux-3.0.7/fs/namespace.c linux-3.0.7/fs/namespace.c
44165--- linux-3.0.7/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
44166+++ linux-3.0.7/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
44167@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
44168 if (!(sb->s_flags & MS_RDONLY))
44169 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44170 up_write(&sb->s_umount);
44171+
44172+ gr_log_remount(mnt->mnt_devname, retval);
44173+
44174 return retval;
44175 }
44176
44177@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
44178 br_write_unlock(vfsmount_lock);
44179 up_write(&namespace_sem);
44180 release_mounts(&umount_list);
44181+
44182+ gr_log_unmount(mnt->mnt_devname, retval);
44183+
44184 return retval;
44185 }
44186
44187@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
44188 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44189 MS_STRICTATIME);
44190
44191+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44192+ retval = -EPERM;
44193+ goto dput_out;
44194+ }
44195+
44196+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44197+ retval = -EPERM;
44198+ goto dput_out;
44199+ }
44200+
44201 if (flags & MS_REMOUNT)
44202 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44203 data_page);
44204@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
44205 dev_name, data_page);
44206 dput_out:
44207 path_put(&path);
44208+
44209+ gr_log_mount(dev_name, dir_name, retval);
44210+
44211 return retval;
44212 }
44213
44214@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44215 if (error)
44216 goto out2;
44217
44218+ if (gr_handle_chroot_pivot()) {
44219+ error = -EPERM;
44220+ goto out2;
44221+ }
44222+
44223 get_fs_root(current->fs, &root);
44224 error = lock_mount(&old);
44225 if (error)
44226diff -urNp linux-3.0.7/fs/ncpfs/dir.c linux-3.0.7/fs/ncpfs/dir.c
44227--- linux-3.0.7/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44228+++ linux-3.0.7/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44229@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44230 int res, val = 0, len;
44231 __u8 __name[NCP_MAXPATHLEN + 1];
44232
44233+ pax_track_stack();
44234+
44235 if (dentry == dentry->d_sb->s_root)
44236 return 1;
44237
44238@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44239 int error, res, len;
44240 __u8 __name[NCP_MAXPATHLEN + 1];
44241
44242+ pax_track_stack();
44243+
44244 error = -EIO;
44245 if (!ncp_conn_valid(server))
44246 goto finished;
44247@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44248 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44249 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44250
44251+ pax_track_stack();
44252+
44253 ncp_age_dentry(server, dentry);
44254 len = sizeof(__name);
44255 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44256@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44257 int error, len;
44258 __u8 __name[NCP_MAXPATHLEN + 1];
44259
44260+ pax_track_stack();
44261+
44262 DPRINTK("ncp_mkdir: making %s/%s\n",
44263 dentry->d_parent->d_name.name, dentry->d_name.name);
44264
44265@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44266 int old_len, new_len;
44267 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44268
44269+ pax_track_stack();
44270+
44271 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44272 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44273 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44274diff -urNp linux-3.0.7/fs/ncpfs/inode.c linux-3.0.7/fs/ncpfs/inode.c
44275--- linux-3.0.7/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44276+++ linux-3.0.7/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44277@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44278 #endif
44279 struct ncp_entry_info finfo;
44280
44281+ pax_track_stack();
44282+
44283 memset(&data, 0, sizeof(data));
44284 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44285 if (!server)
44286diff -urNp linux-3.0.7/fs/nfs/inode.c linux-3.0.7/fs/nfs/inode.c
44287--- linux-3.0.7/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44288+++ linux-3.0.7/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
44289@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44290 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44291 nfsi->attrtimeo_timestamp = jiffies;
44292
44293- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44294+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44295 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44296 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44297 else
44298@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
44299 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44300 }
44301
44302-static atomic_long_t nfs_attr_generation_counter;
44303+static atomic_long_unchecked_t nfs_attr_generation_counter;
44304
44305 static unsigned long nfs_read_attr_generation_counter(void)
44306 {
44307- return atomic_long_read(&nfs_attr_generation_counter);
44308+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44309 }
44310
44311 unsigned long nfs_inc_attr_generation_counter(void)
44312 {
44313- return atomic_long_inc_return(&nfs_attr_generation_counter);
44314+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44315 }
44316
44317 void nfs_fattr_init(struct nfs_fattr *fattr)
44318diff -urNp linux-3.0.7/fs/nfsd/nfs4state.c linux-3.0.7/fs/nfsd/nfs4state.c
44319--- linux-3.0.7/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
44320+++ linux-3.0.7/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
44321@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44322 unsigned int strhashval;
44323 int err;
44324
44325+ pax_track_stack();
44326+
44327 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44328 (long long) lock->lk_offset,
44329 (long long) lock->lk_length);
44330diff -urNp linux-3.0.7/fs/nfsd/nfs4xdr.c linux-3.0.7/fs/nfsd/nfs4xdr.c
44331--- linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
44332+++ linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
44333@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44334 .dentry = dentry,
44335 };
44336
44337+ pax_track_stack();
44338+
44339 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44340 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44341 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44342diff -urNp linux-3.0.7/fs/nfsd/vfs.c linux-3.0.7/fs/nfsd/vfs.c
44343--- linux-3.0.7/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
44344+++ linux-3.0.7/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
44345@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44346 } else {
44347 oldfs = get_fs();
44348 set_fs(KERNEL_DS);
44349- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44350+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44351 set_fs(oldfs);
44352 }
44353
44354@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44355
44356 /* Write the data. */
44357 oldfs = get_fs(); set_fs(KERNEL_DS);
44358- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44359+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44360 set_fs(oldfs);
44361 if (host_err < 0)
44362 goto out_nfserr;
44363@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44364 */
44365
44366 oldfs = get_fs(); set_fs(KERNEL_DS);
44367- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44368+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44369 set_fs(oldfs);
44370
44371 if (host_err < 0)
44372diff -urNp linux-3.0.7/fs/notify/fanotify/fanotify_user.c linux-3.0.7/fs/notify/fanotify/fanotify_user.c
44373--- linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
44374+++ linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
44375@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44376 goto out_close_fd;
44377
44378 ret = -EFAULT;
44379- if (copy_to_user(buf, &fanotify_event_metadata,
44380+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44381+ copy_to_user(buf, &fanotify_event_metadata,
44382 fanotify_event_metadata.event_len))
44383 goto out_kill_access_response;
44384
44385diff -urNp linux-3.0.7/fs/notify/notification.c linux-3.0.7/fs/notify/notification.c
44386--- linux-3.0.7/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
44387+++ linux-3.0.7/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
44388@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44389 * get set to 0 so it will never get 'freed'
44390 */
44391 static struct fsnotify_event *q_overflow_event;
44392-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44393+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44394
44395 /**
44396 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44397@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44398 */
44399 u32 fsnotify_get_cookie(void)
44400 {
44401- return atomic_inc_return(&fsnotify_sync_cookie);
44402+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44403 }
44404 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44405
44406diff -urNp linux-3.0.7/fs/ntfs/dir.c linux-3.0.7/fs/ntfs/dir.c
44407--- linux-3.0.7/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44408+++ linux-3.0.7/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
44409@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44410 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44411 ~(s64)(ndir->itype.index.block_size - 1)));
44412 /* Bounds checks. */
44413- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44414+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44415 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44416 "inode 0x%lx or driver bug.", vdir->i_ino);
44417 goto err_out;
44418diff -urNp linux-3.0.7/fs/ntfs/file.c linux-3.0.7/fs/ntfs/file.c
44419--- linux-3.0.7/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
44420+++ linux-3.0.7/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
44421@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
44422 #endif /* NTFS_RW */
44423 };
44424
44425-const struct file_operations ntfs_empty_file_ops = {};
44426+const struct file_operations ntfs_empty_file_ops __read_only;
44427
44428-const struct inode_operations ntfs_empty_inode_ops = {};
44429+const struct inode_operations ntfs_empty_inode_ops __read_only;
44430diff -urNp linux-3.0.7/fs/ocfs2/localalloc.c linux-3.0.7/fs/ocfs2/localalloc.c
44431--- linux-3.0.7/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
44432+++ linux-3.0.7/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
44433@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44434 goto bail;
44435 }
44436
44437- atomic_inc(&osb->alloc_stats.moves);
44438+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44439
44440 bail:
44441 if (handle)
44442diff -urNp linux-3.0.7/fs/ocfs2/namei.c linux-3.0.7/fs/ocfs2/namei.c
44443--- linux-3.0.7/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
44444+++ linux-3.0.7/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
44445@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44446 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44447 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44448
44449+ pax_track_stack();
44450+
44451 /* At some point it might be nice to break this function up a
44452 * bit. */
44453
44454diff -urNp linux-3.0.7/fs/ocfs2/ocfs2.h linux-3.0.7/fs/ocfs2/ocfs2.h
44455--- linux-3.0.7/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
44456+++ linux-3.0.7/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
44457@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44458
44459 struct ocfs2_alloc_stats
44460 {
44461- atomic_t moves;
44462- atomic_t local_data;
44463- atomic_t bitmap_data;
44464- atomic_t bg_allocs;
44465- atomic_t bg_extends;
44466+ atomic_unchecked_t moves;
44467+ atomic_unchecked_t local_data;
44468+ atomic_unchecked_t bitmap_data;
44469+ atomic_unchecked_t bg_allocs;
44470+ atomic_unchecked_t bg_extends;
44471 };
44472
44473 enum ocfs2_local_alloc_state
44474diff -urNp linux-3.0.7/fs/ocfs2/suballoc.c linux-3.0.7/fs/ocfs2/suballoc.c
44475--- linux-3.0.7/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
44476+++ linux-3.0.7/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
44477@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44478 mlog_errno(status);
44479 goto bail;
44480 }
44481- atomic_inc(&osb->alloc_stats.bg_extends);
44482+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44483
44484 /* You should never ask for this much metadata */
44485 BUG_ON(bits_wanted >
44486@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44487 mlog_errno(status);
44488 goto bail;
44489 }
44490- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44491+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44492
44493 *suballoc_loc = res.sr_bg_blkno;
44494 *suballoc_bit_start = res.sr_bit_offset;
44495@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44496 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44497 res->sr_bits);
44498
44499- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44500+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44501
44502 BUG_ON(res->sr_bits != 1);
44503
44504@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44505 mlog_errno(status);
44506 goto bail;
44507 }
44508- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44509+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44510
44511 BUG_ON(res.sr_bits != 1);
44512
44513@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44514 cluster_start,
44515 num_clusters);
44516 if (!status)
44517- atomic_inc(&osb->alloc_stats.local_data);
44518+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44519 } else {
44520 if (min_clusters > (osb->bitmap_cpg - 1)) {
44521 /* The only paths asking for contiguousness
44522@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
44523 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44524 res.sr_bg_blkno,
44525 res.sr_bit_offset);
44526- atomic_inc(&osb->alloc_stats.bitmap_data);
44527+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44528 *num_clusters = res.sr_bits;
44529 }
44530 }
44531diff -urNp linux-3.0.7/fs/ocfs2/super.c linux-3.0.7/fs/ocfs2/super.c
44532--- linux-3.0.7/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
44533+++ linux-3.0.7/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
44534@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44535 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44536 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44537 "Stats",
44538- atomic_read(&osb->alloc_stats.bitmap_data),
44539- atomic_read(&osb->alloc_stats.local_data),
44540- atomic_read(&osb->alloc_stats.bg_allocs),
44541- atomic_read(&osb->alloc_stats.moves),
44542- atomic_read(&osb->alloc_stats.bg_extends));
44543+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44544+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44545+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44546+ atomic_read_unchecked(&osb->alloc_stats.moves),
44547+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44548
44549 out += snprintf(buf + out, len - out,
44550 "%10s => State: %u Descriptor: %llu Size: %u bits "
44551@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
44552 spin_lock_init(&osb->osb_xattr_lock);
44553 ocfs2_init_steal_slots(osb);
44554
44555- atomic_set(&osb->alloc_stats.moves, 0);
44556- atomic_set(&osb->alloc_stats.local_data, 0);
44557- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44558- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44559- atomic_set(&osb->alloc_stats.bg_extends, 0);
44560+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44561+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44562+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44563+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44564+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44565
44566 /* Copy the blockcheck stats from the superblock probe */
44567 osb->osb_ecc_stats = *stats;
44568diff -urNp linux-3.0.7/fs/ocfs2/symlink.c linux-3.0.7/fs/ocfs2/symlink.c
44569--- linux-3.0.7/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
44570+++ linux-3.0.7/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
44571@@ -142,7 +142,7 @@ bail:
44572
44573 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44574 {
44575- char *link = nd_get_link(nd);
44576+ const char *link = nd_get_link(nd);
44577 if (!IS_ERR(link))
44578 kfree(link);
44579 }
44580diff -urNp linux-3.0.7/fs/open.c linux-3.0.7/fs/open.c
44581--- linux-3.0.7/fs/open.c 2011-07-21 22:17:23.000000000 -0400
44582+++ linux-3.0.7/fs/open.c 2011-09-14 09:16:46.000000000 -0400
44583@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
44584 error = locks_verify_truncate(inode, NULL, length);
44585 if (!error)
44586 error = security_path_truncate(&path);
44587+
44588+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44589+ error = -EACCES;
44590+
44591 if (!error)
44592 error = do_truncate(path.dentry, length, 0, NULL);
44593
44594@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44595 if (__mnt_is_readonly(path.mnt))
44596 res = -EROFS;
44597
44598+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44599+ res = -EACCES;
44600+
44601 out_path_release:
44602 path_put(&path);
44603 out:
44604@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44605 if (error)
44606 goto dput_and_out;
44607
44608+ gr_log_chdir(path.dentry, path.mnt);
44609+
44610 set_fs_pwd(current->fs, &path);
44611
44612 dput_and_out:
44613@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44614 goto out_putf;
44615
44616 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44617+
44618+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44619+ error = -EPERM;
44620+
44621+ if (!error)
44622+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44623+
44624 if (!error)
44625 set_fs_pwd(current->fs, &file->f_path);
44626 out_putf:
44627@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
44628 if (error)
44629 goto dput_and_out;
44630
44631+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44632+ goto dput_and_out;
44633+
44634 set_fs_root(current->fs, &path);
44635+
44636+ gr_handle_chroot_chdir(&path);
44637+
44638 error = 0;
44639 dput_and_out:
44640 path_put(&path);
44641@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44642 err = mnt_want_write_file(file);
44643 if (err)
44644 goto out_putf;
44645+
44646 mutex_lock(&inode->i_mutex);
44647+
44648+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
44649+ err = -EACCES;
44650+ goto out_unlock;
44651+ }
44652+
44653 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
44654 if (err)
44655 goto out_unlock;
44656 if (mode == (mode_t) -1)
44657 mode = inode->i_mode;
44658+
44659+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
44660+ err = -EACCES;
44661+ goto out_unlock;
44662+ }
44663+
44664 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44665 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44666 err = notify_change(dentry, &newattrs);
44667@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44668 error = mnt_want_write(path.mnt);
44669 if (error)
44670 goto dput_and_out;
44671+
44672 mutex_lock(&inode->i_mutex);
44673+
44674+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44675+ error = -EACCES;
44676+ goto out_unlock;
44677+ }
44678+
44679 error = security_path_chmod(path.dentry, path.mnt, mode);
44680 if (error)
44681 goto out_unlock;
44682 if (mode == (mode_t) -1)
44683 mode = inode->i_mode;
44684+
44685+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44686+ error = -EACCES;
44687+ goto out_unlock;
44688+ }
44689+
44690 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44691 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44692 error = notify_change(path.dentry, &newattrs);
44693@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
44694 int error;
44695 struct iattr newattrs;
44696
44697+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
44698+ return -EACCES;
44699+
44700 newattrs.ia_valid = ATTR_CTIME;
44701 if (user != (uid_t) -1) {
44702 newattrs.ia_valid |= ATTR_UID;
44703@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
44704 if (!IS_ERR(tmp)) {
44705 fd = get_unused_fd_flags(flags);
44706 if (fd >= 0) {
44707- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
44708+ struct file *f;
44709+ /* don't allow to be set by userland */
44710+ flags &= ~FMODE_GREXEC;
44711+ f = do_filp_open(dfd, tmp, &op, lookup);
44712 if (IS_ERR(f)) {
44713 put_unused_fd(fd);
44714 fd = PTR_ERR(f);
44715diff -urNp linux-3.0.7/fs/partitions/ldm.c linux-3.0.7/fs/partitions/ldm.c
44716--- linux-3.0.7/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
44717+++ linux-3.0.7/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
44718@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44719 ldm_error ("A VBLK claims to have %d parts.", num);
44720 return false;
44721 }
44722+
44723 if (rec >= num) {
44724 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44725 return false;
44726@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44727 goto found;
44728 }
44729
44730- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44731+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44732 if (!f) {
44733 ldm_crit ("Out of memory.");
44734 return false;
44735diff -urNp linux-3.0.7/fs/pipe.c linux-3.0.7/fs/pipe.c
44736--- linux-3.0.7/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
44737+++ linux-3.0.7/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
44738@@ -420,9 +420,9 @@ redo:
44739 }
44740 if (bufs) /* More to do? */
44741 continue;
44742- if (!pipe->writers)
44743+ if (!atomic_read(&pipe->writers))
44744 break;
44745- if (!pipe->waiting_writers) {
44746+ if (!atomic_read(&pipe->waiting_writers)) {
44747 /* syscall merging: Usually we must not sleep
44748 * if O_NONBLOCK is set, or if we got some data.
44749 * But if a writer sleeps in kernel space, then
44750@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
44751 mutex_lock(&inode->i_mutex);
44752 pipe = inode->i_pipe;
44753
44754- if (!pipe->readers) {
44755+ if (!atomic_read(&pipe->readers)) {
44756 send_sig(SIGPIPE, current, 0);
44757 ret = -EPIPE;
44758 goto out;
44759@@ -530,7 +530,7 @@ redo1:
44760 for (;;) {
44761 int bufs;
44762
44763- if (!pipe->readers) {
44764+ if (!atomic_read(&pipe->readers)) {
44765 send_sig(SIGPIPE, current, 0);
44766 if (!ret)
44767 ret = -EPIPE;
44768@@ -616,9 +616,9 @@ redo2:
44769 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44770 do_wakeup = 0;
44771 }
44772- pipe->waiting_writers++;
44773+ atomic_inc(&pipe->waiting_writers);
44774 pipe_wait(pipe);
44775- pipe->waiting_writers--;
44776+ atomic_dec(&pipe->waiting_writers);
44777 }
44778 out:
44779 mutex_unlock(&inode->i_mutex);
44780@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
44781 mask = 0;
44782 if (filp->f_mode & FMODE_READ) {
44783 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44784- if (!pipe->writers && filp->f_version != pipe->w_counter)
44785+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44786 mask |= POLLHUP;
44787 }
44788
44789@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
44790 * Most Unices do not set POLLERR for FIFOs but on Linux they
44791 * behave exactly like pipes for poll().
44792 */
44793- if (!pipe->readers)
44794+ if (!atomic_read(&pipe->readers))
44795 mask |= POLLERR;
44796 }
44797
44798@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
44799
44800 mutex_lock(&inode->i_mutex);
44801 pipe = inode->i_pipe;
44802- pipe->readers -= decr;
44803- pipe->writers -= decw;
44804+ atomic_sub(decr, &pipe->readers);
44805+ atomic_sub(decw, &pipe->writers);
44806
44807- if (!pipe->readers && !pipe->writers) {
44808+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44809 free_pipe_info(inode);
44810 } else {
44811 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
44812@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
44813
44814 if (inode->i_pipe) {
44815 ret = 0;
44816- inode->i_pipe->readers++;
44817+ atomic_inc(&inode->i_pipe->readers);
44818 }
44819
44820 mutex_unlock(&inode->i_mutex);
44821@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
44822
44823 if (inode->i_pipe) {
44824 ret = 0;
44825- inode->i_pipe->writers++;
44826+ atomic_inc(&inode->i_pipe->writers);
44827 }
44828
44829 mutex_unlock(&inode->i_mutex);
44830@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
44831 if (inode->i_pipe) {
44832 ret = 0;
44833 if (filp->f_mode & FMODE_READ)
44834- inode->i_pipe->readers++;
44835+ atomic_inc(&inode->i_pipe->readers);
44836 if (filp->f_mode & FMODE_WRITE)
44837- inode->i_pipe->writers++;
44838+ atomic_inc(&inode->i_pipe->writers);
44839 }
44840
44841 mutex_unlock(&inode->i_mutex);
44842@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
44843 inode->i_pipe = NULL;
44844 }
44845
44846-static struct vfsmount *pipe_mnt __read_mostly;
44847+struct vfsmount *pipe_mnt __read_mostly;
44848
44849 /*
44850 * pipefs_dname() is called from d_path().
44851@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
44852 goto fail_iput;
44853 inode->i_pipe = pipe;
44854
44855- pipe->readers = pipe->writers = 1;
44856+ atomic_set(&pipe->readers, 1);
44857+ atomic_set(&pipe->writers, 1);
44858 inode->i_fop = &rdwr_pipefifo_fops;
44859
44860 /*
44861diff -urNp linux-3.0.7/fs/proc/array.c linux-3.0.7/fs/proc/array.c
44862--- linux-3.0.7/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
44863+++ linux-3.0.7/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
44864@@ -60,6 +60,7 @@
44865 #include <linux/tty.h>
44866 #include <linux/string.h>
44867 #include <linux/mman.h>
44868+#include <linux/grsecurity.h>
44869 #include <linux/proc_fs.h>
44870 #include <linux/ioport.h>
44871 #include <linux/uaccess.h>
44872@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
44873 seq_putc(m, '\n');
44874 }
44875
44876+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44877+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44878+{
44879+ if (p->mm)
44880+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44881+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44882+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44883+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44884+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44885+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44886+ else
44887+ seq_printf(m, "PaX:\t-----\n");
44888+}
44889+#endif
44890+
44891 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44892 struct pid *pid, struct task_struct *task)
44893 {
44894@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
44895 task_cpus_allowed(m, task);
44896 cpuset_task_status_allowed(m, task);
44897 task_context_switch_counts(m, task);
44898+
44899+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44900+ task_pax(m, task);
44901+#endif
44902+
44903+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44904+ task_grsec_rbac(m, task);
44905+#endif
44906+
44907 return 0;
44908 }
44909
44910+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44911+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44912+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44913+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44914+#endif
44915+
44916 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44917 struct pid *pid, struct task_struct *task, int whole)
44918 {
44919@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
44920 cputime_t cutime, cstime, utime, stime;
44921 cputime_t cgtime, gtime;
44922 unsigned long rsslim = 0;
44923- char tcomm[sizeof(task->comm)];
44924+ char tcomm[sizeof(task->comm)] = { 0 };
44925 unsigned long flags;
44926
44927+ pax_track_stack();
44928+
44929 state = *get_task_state(task);
44930 vsize = eip = esp = 0;
44931 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44932@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
44933 gtime = task->gtime;
44934 }
44935
44936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44937+ if (PAX_RAND_FLAGS(mm)) {
44938+ eip = 0;
44939+ esp = 0;
44940+ wchan = 0;
44941+ }
44942+#endif
44943+#ifdef CONFIG_GRKERNSEC_HIDESYM
44944+ wchan = 0;
44945+ eip =0;
44946+ esp =0;
44947+#endif
44948+
44949 /* scale priority and nice values from timeslices to -20..20 */
44950 /* to make it look like a "normal" Unix priority/nice value */
44951 priority = task_prio(task);
44952@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
44953 vsize,
44954 mm ? get_mm_rss(mm) : 0,
44955 rsslim,
44956+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44957+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44958+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44959+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44960+#else
44961 mm ? (permitted ? mm->start_code : 1) : 0,
44962 mm ? (permitted ? mm->end_code : 1) : 0,
44963 (permitted && mm) ? mm->start_stack : 0,
44964+#endif
44965 esp,
44966 eip,
44967 /* The signal information here is obsolete.
44968@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
44969
44970 return 0;
44971 }
44972+
44973+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44974+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44975+{
44976+ u32 curr_ip = 0;
44977+ unsigned long flags;
44978+
44979+ if (lock_task_sighand(task, &flags)) {
44980+ curr_ip = task->signal->curr_ip;
44981+ unlock_task_sighand(task, &flags);
44982+ }
44983+
44984+ return sprintf(buffer, "%pI4\n", &curr_ip);
44985+}
44986+#endif
44987diff -urNp linux-3.0.7/fs/proc/base.c linux-3.0.7/fs/proc/base.c
44988--- linux-3.0.7/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
44989+++ linux-3.0.7/fs/proc/base.c 2011-10-17 06:38:10.000000000 -0400
44990@@ -107,6 +107,22 @@ struct pid_entry {
44991 union proc_op op;
44992 };
44993
44994+struct getdents_callback {
44995+ struct linux_dirent __user * current_dir;
44996+ struct linux_dirent __user * previous;
44997+ struct file * file;
44998+ int count;
44999+ int error;
45000+};
45001+
45002+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45003+ loff_t offset, u64 ino, unsigned int d_type)
45004+{
45005+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45006+ buf->error = -EINVAL;
45007+ return 0;
45008+}
45009+
45010 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45011 .name = (NAME), \
45012 .len = sizeof(NAME) - 1, \
45013@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45014 if (task == current)
45015 return mm;
45016
45017+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45018+ return ERR_PTR(-EPERM);
45019+
45020 /*
45021 * If current is actively ptrace'ing, and would also be
45022 * permitted to freshly attach with ptrace now, permit it.
45023@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45024 if (!mm->arg_end)
45025 goto out_mm; /* Shh! No looking before we're done */
45026
45027+ if (gr_acl_handle_procpidmem(task))
45028+ goto out_mm;
45029+
45030 len = mm->arg_end - mm->arg_start;
45031
45032 if (len > PAGE_SIZE)
45033@@ -309,12 +331,28 @@ out:
45034 return res;
45035 }
45036
45037+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45038+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45039+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45040+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45041+#endif
45042+
45043 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45044 {
45045 struct mm_struct *mm = mm_for_maps(task);
45046 int res = PTR_ERR(mm);
45047 if (mm && !IS_ERR(mm)) {
45048 unsigned int nwords = 0;
45049+
45050+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45051+ /* allow if we're currently ptracing this task */
45052+ if (PAX_RAND_FLAGS(mm) &&
45053+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45054+ mmput(mm);
45055+ return 0;
45056+ }
45057+#endif
45058+
45059 do {
45060 nwords += 2;
45061 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45062@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45063 }
45064
45065
45066-#ifdef CONFIG_KALLSYMS
45067+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45068 /*
45069 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45070 * Returns the resolved symbol. If that fails, simply return the address.
45071@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45072 mutex_unlock(&task->signal->cred_guard_mutex);
45073 }
45074
45075-#ifdef CONFIG_STACKTRACE
45076+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45077
45078 #define MAX_STACK_TRACE_DEPTH 64
45079
45080@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45081 return count;
45082 }
45083
45084-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45085+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45086 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45087 {
45088 long nr;
45089@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45090 /************************************************************************/
45091
45092 /* permission checks */
45093-static int proc_fd_access_allowed(struct inode *inode)
45094+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45095 {
45096 struct task_struct *task;
45097 int allowed = 0;
45098@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45099 */
45100 task = get_proc_task(inode);
45101 if (task) {
45102- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45103+ if (log)
45104+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45105+ else
45106+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45107 put_task_struct(task);
45108 }
45109 return allowed;
45110@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45111 if (!task)
45112 goto out_no_task;
45113
45114+ if (gr_acl_handle_procpidmem(task))
45115+ goto out;
45116+
45117 ret = -ENOMEM;
45118 page = (char *)__get_free_page(GFP_TEMPORARY);
45119 if (!page)
45120@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
45121 path_put(&nd->path);
45122
45123 /* Are we allowed to snoop on the tasks file descriptors? */
45124- if (!proc_fd_access_allowed(inode))
45125+ if (!proc_fd_access_allowed(inode,0))
45126 goto out;
45127
45128 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45129@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
45130 struct path path;
45131
45132 /* Are we allowed to snoop on the tasks file descriptors? */
45133- if (!proc_fd_access_allowed(inode))
45134- goto out;
45135+ /* logging this is needed for learning on chromium to work properly,
45136+ but we don't want to flood the logs from 'ps' which does a readlink
45137+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45138+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45139+ */
45140+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45141+ if (!proc_fd_access_allowed(inode,0))
45142+ goto out;
45143+ } else {
45144+ if (!proc_fd_access_allowed(inode,1))
45145+ goto out;
45146+ }
45147
45148 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45149 if (error)
45150@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
45151 rcu_read_lock();
45152 cred = __task_cred(task);
45153 inode->i_uid = cred->euid;
45154+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45155+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45156+#else
45157 inode->i_gid = cred->egid;
45158+#endif
45159 rcu_read_unlock();
45160 }
45161 security_task_to_inode(task, inode);
45162@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
45163 struct inode *inode = dentry->d_inode;
45164 struct task_struct *task;
45165 const struct cred *cred;
45166+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45167+ const struct cred *tmpcred = current_cred();
45168+#endif
45169
45170 generic_fillattr(inode, stat);
45171
45172@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
45173 stat->uid = 0;
45174 stat->gid = 0;
45175 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45176+
45177+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45178+ rcu_read_unlock();
45179+ return -ENOENT;
45180+ }
45181+
45182 if (task) {
45183+ cred = __task_cred(task);
45184+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45185+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45186+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45187+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45188+#endif
45189+ ) {
45190+#endif
45191 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45192+#ifdef CONFIG_GRKERNSEC_PROC_USER
45193+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45194+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45195+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45196+#endif
45197 task_dumpable(task)) {
45198- cred = __task_cred(task);
45199 stat->uid = cred->euid;
45200+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45201+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45202+#else
45203 stat->gid = cred->egid;
45204+#endif
45205+ }
45206+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45207+ } else {
45208+ rcu_read_unlock();
45209+ return -ENOENT;
45210 }
45211+#endif
45212 }
45213 rcu_read_unlock();
45214 return 0;
45215@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
45216
45217 if (task) {
45218 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45219+#ifdef CONFIG_GRKERNSEC_PROC_USER
45220+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45221+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45222+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45223+#endif
45224 task_dumpable(task)) {
45225 rcu_read_lock();
45226 cred = __task_cred(task);
45227 inode->i_uid = cred->euid;
45228+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45229+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45230+#else
45231 inode->i_gid = cred->egid;
45232+#endif
45233 rcu_read_unlock();
45234 } else {
45235 inode->i_uid = 0;
45236@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
45237 int fd = proc_fd(inode);
45238
45239 if (task) {
45240- files = get_files_struct(task);
45241+ if (!gr_acl_handle_procpidmem(task))
45242+ files = get_files_struct(task);
45243 put_task_struct(task);
45244 }
45245 if (files) {
45246@@ -2169,11 +2268,21 @@ static const struct file_operations proc
45247 */
45248 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
45249 {
45250+ struct task_struct *task;
45251 int rv = generic_permission(inode, mask, flags, NULL);
45252- if (rv == 0)
45253- return 0;
45254+
45255 if (task_pid(current) == proc_pid(inode))
45256 rv = 0;
45257+
45258+ task = get_proc_task(inode);
45259+ if (task == NULL)
45260+ return rv;
45261+
45262+ if (gr_acl_handle_procpidmem(task))
45263+ rv = -EACCES;
45264+
45265+ put_task_struct(task);
45266+
45267 return rv;
45268 }
45269
45270@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
45271 if (!task)
45272 goto out_no_task;
45273
45274+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45275+ goto out;
45276+
45277 /*
45278 * Yes, it does not scale. And it should not. Don't add
45279 * new entries into /proc/<tgid>/ without very good reasons.
45280@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
45281 if (!task)
45282 goto out_no_task;
45283
45284+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45285+ goto out;
45286+
45287 ret = 0;
45288 i = filp->f_pos;
45289 switch (i) {
45290@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
45291 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45292 void *cookie)
45293 {
45294- char *s = nd_get_link(nd);
45295+ const char *s = nd_get_link(nd);
45296 if (!IS_ERR(s))
45297 __putname(s);
45298 }
45299@@ -2656,6 +2771,9 @@ static struct dentry *proc_base_instanti
45300 if (p->fop)
45301 inode->i_fop = p->fop;
45302 ei->op = p->op;
45303+
45304+ gr_handle_proc_create(dentry, inode);
45305+
45306 d_add(dentry, inode);
45307 error = NULL;
45308 out:
45309@@ -2795,7 +2913,7 @@ static const struct pid_entry tgid_base_
45310 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45311 #endif
45312 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45313-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45314+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45315 INF("syscall", S_IRUGO, proc_pid_syscall),
45316 #endif
45317 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45318@@ -2820,10 +2938,10 @@ static const struct pid_entry tgid_base_
45319 #ifdef CONFIG_SECURITY
45320 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45321 #endif
45322-#ifdef CONFIG_KALLSYMS
45323+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45324 INF("wchan", S_IRUGO, proc_pid_wchan),
45325 #endif
45326-#ifdef CONFIG_STACKTRACE
45327+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45328 ONE("stack", S_IRUGO, proc_pid_stack),
45329 #endif
45330 #ifdef CONFIG_SCHEDSTATS
45331@@ -2857,6 +2975,9 @@ static const struct pid_entry tgid_base_
45332 #ifdef CONFIG_HARDWALL
45333 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45334 #endif
45335+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45336+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45337+#endif
45338 };
45339
45340 static int proc_tgid_base_readdir(struct file * filp,
45341@@ -2982,7 +3103,14 @@ static struct dentry *proc_pid_instantia
45342 if (!inode)
45343 goto out;
45344
45345+#ifdef CONFIG_GRKERNSEC_PROC_USER
45346+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45347+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45348+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45349+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45350+#else
45351 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45352+#endif
45353 inode->i_op = &proc_tgid_base_inode_operations;
45354 inode->i_fop = &proc_tgid_base_operations;
45355 inode->i_flags|=S_IMMUTABLE;
45356@@ -3024,7 +3152,14 @@ struct dentry *proc_pid_lookup(struct in
45357 if (!task)
45358 goto out;
45359
45360+ if (!has_group_leader_pid(task))
45361+ goto out_put_task;
45362+
45363+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45364+ goto out_put_task;
45365+
45366 result = proc_pid_instantiate(dir, dentry, task, NULL);
45367+out_put_task:
45368 put_task_struct(task);
45369 out:
45370 return result;
45371@@ -3089,6 +3224,11 @@ int proc_pid_readdir(struct file * filp,
45372 {
45373 unsigned int nr;
45374 struct task_struct *reaper;
45375+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45376+ const struct cred *tmpcred = current_cred();
45377+ const struct cred *itercred;
45378+#endif
45379+ filldir_t __filldir = filldir;
45380 struct tgid_iter iter;
45381 struct pid_namespace *ns;
45382
45383@@ -3112,8 +3252,27 @@ int proc_pid_readdir(struct file * filp,
45384 for (iter = next_tgid(ns, iter);
45385 iter.task;
45386 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45387+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45388+ rcu_read_lock();
45389+ itercred = __task_cred(iter.task);
45390+#endif
45391+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45392+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45393+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45394+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45395+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45396+#endif
45397+ )
45398+#endif
45399+ )
45400+ __filldir = &gr_fake_filldir;
45401+ else
45402+ __filldir = filldir;
45403+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45404+ rcu_read_unlock();
45405+#endif
45406 filp->f_pos = iter.tgid + TGID_OFFSET;
45407- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45408+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45409 put_task_struct(iter.task);
45410 goto out;
45411 }
45412@@ -3141,7 +3300,7 @@ static const struct pid_entry tid_base_s
45413 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45414 #endif
45415 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45416-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45417+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45418 INF("syscall", S_IRUGO, proc_pid_syscall),
45419 #endif
45420 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45421@@ -3165,10 +3324,10 @@ static const struct pid_entry tid_base_s
45422 #ifdef CONFIG_SECURITY
45423 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45424 #endif
45425-#ifdef CONFIG_KALLSYMS
45426+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45427 INF("wchan", S_IRUGO, proc_pid_wchan),
45428 #endif
45429-#ifdef CONFIG_STACKTRACE
45430+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45431 ONE("stack", S_IRUGO, proc_pid_stack),
45432 #endif
45433 #ifdef CONFIG_SCHEDSTATS
45434diff -urNp linux-3.0.7/fs/proc/cmdline.c linux-3.0.7/fs/proc/cmdline.c
45435--- linux-3.0.7/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
45436+++ linux-3.0.7/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
45437@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45438
45439 static int __init proc_cmdline_init(void)
45440 {
45441+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45442+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45443+#else
45444 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45445+#endif
45446 return 0;
45447 }
45448 module_init(proc_cmdline_init);
45449diff -urNp linux-3.0.7/fs/proc/devices.c linux-3.0.7/fs/proc/devices.c
45450--- linux-3.0.7/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
45451+++ linux-3.0.7/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
45452@@ -64,7 +64,11 @@ static const struct file_operations proc
45453
45454 static int __init proc_devices_init(void)
45455 {
45456+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45457+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45458+#else
45459 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45460+#endif
45461 return 0;
45462 }
45463 module_init(proc_devices_init);
45464diff -urNp linux-3.0.7/fs/proc/inode.c linux-3.0.7/fs/proc/inode.c
45465--- linux-3.0.7/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
45466+++ linux-3.0.7/fs/proc/inode.c 2011-10-17 06:56:02.000000000 -0400
45467@@ -18,6 +18,7 @@
45468 #include <linux/module.h>
45469 #include <linux/sysctl.h>
45470 #include <linux/slab.h>
45471+#include <linux/grsecurity.h>
45472
45473 #include <asm/system.h>
45474 #include <asm/uaccess.h>
45475@@ -102,10 +103,16 @@ void __init proc_init_inodecache(void)
45476 init_once);
45477 }
45478
45479+static int proc_drop_inode(struct inode *inode)
45480+{
45481+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45482+ return generic_delete_inode(inode);
45483+}
45484+
45485 static const struct super_operations proc_sops = {
45486 .alloc_inode = proc_alloc_inode,
45487 .destroy_inode = proc_destroy_inode,
45488- .drop_inode = generic_delete_inode,
45489+ .drop_inode = proc_drop_inode,
45490 .evict_inode = proc_evict_inode,
45491 .statfs = simple_statfs,
45492 };
45493@@ -440,7 +447,11 @@ struct inode *proc_get_inode(struct supe
45494 if (de->mode) {
45495 inode->i_mode = de->mode;
45496 inode->i_uid = de->uid;
45497+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45498+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45499+#else
45500 inode->i_gid = de->gid;
45501+#endif
45502 }
45503 if (de->size)
45504 inode->i_size = de->size;
45505diff -urNp linux-3.0.7/fs/proc/internal.h linux-3.0.7/fs/proc/internal.h
45506--- linux-3.0.7/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
45507+++ linux-3.0.7/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
45508@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45509 struct pid *pid, struct task_struct *task);
45510 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45511 struct pid *pid, struct task_struct *task);
45512+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45513+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45514+#endif
45515 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45516
45517 extern const struct file_operations proc_maps_operations;
45518diff -urNp linux-3.0.7/fs/proc/Kconfig linux-3.0.7/fs/proc/Kconfig
45519--- linux-3.0.7/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
45520+++ linux-3.0.7/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
45521@@ -30,12 +30,12 @@ config PROC_FS
45522
45523 config PROC_KCORE
45524 bool "/proc/kcore support" if !ARM
45525- depends on PROC_FS && MMU
45526+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45527
45528 config PROC_VMCORE
45529 bool "/proc/vmcore support"
45530- depends on PROC_FS && CRASH_DUMP
45531- default y
45532+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45533+ default n
45534 help
45535 Exports the dump image of crashed kernel in ELF format.
45536
45537@@ -59,8 +59,8 @@ config PROC_SYSCTL
45538 limited in memory.
45539
45540 config PROC_PAGE_MONITOR
45541- default y
45542- depends on PROC_FS && MMU
45543+ default n
45544+ depends on PROC_FS && MMU && !GRKERNSEC
45545 bool "Enable /proc page monitoring" if EXPERT
45546 help
45547 Various /proc files exist to monitor process memory utilization:
45548diff -urNp linux-3.0.7/fs/proc/kcore.c linux-3.0.7/fs/proc/kcore.c
45549--- linux-3.0.7/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
45550+++ linux-3.0.7/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
45551@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
45552 off_t offset = 0;
45553 struct kcore_list *m;
45554
45555+ pax_track_stack();
45556+
45557 /* setup ELF header */
45558 elf = (struct elfhdr *) bufp;
45559 bufp += sizeof(struct elfhdr);
45560@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
45561 * the addresses in the elf_phdr on our list.
45562 */
45563 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45564- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45565+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45566+ if (tsz > buflen)
45567 tsz = buflen;
45568-
45569+
45570 while (buflen) {
45571 struct kcore_list *m;
45572
45573@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
45574 kfree(elf_buf);
45575 } else {
45576 if (kern_addr_valid(start)) {
45577- unsigned long n;
45578+ char *elf_buf;
45579+ mm_segment_t oldfs;
45580
45581- n = copy_to_user(buffer, (char *)start, tsz);
45582- /*
45583- * We cannot distingush between fault on source
45584- * and fault on destination. When this happens
45585- * we clear too and hope it will trigger the
45586- * EFAULT again.
45587- */
45588- if (n) {
45589- if (clear_user(buffer + tsz - n,
45590- n))
45591+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45592+ if (!elf_buf)
45593+ return -ENOMEM;
45594+ oldfs = get_fs();
45595+ set_fs(KERNEL_DS);
45596+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45597+ set_fs(oldfs);
45598+ if (copy_to_user(buffer, elf_buf, tsz)) {
45599+ kfree(elf_buf);
45600 return -EFAULT;
45601+ }
45602 }
45603+ set_fs(oldfs);
45604+ kfree(elf_buf);
45605 } else {
45606 if (clear_user(buffer, tsz))
45607 return -EFAULT;
45608@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
45609
45610 static int open_kcore(struct inode *inode, struct file *filp)
45611 {
45612+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45613+ return -EPERM;
45614+#endif
45615 if (!capable(CAP_SYS_RAWIO))
45616 return -EPERM;
45617 if (kcore_need_update)
45618diff -urNp linux-3.0.7/fs/proc/meminfo.c linux-3.0.7/fs/proc/meminfo.c
45619--- linux-3.0.7/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
45620+++ linux-3.0.7/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
45621@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45622 unsigned long pages[NR_LRU_LISTS];
45623 int lru;
45624
45625+ pax_track_stack();
45626+
45627 /*
45628 * display in kilobytes.
45629 */
45630@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
45631 vmi.used >> 10,
45632 vmi.largest_chunk >> 10
45633 #ifdef CONFIG_MEMORY_FAILURE
45634- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45635+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45636 #endif
45637 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
45638 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
45639diff -urNp linux-3.0.7/fs/proc/nommu.c linux-3.0.7/fs/proc/nommu.c
45640--- linux-3.0.7/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
45641+++ linux-3.0.7/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
45642@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
45643 if (len < 1)
45644 len = 1;
45645 seq_printf(m, "%*c", len, ' ');
45646- seq_path(m, &file->f_path, "");
45647+ seq_path(m, &file->f_path, "\n\\");
45648 }
45649
45650 seq_putc(m, '\n');
45651diff -urNp linux-3.0.7/fs/proc/proc_net.c linux-3.0.7/fs/proc/proc_net.c
45652--- linux-3.0.7/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
45653+++ linux-3.0.7/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
45654@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
45655 struct task_struct *task;
45656 struct nsproxy *ns;
45657 struct net *net = NULL;
45658+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45659+ const struct cred *cred = current_cred();
45660+#endif
45661+
45662+#ifdef CONFIG_GRKERNSEC_PROC_USER
45663+ if (cred->fsuid)
45664+ return net;
45665+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45666+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45667+ return net;
45668+#endif
45669
45670 rcu_read_lock();
45671 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45672diff -urNp linux-3.0.7/fs/proc/proc_sysctl.c linux-3.0.7/fs/proc/proc_sysctl.c
45673--- linux-3.0.7/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
45674+++ linux-3.0.7/fs/proc/proc_sysctl.c 2011-10-17 06:39:12.000000000 -0400
45675@@ -8,6 +8,8 @@
45676 #include <linux/namei.h>
45677 #include "internal.h"
45678
45679+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45680+
45681 static const struct dentry_operations proc_sys_dentry_operations;
45682 static const struct file_operations proc_sys_file_operations;
45683 static const struct inode_operations proc_sys_inode_operations;
45684@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
45685 if (!p)
45686 goto out;
45687
45688+ if (gr_handle_sysctl(p, MAY_EXEC))
45689+ goto out;
45690+
45691 err = ERR_PTR(-ENOMEM);
45692 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45693 if (h)
45694@@ -121,6 +126,9 @@ static struct dentry *proc_sys_lookup(st
45695
45696 err = NULL;
45697 d_set_d_op(dentry, &proc_sys_dentry_operations);
45698+
45699+ gr_handle_proc_create(dentry, inode);
45700+
45701 d_add(dentry, inode);
45702
45703 out:
45704@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
45705 return -ENOMEM;
45706 } else {
45707 d_set_d_op(child, &proc_sys_dentry_operations);
45708+
45709+ gr_handle_proc_create(child, inode);
45710+
45711 d_add(child, inode);
45712 }
45713 } else {
45714@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
45715 if (*pos < file->f_pos)
45716 continue;
45717
45718+ if (gr_handle_sysctl(table, 0))
45719+ continue;
45720+
45721 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45722 if (res)
45723 return res;
45724@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
45725 if (IS_ERR(head))
45726 return PTR_ERR(head);
45727
45728+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45729+ return -ENOENT;
45730+
45731 generic_fillattr(inode, stat);
45732 if (table)
45733 stat->mode = (stat->mode & S_IFMT) | table->mode;
45734diff -urNp linux-3.0.7/fs/proc/root.c linux-3.0.7/fs/proc/root.c
45735--- linux-3.0.7/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
45736+++ linux-3.0.7/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
45737@@ -123,7 +123,15 @@ void __init proc_root_init(void)
45738 #ifdef CONFIG_PROC_DEVICETREE
45739 proc_device_tree_init();
45740 #endif
45741+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45742+#ifdef CONFIG_GRKERNSEC_PROC_USER
45743+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45744+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45745+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45746+#endif
45747+#else
45748 proc_mkdir("bus", NULL);
45749+#endif
45750 proc_sys_init();
45751 }
45752
45753diff -urNp linux-3.0.7/fs/proc/task_mmu.c linux-3.0.7/fs/proc/task_mmu.c
45754--- linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:54:54.000000000 -0400
45755+++ linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:55:28.000000000 -0400
45756@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
45757 "VmExe:\t%8lu kB\n"
45758 "VmLib:\t%8lu kB\n"
45759 "VmPTE:\t%8lu kB\n"
45760- "VmSwap:\t%8lu kB\n",
45761- hiwater_vm << (PAGE_SHIFT-10),
45762+ "VmSwap:\t%8lu kB\n"
45763+
45764+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45765+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45766+#endif
45767+
45768+ ,hiwater_vm << (PAGE_SHIFT-10),
45769 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45770 mm->locked_vm << (PAGE_SHIFT-10),
45771 hiwater_rss << (PAGE_SHIFT-10),
45772@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
45773 data << (PAGE_SHIFT-10),
45774 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45775 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
45776- swap << (PAGE_SHIFT-10));
45777+ swap << (PAGE_SHIFT-10)
45778+
45779+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45780+ , mm->context.user_cs_base, mm->context.user_cs_limit
45781+#endif
45782+
45783+ );
45784 }
45785
45786 unsigned long task_vsize(struct mm_struct *mm)
45787@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
45788 return ret;
45789 }
45790
45791+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45792+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45793+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45794+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45795+#endif
45796+
45797 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45798 {
45799 struct mm_struct *mm = vma->vm_mm;
45800@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
45801 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45802 }
45803
45804- /* We don't show the stack guard page in /proc/maps */
45805+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45806+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
45807+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
45808+#else
45809 start = vma->vm_start;
45810- if (stack_guard_page_start(vma, start))
45811- start += PAGE_SIZE;
45812 end = vma->vm_end;
45813- if (stack_guard_page_end(vma, end))
45814- end -= PAGE_SIZE;
45815+#endif
45816
45817 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45818 start,
45819@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
45820 flags & VM_WRITE ? 'w' : '-',
45821 flags & VM_EXEC ? 'x' : '-',
45822 flags & VM_MAYSHARE ? 's' : 'p',
45823+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45824+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45825+#else
45826 pgoff,
45827+#endif
45828 MAJOR(dev), MINOR(dev), ino, &len);
45829
45830 /*
45831@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
45832 */
45833 if (file) {
45834 pad_len_spaces(m, len);
45835- seq_path(m, &file->f_path, "\n");
45836+ seq_path(m, &file->f_path, "\n\\");
45837 } else {
45838 const char *name = arch_vma_name(vma);
45839 if (!name) {
45840@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
45841 if (vma->vm_start <= mm->brk &&
45842 vma->vm_end >= mm->start_brk) {
45843 name = "[heap]";
45844- } else if (vma->vm_start <= mm->start_stack &&
45845- vma->vm_end >= mm->start_stack) {
45846+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45847+ (vma->vm_start <= mm->start_stack &&
45848+ vma->vm_end >= mm->start_stack)) {
45849 name = "[stack]";
45850 }
45851 } else {
45852@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
45853 };
45854
45855 memset(&mss, 0, sizeof mss);
45856- mss.vma = vma;
45857- /* mmap_sem is held in m_start */
45858- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45859- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45860-
45861+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45862+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45863+#endif
45864+ mss.vma = vma;
45865+ /* mmap_sem is held in m_start */
45866+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45867+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45868+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45869+ }
45870+#endif
45871 show_map_vma(m, vma);
45872
45873 seq_printf(m,
45874@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
45875 "KernelPageSize: %8lu kB\n"
45876 "MMUPageSize: %8lu kB\n"
45877 "Locked: %8lu kB\n",
45878+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45879+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45880+#else
45881 (vma->vm_end - vma->vm_start) >> 10,
45882+#endif
45883 mss.resident >> 10,
45884 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45885 mss.shared_clean >> 10,
45886@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
45887
45888 if (file) {
45889 seq_printf(m, " file=");
45890- seq_path(m, &file->f_path, "\n\t= ");
45891+ seq_path(m, &file->f_path, "\n\t\\= ");
45892 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
45893 seq_printf(m, " heap");
45894 } else if (vma->vm_start <= mm->start_stack &&
45895diff -urNp linux-3.0.7/fs/proc/task_nommu.c linux-3.0.7/fs/proc/task_nommu.c
45896--- linux-3.0.7/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
45897+++ linux-3.0.7/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
45898@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
45899 else
45900 bytes += kobjsize(mm);
45901
45902- if (current->fs && current->fs->users > 1)
45903+ if (current->fs && atomic_read(&current->fs->users) > 1)
45904 sbytes += kobjsize(current->fs);
45905 else
45906 bytes += kobjsize(current->fs);
45907@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
45908
45909 if (file) {
45910 pad_len_spaces(m, len);
45911- seq_path(m, &file->f_path, "");
45912+ seq_path(m, &file->f_path, "\n\\");
45913 } else if (mm) {
45914 if (vma->vm_start <= mm->start_stack &&
45915 vma->vm_end >= mm->start_stack) {
45916diff -urNp linux-3.0.7/fs/quota/netlink.c linux-3.0.7/fs/quota/netlink.c
45917--- linux-3.0.7/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
45918+++ linux-3.0.7/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
45919@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
45920 void quota_send_warning(short type, unsigned int id, dev_t dev,
45921 const char warntype)
45922 {
45923- static atomic_t seq;
45924+ static atomic_unchecked_t seq;
45925 struct sk_buff *skb;
45926 void *msg_head;
45927 int ret;
45928@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
45929 "VFS: Not enough memory to send quota warning.\n");
45930 return;
45931 }
45932- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
45933+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
45934 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
45935 if (!msg_head) {
45936 printk(KERN_ERR
45937diff -urNp linux-3.0.7/fs/readdir.c linux-3.0.7/fs/readdir.c
45938--- linux-3.0.7/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
45939+++ linux-3.0.7/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
45940@@ -17,6 +17,7 @@
45941 #include <linux/security.h>
45942 #include <linux/syscalls.h>
45943 #include <linux/unistd.h>
45944+#include <linux/namei.h>
45945
45946 #include <asm/uaccess.h>
45947
45948@@ -67,6 +68,7 @@ struct old_linux_dirent {
45949
45950 struct readdir_callback {
45951 struct old_linux_dirent __user * dirent;
45952+ struct file * file;
45953 int result;
45954 };
45955
45956@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45957 buf->result = -EOVERFLOW;
45958 return -EOVERFLOW;
45959 }
45960+
45961+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45962+ return 0;
45963+
45964 buf->result++;
45965 dirent = buf->dirent;
45966 if (!access_ok(VERIFY_WRITE, dirent,
45967@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45968
45969 buf.result = 0;
45970 buf.dirent = dirent;
45971+ buf.file = file;
45972
45973 error = vfs_readdir(file, fillonedir, &buf);
45974 if (buf.result)
45975@@ -142,6 +149,7 @@ struct linux_dirent {
45976 struct getdents_callback {
45977 struct linux_dirent __user * current_dir;
45978 struct linux_dirent __user * previous;
45979+ struct file * file;
45980 int count;
45981 int error;
45982 };
45983@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
45984 buf->error = -EOVERFLOW;
45985 return -EOVERFLOW;
45986 }
45987+
45988+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45989+ return 0;
45990+
45991 dirent = buf->previous;
45992 if (dirent) {
45993 if (__put_user(offset, &dirent->d_off))
45994@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45995 buf.previous = NULL;
45996 buf.count = count;
45997 buf.error = 0;
45998+ buf.file = file;
45999
46000 error = vfs_readdir(file, filldir, &buf);
46001 if (error >= 0)
46002@@ -229,6 +242,7 @@ out:
46003 struct getdents_callback64 {
46004 struct linux_dirent64 __user * current_dir;
46005 struct linux_dirent64 __user * previous;
46006+ struct file *file;
46007 int count;
46008 int error;
46009 };
46010@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46011 buf->error = -EINVAL; /* only used if we fail.. */
46012 if (reclen > buf->count)
46013 return -EINVAL;
46014+
46015+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46016+ return 0;
46017+
46018 dirent = buf->previous;
46019 if (dirent) {
46020 if (__put_user(offset, &dirent->d_off))
46021@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46022
46023 buf.current_dir = dirent;
46024 buf.previous = NULL;
46025+ buf.file = file;
46026 buf.count = count;
46027 buf.error = 0;
46028
46029@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46030 error = buf.error;
46031 lastdirent = buf.previous;
46032 if (lastdirent) {
46033- typeof(lastdirent->d_off) d_off = file->f_pos;
46034+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46035 if (__put_user(d_off, &lastdirent->d_off))
46036 error = -EFAULT;
46037 else
46038diff -urNp linux-3.0.7/fs/reiserfs/dir.c linux-3.0.7/fs/reiserfs/dir.c
46039--- linux-3.0.7/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
46040+++ linux-3.0.7/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
46041@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46042 struct reiserfs_dir_entry de;
46043 int ret = 0;
46044
46045+ pax_track_stack();
46046+
46047 reiserfs_write_lock(inode->i_sb);
46048
46049 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46050diff -urNp linux-3.0.7/fs/reiserfs/do_balan.c linux-3.0.7/fs/reiserfs/do_balan.c
46051--- linux-3.0.7/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
46052+++ linux-3.0.7/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
46053@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46054 return;
46055 }
46056
46057- atomic_inc(&(fs_generation(tb->tb_sb)));
46058+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46059 do_balance_starts(tb);
46060
46061 /* balance leaf returns 0 except if combining L R and S into
46062diff -urNp linux-3.0.7/fs/reiserfs/journal.c linux-3.0.7/fs/reiserfs/journal.c
46063--- linux-3.0.7/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
46064+++ linux-3.0.7/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
46065@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
46066 struct buffer_head *bh;
46067 int i, j;
46068
46069+ pax_track_stack();
46070+
46071 bh = __getblk(dev, block, bufsize);
46072 if (buffer_uptodate(bh))
46073 return (bh);
46074diff -urNp linux-3.0.7/fs/reiserfs/namei.c linux-3.0.7/fs/reiserfs/namei.c
46075--- linux-3.0.7/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
46076+++ linux-3.0.7/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
46077@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46078 unsigned long savelink = 1;
46079 struct timespec ctime;
46080
46081+ pax_track_stack();
46082+
46083 /* three balancings: (1) old name removal, (2) new name insertion
46084 and (3) maybe "save" link insertion
46085 stat data updates: (1) old directory,
46086diff -urNp linux-3.0.7/fs/reiserfs/procfs.c linux-3.0.7/fs/reiserfs/procfs.c
46087--- linux-3.0.7/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
46088+++ linux-3.0.7/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
46089@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46090 "SMALL_TAILS " : "NO_TAILS ",
46091 replay_only(sb) ? "REPLAY_ONLY " : "",
46092 convert_reiserfs(sb) ? "CONV " : "",
46093- atomic_read(&r->s_generation_counter),
46094+ atomic_read_unchecked(&r->s_generation_counter),
46095 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46096 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46097 SF(s_good_search_by_key_reada), SF(s_bmaps),
46098@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46099 struct journal_params *jp = &rs->s_v1.s_journal;
46100 char b[BDEVNAME_SIZE];
46101
46102+ pax_track_stack();
46103+
46104 seq_printf(m, /* on-disk fields */
46105 "jp_journal_1st_block: \t%i\n"
46106 "jp_journal_dev: \t%s[%x]\n"
46107diff -urNp linux-3.0.7/fs/reiserfs/stree.c linux-3.0.7/fs/reiserfs/stree.c
46108--- linux-3.0.7/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
46109+++ linux-3.0.7/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
46110@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46111 int iter = 0;
46112 #endif
46113
46114+ pax_track_stack();
46115+
46116 BUG_ON(!th->t_trans_id);
46117
46118 init_tb_struct(th, &s_del_balance, sb, path,
46119@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46120 int retval;
46121 int quota_cut_bytes = 0;
46122
46123+ pax_track_stack();
46124+
46125 BUG_ON(!th->t_trans_id);
46126
46127 le_key2cpu_key(&cpu_key, key);
46128@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46129 int quota_cut_bytes;
46130 loff_t tail_pos = 0;
46131
46132+ pax_track_stack();
46133+
46134 BUG_ON(!th->t_trans_id);
46135
46136 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46137@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46138 int retval;
46139 int fs_gen;
46140
46141+ pax_track_stack();
46142+
46143 BUG_ON(!th->t_trans_id);
46144
46145 fs_gen = get_generation(inode->i_sb);
46146@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46147 int fs_gen = 0;
46148 int quota_bytes = 0;
46149
46150+ pax_track_stack();
46151+
46152 BUG_ON(!th->t_trans_id);
46153
46154 if (inode) { /* Do we count quotas for item? */
46155diff -urNp linux-3.0.7/fs/reiserfs/super.c linux-3.0.7/fs/reiserfs/super.c
46156--- linux-3.0.7/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
46157+++ linux-3.0.7/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
46158@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46159 {.option_name = NULL}
46160 };
46161
46162+ pax_track_stack();
46163+
46164 *blocks = 0;
46165 if (!options || !*options)
46166 /* use default configuration: create tails, journaling on, no
46167diff -urNp linux-3.0.7/fs/select.c linux-3.0.7/fs/select.c
46168--- linux-3.0.7/fs/select.c 2011-07-21 22:17:23.000000000 -0400
46169+++ linux-3.0.7/fs/select.c 2011-08-23 21:48:14.000000000 -0400
46170@@ -20,6 +20,7 @@
46171 #include <linux/module.h>
46172 #include <linux/slab.h>
46173 #include <linux/poll.h>
46174+#include <linux/security.h>
46175 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46176 #include <linux/file.h>
46177 #include <linux/fdtable.h>
46178@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46179 int retval, i, timed_out = 0;
46180 unsigned long slack = 0;
46181
46182+ pax_track_stack();
46183+
46184 rcu_read_lock();
46185 retval = max_select_fd(n, fds);
46186 rcu_read_unlock();
46187@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46188 /* Allocate small arguments on the stack to save memory and be faster */
46189 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46190
46191+ pax_track_stack();
46192+
46193 ret = -EINVAL;
46194 if (n < 0)
46195 goto out_nofds;
46196@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46197 struct poll_list *walk = head;
46198 unsigned long todo = nfds;
46199
46200+ pax_track_stack();
46201+
46202+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46203 if (nfds > rlimit(RLIMIT_NOFILE))
46204 return -EINVAL;
46205
46206diff -urNp linux-3.0.7/fs/seq_file.c linux-3.0.7/fs/seq_file.c
46207--- linux-3.0.7/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
46208+++ linux-3.0.7/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
46209@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46210 return 0;
46211 }
46212 if (!m->buf) {
46213- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46214+ m->size = PAGE_SIZE;
46215+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46216 if (!m->buf)
46217 return -ENOMEM;
46218 }
46219@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46220 Eoverflow:
46221 m->op->stop(m, p);
46222 kfree(m->buf);
46223- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46224+ m->size <<= 1;
46225+ m->buf = kmalloc(m->size, GFP_KERNEL);
46226 return !m->buf ? -ENOMEM : -EAGAIN;
46227 }
46228
46229@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46230 m->version = file->f_version;
46231 /* grab buffer if we didn't have one */
46232 if (!m->buf) {
46233- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46234+ m->size = PAGE_SIZE;
46235+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46236 if (!m->buf)
46237 goto Enomem;
46238 }
46239@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46240 goto Fill;
46241 m->op->stop(m, p);
46242 kfree(m->buf);
46243- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46244+ m->size <<= 1;
46245+ m->buf = kmalloc(m->size, GFP_KERNEL);
46246 if (!m->buf)
46247 goto Enomem;
46248 m->count = 0;
46249@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46250 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46251 void *data)
46252 {
46253- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46254+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46255 int res = -ENOMEM;
46256
46257 if (op) {
46258diff -urNp linux-3.0.7/fs/splice.c linux-3.0.7/fs/splice.c
46259--- linux-3.0.7/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
46260+++ linux-3.0.7/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
46261@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46262 pipe_lock(pipe);
46263
46264 for (;;) {
46265- if (!pipe->readers) {
46266+ if (!atomic_read(&pipe->readers)) {
46267 send_sig(SIGPIPE, current, 0);
46268 if (!ret)
46269 ret = -EPIPE;
46270@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46271 do_wakeup = 0;
46272 }
46273
46274- pipe->waiting_writers++;
46275+ atomic_inc(&pipe->waiting_writers);
46276 pipe_wait(pipe);
46277- pipe->waiting_writers--;
46278+ atomic_dec(&pipe->waiting_writers);
46279 }
46280
46281 pipe_unlock(pipe);
46282@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46283 .spd_release = spd_release_page,
46284 };
46285
46286+ pax_track_stack();
46287+
46288 if (splice_grow_spd(pipe, &spd))
46289 return -ENOMEM;
46290
46291@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46292 old_fs = get_fs();
46293 set_fs(get_ds());
46294 /* The cast to a user pointer is valid due to the set_fs() */
46295- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46296+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46297 set_fs(old_fs);
46298
46299 return res;
46300@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46301 old_fs = get_fs();
46302 set_fs(get_ds());
46303 /* The cast to a user pointer is valid due to the set_fs() */
46304- res = vfs_write(file, (const char __user *)buf, count, &pos);
46305+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46306 set_fs(old_fs);
46307
46308 return res;
46309@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46310 .spd_release = spd_release_page,
46311 };
46312
46313+ pax_track_stack();
46314+
46315 if (splice_grow_spd(pipe, &spd))
46316 return -ENOMEM;
46317
46318@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46319 goto err;
46320
46321 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46322- vec[i].iov_base = (void __user *) page_address(page);
46323+ vec[i].iov_base = (void __force_user *) page_address(page);
46324 vec[i].iov_len = this_len;
46325 spd.pages[i] = page;
46326 spd.nr_pages++;
46327@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46328 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46329 {
46330 while (!pipe->nrbufs) {
46331- if (!pipe->writers)
46332+ if (!atomic_read(&pipe->writers))
46333 return 0;
46334
46335- if (!pipe->waiting_writers && sd->num_spliced)
46336+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46337 return 0;
46338
46339 if (sd->flags & SPLICE_F_NONBLOCK)
46340@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46341 * out of the pipe right after the splice_to_pipe(). So set
46342 * PIPE_READERS appropriately.
46343 */
46344- pipe->readers = 1;
46345+ atomic_set(&pipe->readers, 1);
46346
46347 current->splice_pipe = pipe;
46348 }
46349@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46350 };
46351 long ret;
46352
46353+ pax_track_stack();
46354+
46355 pipe = get_pipe_info(file);
46356 if (!pipe)
46357 return -EBADF;
46358@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46359 ret = -ERESTARTSYS;
46360 break;
46361 }
46362- if (!pipe->writers)
46363+ if (!atomic_read(&pipe->writers))
46364 break;
46365- if (!pipe->waiting_writers) {
46366+ if (!atomic_read(&pipe->waiting_writers)) {
46367 if (flags & SPLICE_F_NONBLOCK) {
46368 ret = -EAGAIN;
46369 break;
46370@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46371 pipe_lock(pipe);
46372
46373 while (pipe->nrbufs >= pipe->buffers) {
46374- if (!pipe->readers) {
46375+ if (!atomic_read(&pipe->readers)) {
46376 send_sig(SIGPIPE, current, 0);
46377 ret = -EPIPE;
46378 break;
46379@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46380 ret = -ERESTARTSYS;
46381 break;
46382 }
46383- pipe->waiting_writers++;
46384+ atomic_inc(&pipe->waiting_writers);
46385 pipe_wait(pipe);
46386- pipe->waiting_writers--;
46387+ atomic_dec(&pipe->waiting_writers);
46388 }
46389
46390 pipe_unlock(pipe);
46391@@ -1819,14 +1825,14 @@ retry:
46392 pipe_double_lock(ipipe, opipe);
46393
46394 do {
46395- if (!opipe->readers) {
46396+ if (!atomic_read(&opipe->readers)) {
46397 send_sig(SIGPIPE, current, 0);
46398 if (!ret)
46399 ret = -EPIPE;
46400 break;
46401 }
46402
46403- if (!ipipe->nrbufs && !ipipe->writers)
46404+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46405 break;
46406
46407 /*
46408@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46409 pipe_double_lock(ipipe, opipe);
46410
46411 do {
46412- if (!opipe->readers) {
46413+ if (!atomic_read(&opipe->readers)) {
46414 send_sig(SIGPIPE, current, 0);
46415 if (!ret)
46416 ret = -EPIPE;
46417@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46418 * return EAGAIN if we have the potential of some data in the
46419 * future, otherwise just return 0
46420 */
46421- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46422+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46423 ret = -EAGAIN;
46424
46425 pipe_unlock(ipipe);
46426diff -urNp linux-3.0.7/fs/sysfs/file.c linux-3.0.7/fs/sysfs/file.c
46427--- linux-3.0.7/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
46428+++ linux-3.0.7/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
46429@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46430
46431 struct sysfs_open_dirent {
46432 atomic_t refcnt;
46433- atomic_t event;
46434+ atomic_unchecked_t event;
46435 wait_queue_head_t poll;
46436 struct list_head buffers; /* goes through sysfs_buffer.list */
46437 };
46438@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46439 if (!sysfs_get_active(attr_sd))
46440 return -ENODEV;
46441
46442- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46443+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46444 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46445
46446 sysfs_put_active(attr_sd);
46447@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46448 return -ENOMEM;
46449
46450 atomic_set(&new_od->refcnt, 0);
46451- atomic_set(&new_od->event, 1);
46452+ atomic_set_unchecked(&new_od->event, 1);
46453 init_waitqueue_head(&new_od->poll);
46454 INIT_LIST_HEAD(&new_od->buffers);
46455 goto retry;
46456@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46457
46458 sysfs_put_active(attr_sd);
46459
46460- if (buffer->event != atomic_read(&od->event))
46461+ if (buffer->event != atomic_read_unchecked(&od->event))
46462 goto trigger;
46463
46464 return DEFAULT_POLLMASK;
46465@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46466
46467 od = sd->s_attr.open;
46468 if (od) {
46469- atomic_inc(&od->event);
46470+ atomic_inc_unchecked(&od->event);
46471 wake_up_interruptible(&od->poll);
46472 }
46473
46474diff -urNp linux-3.0.7/fs/sysfs/mount.c linux-3.0.7/fs/sysfs/mount.c
46475--- linux-3.0.7/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
46476+++ linux-3.0.7/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
46477@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46478 .s_name = "",
46479 .s_count = ATOMIC_INIT(1),
46480 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46481+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46482+ .s_mode = S_IFDIR | S_IRWXU,
46483+#else
46484 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46485+#endif
46486 .s_ino = 1,
46487 };
46488
46489diff -urNp linux-3.0.7/fs/sysfs/symlink.c linux-3.0.7/fs/sysfs/symlink.c
46490--- linux-3.0.7/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
46491+++ linux-3.0.7/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
46492@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46493
46494 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46495 {
46496- char *page = nd_get_link(nd);
46497+ const char *page = nd_get_link(nd);
46498 if (!IS_ERR(page))
46499 free_page((unsigned long)page);
46500 }
46501diff -urNp linux-3.0.7/fs/udf/inode.c linux-3.0.7/fs/udf/inode.c
46502--- linux-3.0.7/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
46503+++ linux-3.0.7/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
46504@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46505 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46506 int lastblock = 0;
46507
46508+ pax_track_stack();
46509+
46510 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46511 prev_epos.block = iinfo->i_location;
46512 prev_epos.bh = NULL;
46513diff -urNp linux-3.0.7/fs/udf/misc.c linux-3.0.7/fs/udf/misc.c
46514--- linux-3.0.7/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
46515+++ linux-3.0.7/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
46516@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46517
46518 u8 udf_tag_checksum(const struct tag *t)
46519 {
46520- u8 *data = (u8 *)t;
46521+ const u8 *data = (const u8 *)t;
46522 u8 checksum = 0;
46523 int i;
46524 for (i = 0; i < sizeof(struct tag); ++i)
46525diff -urNp linux-3.0.7/fs/utimes.c linux-3.0.7/fs/utimes.c
46526--- linux-3.0.7/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
46527+++ linux-3.0.7/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
46528@@ -1,6 +1,7 @@
46529 #include <linux/compiler.h>
46530 #include <linux/file.h>
46531 #include <linux/fs.h>
46532+#include <linux/security.h>
46533 #include <linux/linkage.h>
46534 #include <linux/mount.h>
46535 #include <linux/namei.h>
46536@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46537 goto mnt_drop_write_and_out;
46538 }
46539 }
46540+
46541+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46542+ error = -EACCES;
46543+ goto mnt_drop_write_and_out;
46544+ }
46545+
46546 mutex_lock(&inode->i_mutex);
46547 error = notify_change(path->dentry, &newattrs);
46548 mutex_unlock(&inode->i_mutex);
46549diff -urNp linux-3.0.7/fs/xattr_acl.c linux-3.0.7/fs/xattr_acl.c
46550--- linux-3.0.7/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
46551+++ linux-3.0.7/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
46552@@ -17,8 +17,8 @@
46553 struct posix_acl *
46554 posix_acl_from_xattr(const void *value, size_t size)
46555 {
46556- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46557- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46558+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46559+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46560 int count;
46561 struct posix_acl *acl;
46562 struct posix_acl_entry *acl_e;
46563diff -urNp linux-3.0.7/fs/xattr.c linux-3.0.7/fs/xattr.c
46564--- linux-3.0.7/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
46565+++ linux-3.0.7/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
46566@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46567 * Extended attribute SET operations
46568 */
46569 static long
46570-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46571+setxattr(struct path *path, const char __user *name, const void __user *value,
46572 size_t size, int flags)
46573 {
46574 int error;
46575@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
46576 return PTR_ERR(kvalue);
46577 }
46578
46579- error = vfs_setxattr(d, kname, kvalue, size, flags);
46580+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46581+ error = -EACCES;
46582+ goto out;
46583+ }
46584+
46585+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46586+out:
46587 kfree(kvalue);
46588 return error;
46589 }
46590@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46591 return error;
46592 error = mnt_want_write(path.mnt);
46593 if (!error) {
46594- error = setxattr(path.dentry, name, value, size, flags);
46595+ error = setxattr(&path, name, value, size, flags);
46596 mnt_drop_write(path.mnt);
46597 }
46598 path_put(&path);
46599@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46600 return error;
46601 error = mnt_want_write(path.mnt);
46602 if (!error) {
46603- error = setxattr(path.dentry, name, value, size, flags);
46604+ error = setxattr(&path, name, value, size, flags);
46605 mnt_drop_write(path.mnt);
46606 }
46607 path_put(&path);
46608@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46609 const void __user *,value, size_t, size, int, flags)
46610 {
46611 struct file *f;
46612- struct dentry *dentry;
46613 int error = -EBADF;
46614
46615 f = fget(fd);
46616 if (!f)
46617 return error;
46618- dentry = f->f_path.dentry;
46619- audit_inode(NULL, dentry);
46620+ audit_inode(NULL, f->f_path.dentry);
46621 error = mnt_want_write_file(f);
46622 if (!error) {
46623- error = setxattr(dentry, name, value, size, flags);
46624+ error = setxattr(&f->f_path, name, value, size, flags);
46625 mnt_drop_write(f->f_path.mnt);
46626 }
46627 fput(f);
46628diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c
46629--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
46630+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
46631@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
46632 xfs_fsop_geom_t fsgeo;
46633 int error;
46634
46635+ memset(&fsgeo, 0, sizeof(fsgeo));
46636 error = xfs_fs_geometry(mp, &fsgeo, 3);
46637 if (error)
46638 return -error;
46639diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c
46640--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
46641+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
46642@@ -128,7 +128,7 @@ xfs_find_handle(
46643 }
46644
46645 error = -EFAULT;
46646- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46647+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46648 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46649 goto out_put;
46650
46651diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c
46652--- linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
46653+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
46654@@ -437,7 +437,7 @@ xfs_vn_put_link(
46655 struct nameidata *nd,
46656 void *p)
46657 {
46658- char *s = nd_get_link(nd);
46659+ const char *s = nd_get_link(nd);
46660
46661 if (!IS_ERR(s))
46662 kfree(s);
46663diff -urNp linux-3.0.7/fs/xfs/xfs_bmap.c linux-3.0.7/fs/xfs/xfs_bmap.c
46664--- linux-3.0.7/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
46665+++ linux-3.0.7/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
46666@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
46667 int nmap,
46668 int ret_nmap);
46669 #else
46670-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46671+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46672 #endif /* DEBUG */
46673
46674 STATIC int
46675diff -urNp linux-3.0.7/fs/xfs/xfs_dir2_sf.c linux-3.0.7/fs/xfs/xfs_dir2_sf.c
46676--- linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
46677+++ linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
46678@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
46679 }
46680
46681 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46682- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46683+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46684+ char name[sfep->namelen];
46685+ memcpy(name, sfep->name, sfep->namelen);
46686+ if (filldir(dirent, name, sfep->namelen,
46687+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46688+ *offset = off & 0x7fffffff;
46689+ return 0;
46690+ }
46691+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46692 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46693 *offset = off & 0x7fffffff;
46694 return 0;
46695diff -urNp linux-3.0.7/grsecurity/gracl_alloc.c linux-3.0.7/grsecurity/gracl_alloc.c
46696--- linux-3.0.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46697+++ linux-3.0.7/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
46698@@ -0,0 +1,105 @@
46699+#include <linux/kernel.h>
46700+#include <linux/mm.h>
46701+#include <linux/slab.h>
46702+#include <linux/vmalloc.h>
46703+#include <linux/gracl.h>
46704+#include <linux/grsecurity.h>
46705+
46706+static unsigned long alloc_stack_next = 1;
46707+static unsigned long alloc_stack_size = 1;
46708+static void **alloc_stack;
46709+
46710+static __inline__ int
46711+alloc_pop(void)
46712+{
46713+ if (alloc_stack_next == 1)
46714+ return 0;
46715+
46716+ kfree(alloc_stack[alloc_stack_next - 2]);
46717+
46718+ alloc_stack_next--;
46719+
46720+ return 1;
46721+}
46722+
46723+static __inline__ int
46724+alloc_push(void *buf)
46725+{
46726+ if (alloc_stack_next >= alloc_stack_size)
46727+ return 1;
46728+
46729+ alloc_stack[alloc_stack_next - 1] = buf;
46730+
46731+ alloc_stack_next++;
46732+
46733+ return 0;
46734+}
46735+
46736+void *
46737+acl_alloc(unsigned long len)
46738+{
46739+ void *ret = NULL;
46740+
46741+ if (!len || len > PAGE_SIZE)
46742+ goto out;
46743+
46744+ ret = kmalloc(len, GFP_KERNEL);
46745+
46746+ if (ret) {
46747+ if (alloc_push(ret)) {
46748+ kfree(ret);
46749+ ret = NULL;
46750+ }
46751+ }
46752+
46753+out:
46754+ return ret;
46755+}
46756+
46757+void *
46758+acl_alloc_num(unsigned long num, unsigned long len)
46759+{
46760+ if (!len || (num > (PAGE_SIZE / len)))
46761+ return NULL;
46762+
46763+ return acl_alloc(num * len);
46764+}
46765+
46766+void
46767+acl_free_all(void)
46768+{
46769+ if (gr_acl_is_enabled() || !alloc_stack)
46770+ return;
46771+
46772+ while (alloc_pop()) ;
46773+
46774+ if (alloc_stack) {
46775+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46776+ kfree(alloc_stack);
46777+ else
46778+ vfree(alloc_stack);
46779+ }
46780+
46781+ alloc_stack = NULL;
46782+ alloc_stack_size = 1;
46783+ alloc_stack_next = 1;
46784+
46785+ return;
46786+}
46787+
46788+int
46789+acl_alloc_stack_init(unsigned long size)
46790+{
46791+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46792+ alloc_stack =
46793+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46794+ else
46795+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46796+
46797+ alloc_stack_size = size;
46798+
46799+ if (!alloc_stack)
46800+ return 0;
46801+ else
46802+ return 1;
46803+}
46804diff -urNp linux-3.0.7/grsecurity/gracl.c linux-3.0.7/grsecurity/gracl.c
46805--- linux-3.0.7/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46806+++ linux-3.0.7/grsecurity/gracl.c 2011-10-17 06:42:59.000000000 -0400
46807@@ -0,0 +1,4154 @@
46808+#include <linux/kernel.h>
46809+#include <linux/module.h>
46810+#include <linux/sched.h>
46811+#include <linux/mm.h>
46812+#include <linux/file.h>
46813+#include <linux/fs.h>
46814+#include <linux/namei.h>
46815+#include <linux/mount.h>
46816+#include <linux/tty.h>
46817+#include <linux/proc_fs.h>
46818+#include <linux/lglock.h>
46819+#include <linux/slab.h>
46820+#include <linux/vmalloc.h>
46821+#include <linux/types.h>
46822+#include <linux/sysctl.h>
46823+#include <linux/netdevice.h>
46824+#include <linux/ptrace.h>
46825+#include <linux/gracl.h>
46826+#include <linux/gralloc.h>
46827+#include <linux/grsecurity.h>
46828+#include <linux/grinternal.h>
46829+#include <linux/pid_namespace.h>
46830+#include <linux/fdtable.h>
46831+#include <linux/percpu.h>
46832+
46833+#include <asm/uaccess.h>
46834+#include <asm/errno.h>
46835+#include <asm/mman.h>
46836+
46837+static struct acl_role_db acl_role_set;
46838+static struct name_db name_set;
46839+static struct inodev_db inodev_set;
46840+
46841+/* for keeping track of userspace pointers used for subjects, so we
46842+ can share references in the kernel as well
46843+*/
46844+
46845+static struct path real_root;
46846+
46847+static struct acl_subj_map_db subj_map_set;
46848+
46849+static struct acl_role_label *default_role;
46850+
46851+static struct acl_role_label *role_list;
46852+
46853+static u16 acl_sp_role_value;
46854+
46855+extern char *gr_shared_page[4];
46856+static DEFINE_MUTEX(gr_dev_mutex);
46857+DEFINE_RWLOCK(gr_inode_lock);
46858+
46859+struct gr_arg *gr_usermode;
46860+
46861+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46862+
46863+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46864+extern void gr_clear_learn_entries(void);
46865+
46866+#ifdef CONFIG_GRKERNSEC_RESLOG
46867+extern void gr_log_resource(const struct task_struct *task,
46868+ const int res, const unsigned long wanted, const int gt);
46869+#endif
46870+
46871+unsigned char *gr_system_salt;
46872+unsigned char *gr_system_sum;
46873+
46874+static struct sprole_pw **acl_special_roles = NULL;
46875+static __u16 num_sprole_pws = 0;
46876+
46877+static struct acl_role_label *kernel_role = NULL;
46878+
46879+static unsigned int gr_auth_attempts = 0;
46880+static unsigned long gr_auth_expires = 0UL;
46881+
46882+#ifdef CONFIG_NET
46883+extern struct vfsmount *sock_mnt;
46884+#endif
46885+
46886+extern struct vfsmount *pipe_mnt;
46887+extern struct vfsmount *shm_mnt;
46888+#ifdef CONFIG_HUGETLBFS
46889+extern struct vfsmount *hugetlbfs_vfsmount;
46890+#endif
46891+
46892+static struct acl_object_label *fakefs_obj_rw;
46893+static struct acl_object_label *fakefs_obj_rwx;
46894+
46895+extern int gr_init_uidset(void);
46896+extern void gr_free_uidset(void);
46897+extern void gr_remove_uid(uid_t uid);
46898+extern int gr_find_uid(uid_t uid);
46899+
46900+DECLARE_BRLOCK(vfsmount_lock);
46901+
46902+__inline__ int
46903+gr_acl_is_enabled(void)
46904+{
46905+ return (gr_status & GR_READY);
46906+}
46907+
46908+#ifdef CONFIG_BTRFS_FS
46909+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46910+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46911+#endif
46912+
46913+static inline dev_t __get_dev(const struct dentry *dentry)
46914+{
46915+#ifdef CONFIG_BTRFS_FS
46916+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46917+ return get_btrfs_dev_from_inode(dentry->d_inode);
46918+ else
46919+#endif
46920+ return dentry->d_inode->i_sb->s_dev;
46921+}
46922+
46923+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46924+{
46925+ return __get_dev(dentry);
46926+}
46927+
46928+static char gr_task_roletype_to_char(struct task_struct *task)
46929+{
46930+ switch (task->role->roletype &
46931+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46932+ GR_ROLE_SPECIAL)) {
46933+ case GR_ROLE_DEFAULT:
46934+ return 'D';
46935+ case GR_ROLE_USER:
46936+ return 'U';
46937+ case GR_ROLE_GROUP:
46938+ return 'G';
46939+ case GR_ROLE_SPECIAL:
46940+ return 'S';
46941+ }
46942+
46943+ return 'X';
46944+}
46945+
46946+char gr_roletype_to_char(void)
46947+{
46948+ return gr_task_roletype_to_char(current);
46949+}
46950+
46951+__inline__ int
46952+gr_acl_tpe_check(void)
46953+{
46954+ if (unlikely(!(gr_status & GR_READY)))
46955+ return 0;
46956+ if (current->role->roletype & GR_ROLE_TPE)
46957+ return 1;
46958+ else
46959+ return 0;
46960+}
46961+
46962+int
46963+gr_handle_rawio(const struct inode *inode)
46964+{
46965+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46966+ if (inode && S_ISBLK(inode->i_mode) &&
46967+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46968+ !capable(CAP_SYS_RAWIO))
46969+ return 1;
46970+#endif
46971+ return 0;
46972+}
46973+
46974+static int
46975+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46976+{
46977+ if (likely(lena != lenb))
46978+ return 0;
46979+
46980+ return !memcmp(a, b, lena);
46981+}
46982+
46983+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
46984+{
46985+ *buflen -= namelen;
46986+ if (*buflen < 0)
46987+ return -ENAMETOOLONG;
46988+ *buffer -= namelen;
46989+ memcpy(*buffer, str, namelen);
46990+ return 0;
46991+}
46992+
46993+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
46994+{
46995+ return prepend(buffer, buflen, name->name, name->len);
46996+}
46997+
46998+static int prepend_path(const struct path *path, struct path *root,
46999+ char **buffer, int *buflen)
47000+{
47001+ struct dentry *dentry = path->dentry;
47002+ struct vfsmount *vfsmnt = path->mnt;
47003+ bool slash = false;
47004+ int error = 0;
47005+
47006+ while (dentry != root->dentry || vfsmnt != root->mnt) {
47007+ struct dentry * parent;
47008+
47009+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47010+ /* Global root? */
47011+ if (vfsmnt->mnt_parent == vfsmnt) {
47012+ goto out;
47013+ }
47014+ dentry = vfsmnt->mnt_mountpoint;
47015+ vfsmnt = vfsmnt->mnt_parent;
47016+ continue;
47017+ }
47018+ parent = dentry->d_parent;
47019+ prefetch(parent);
47020+ spin_lock(&dentry->d_lock);
47021+ error = prepend_name(buffer, buflen, &dentry->d_name);
47022+ spin_unlock(&dentry->d_lock);
47023+ if (!error)
47024+ error = prepend(buffer, buflen, "/", 1);
47025+ if (error)
47026+ break;
47027+
47028+ slash = true;
47029+ dentry = parent;
47030+ }
47031+
47032+out:
47033+ if (!error && !slash)
47034+ error = prepend(buffer, buflen, "/", 1);
47035+
47036+ return error;
47037+}
47038+
47039+/* this must be called with vfsmount_lock and rename_lock held */
47040+
47041+static char *__our_d_path(const struct path *path, struct path *root,
47042+ char *buf, int buflen)
47043+{
47044+ char *res = buf + buflen;
47045+ int error;
47046+
47047+ prepend(&res, &buflen, "\0", 1);
47048+ error = prepend_path(path, root, &res, &buflen);
47049+ if (error)
47050+ return ERR_PTR(error);
47051+
47052+ return res;
47053+}
47054+
47055+static char *
47056+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47057+{
47058+ char *retval;
47059+
47060+ retval = __our_d_path(path, root, buf, buflen);
47061+ if (unlikely(IS_ERR(retval)))
47062+ retval = strcpy(buf, "<path too long>");
47063+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47064+ retval[1] = '\0';
47065+
47066+ return retval;
47067+}
47068+
47069+static char *
47070+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47071+ char *buf, int buflen)
47072+{
47073+ struct path path;
47074+ char *res;
47075+
47076+ path.dentry = (struct dentry *)dentry;
47077+ path.mnt = (struct vfsmount *)vfsmnt;
47078+
47079+ /* we can use real_root.dentry, real_root.mnt, because this is only called
47080+ by the RBAC system */
47081+ res = gen_full_path(&path, &real_root, buf, buflen);
47082+
47083+ return res;
47084+}
47085+
47086+static char *
47087+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47088+ char *buf, int buflen)
47089+{
47090+ char *res;
47091+ struct path path;
47092+ struct path root;
47093+ struct task_struct *reaper = &init_task;
47094+
47095+ path.dentry = (struct dentry *)dentry;
47096+ path.mnt = (struct vfsmount *)vfsmnt;
47097+
47098+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47099+ get_fs_root(reaper->fs, &root);
47100+
47101+ write_seqlock(&rename_lock);
47102+ br_read_lock(vfsmount_lock);
47103+ res = gen_full_path(&path, &root, buf, buflen);
47104+ br_read_unlock(vfsmount_lock);
47105+ write_sequnlock(&rename_lock);
47106+
47107+ path_put(&root);
47108+ return res;
47109+}
47110+
47111+static char *
47112+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47113+{
47114+ char *ret;
47115+ write_seqlock(&rename_lock);
47116+ br_read_lock(vfsmount_lock);
47117+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47118+ PAGE_SIZE);
47119+ br_read_unlock(vfsmount_lock);
47120+ write_sequnlock(&rename_lock);
47121+ return ret;
47122+}
47123+
47124+static char *
47125+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47126+{
47127+ char *ret;
47128+ char *buf;
47129+ int buflen;
47130+
47131+ write_seqlock(&rename_lock);
47132+ br_read_lock(vfsmount_lock);
47133+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47134+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47135+ buflen = (int)(ret - buf);
47136+ if (buflen >= 5)
47137+ prepend(&ret, &buflen, "/proc", 5);
47138+ else
47139+ ret = strcpy(buf, "<path too long>");
47140+ br_read_unlock(vfsmount_lock);
47141+ write_sequnlock(&rename_lock);
47142+ return ret;
47143+}
47144+
47145+char *
47146+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47147+{
47148+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47149+ PAGE_SIZE);
47150+}
47151+
47152+char *
47153+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47154+{
47155+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47156+ PAGE_SIZE);
47157+}
47158+
47159+char *
47160+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47161+{
47162+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47163+ PAGE_SIZE);
47164+}
47165+
47166+char *
47167+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47168+{
47169+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47170+ PAGE_SIZE);
47171+}
47172+
47173+char *
47174+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47175+{
47176+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47177+ PAGE_SIZE);
47178+}
47179+
47180+__inline__ __u32
47181+to_gr_audit(const __u32 reqmode)
47182+{
47183+ /* masks off auditable permission flags, then shifts them to create
47184+ auditing flags, and adds the special case of append auditing if
47185+ we're requesting write */
47186+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47187+}
47188+
47189+struct acl_subject_label *
47190+lookup_subject_map(const struct acl_subject_label *userp)
47191+{
47192+ unsigned int index = shash(userp, subj_map_set.s_size);
47193+ struct subject_map *match;
47194+
47195+ match = subj_map_set.s_hash[index];
47196+
47197+ while (match && match->user != userp)
47198+ match = match->next;
47199+
47200+ if (match != NULL)
47201+ return match->kernel;
47202+ else
47203+ return NULL;
47204+}
47205+
47206+static void
47207+insert_subj_map_entry(struct subject_map *subjmap)
47208+{
47209+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47210+ struct subject_map **curr;
47211+
47212+ subjmap->prev = NULL;
47213+
47214+ curr = &subj_map_set.s_hash[index];
47215+ if (*curr != NULL)
47216+ (*curr)->prev = subjmap;
47217+
47218+ subjmap->next = *curr;
47219+ *curr = subjmap;
47220+
47221+ return;
47222+}
47223+
47224+static struct acl_role_label *
47225+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47226+ const gid_t gid)
47227+{
47228+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47229+ struct acl_role_label *match;
47230+ struct role_allowed_ip *ipp;
47231+ unsigned int x;
47232+ u32 curr_ip = task->signal->curr_ip;
47233+
47234+ task->signal->saved_ip = curr_ip;
47235+
47236+ match = acl_role_set.r_hash[index];
47237+
47238+ while (match) {
47239+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47240+ for (x = 0; x < match->domain_child_num; x++) {
47241+ if (match->domain_children[x] == uid)
47242+ goto found;
47243+ }
47244+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47245+ break;
47246+ match = match->next;
47247+ }
47248+found:
47249+ if (match == NULL) {
47250+ try_group:
47251+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47252+ match = acl_role_set.r_hash[index];
47253+
47254+ while (match) {
47255+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47256+ for (x = 0; x < match->domain_child_num; x++) {
47257+ if (match->domain_children[x] == gid)
47258+ goto found2;
47259+ }
47260+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47261+ break;
47262+ match = match->next;
47263+ }
47264+found2:
47265+ if (match == NULL)
47266+ match = default_role;
47267+ if (match->allowed_ips == NULL)
47268+ return match;
47269+ else {
47270+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47271+ if (likely
47272+ ((ntohl(curr_ip) & ipp->netmask) ==
47273+ (ntohl(ipp->addr) & ipp->netmask)))
47274+ return match;
47275+ }
47276+ match = default_role;
47277+ }
47278+ } else if (match->allowed_ips == NULL) {
47279+ return match;
47280+ } else {
47281+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47282+ if (likely
47283+ ((ntohl(curr_ip) & ipp->netmask) ==
47284+ (ntohl(ipp->addr) & ipp->netmask)))
47285+ return match;
47286+ }
47287+ goto try_group;
47288+ }
47289+
47290+ return match;
47291+}
47292+
47293+struct acl_subject_label *
47294+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47295+ const struct acl_role_label *role)
47296+{
47297+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47298+ struct acl_subject_label *match;
47299+
47300+ match = role->subj_hash[index];
47301+
47302+ while (match && (match->inode != ino || match->device != dev ||
47303+ (match->mode & GR_DELETED))) {
47304+ match = match->next;
47305+ }
47306+
47307+ if (match && !(match->mode & GR_DELETED))
47308+ return match;
47309+ else
47310+ return NULL;
47311+}
47312+
47313+struct acl_subject_label *
47314+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47315+ const struct acl_role_label *role)
47316+{
47317+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47318+ struct acl_subject_label *match;
47319+
47320+ match = role->subj_hash[index];
47321+
47322+ while (match && (match->inode != ino || match->device != dev ||
47323+ !(match->mode & GR_DELETED))) {
47324+ match = match->next;
47325+ }
47326+
47327+ if (match && (match->mode & GR_DELETED))
47328+ return match;
47329+ else
47330+ return NULL;
47331+}
47332+
47333+static struct acl_object_label *
47334+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47335+ const struct acl_subject_label *subj)
47336+{
47337+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47338+ struct acl_object_label *match;
47339+
47340+ match = subj->obj_hash[index];
47341+
47342+ while (match && (match->inode != ino || match->device != dev ||
47343+ (match->mode & GR_DELETED))) {
47344+ match = match->next;
47345+ }
47346+
47347+ if (match && !(match->mode & GR_DELETED))
47348+ return match;
47349+ else
47350+ return NULL;
47351+}
47352+
47353+static struct acl_object_label *
47354+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47355+ const struct acl_subject_label *subj)
47356+{
47357+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47358+ struct acl_object_label *match;
47359+
47360+ match = subj->obj_hash[index];
47361+
47362+ while (match && (match->inode != ino || match->device != dev ||
47363+ !(match->mode & GR_DELETED))) {
47364+ match = match->next;
47365+ }
47366+
47367+ if (match && (match->mode & GR_DELETED))
47368+ return match;
47369+
47370+ match = subj->obj_hash[index];
47371+
47372+ while (match && (match->inode != ino || match->device != dev ||
47373+ (match->mode & GR_DELETED))) {
47374+ match = match->next;
47375+ }
47376+
47377+ if (match && !(match->mode & GR_DELETED))
47378+ return match;
47379+ else
47380+ return NULL;
47381+}
47382+
47383+static struct name_entry *
47384+lookup_name_entry(const char *name)
47385+{
47386+ unsigned int len = strlen(name);
47387+ unsigned int key = full_name_hash(name, len);
47388+ unsigned int index = key % name_set.n_size;
47389+ struct name_entry *match;
47390+
47391+ match = name_set.n_hash[index];
47392+
47393+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47394+ match = match->next;
47395+
47396+ return match;
47397+}
47398+
47399+static struct name_entry *
47400+lookup_name_entry_create(const char *name)
47401+{
47402+ unsigned int len = strlen(name);
47403+ unsigned int key = full_name_hash(name, len);
47404+ unsigned int index = key % name_set.n_size;
47405+ struct name_entry *match;
47406+
47407+ match = name_set.n_hash[index];
47408+
47409+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47410+ !match->deleted))
47411+ match = match->next;
47412+
47413+ if (match && match->deleted)
47414+ return match;
47415+
47416+ match = name_set.n_hash[index];
47417+
47418+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47419+ match->deleted))
47420+ match = match->next;
47421+
47422+ if (match && !match->deleted)
47423+ return match;
47424+ else
47425+ return NULL;
47426+}
47427+
47428+static struct inodev_entry *
47429+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47430+{
47431+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47432+ struct inodev_entry *match;
47433+
47434+ match = inodev_set.i_hash[index];
47435+
47436+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47437+ match = match->next;
47438+
47439+ return match;
47440+}
47441+
47442+static void
47443+insert_inodev_entry(struct inodev_entry *entry)
47444+{
47445+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47446+ inodev_set.i_size);
47447+ struct inodev_entry **curr;
47448+
47449+ entry->prev = NULL;
47450+
47451+ curr = &inodev_set.i_hash[index];
47452+ if (*curr != NULL)
47453+ (*curr)->prev = entry;
47454+
47455+ entry->next = *curr;
47456+ *curr = entry;
47457+
47458+ return;
47459+}
47460+
47461+static void
47462+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47463+{
47464+ unsigned int index =
47465+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47466+ struct acl_role_label **curr;
47467+ struct acl_role_label *tmp;
47468+
47469+ curr = &acl_role_set.r_hash[index];
47470+
47471+ /* if role was already inserted due to domains and already has
47472+ a role in the same bucket as it attached, then we need to
47473+ combine these two buckets
47474+ */
47475+ if (role->next) {
47476+ tmp = role->next;
47477+ while (tmp->next)
47478+ tmp = tmp->next;
47479+ tmp->next = *curr;
47480+ } else
47481+ role->next = *curr;
47482+ *curr = role;
47483+
47484+ return;
47485+}
47486+
47487+static void
47488+insert_acl_role_label(struct acl_role_label *role)
47489+{
47490+ int i;
47491+
47492+ if (role_list == NULL) {
47493+ role_list = role;
47494+ role->prev = NULL;
47495+ } else {
47496+ role->prev = role_list;
47497+ role_list = role;
47498+ }
47499+
47500+ /* used for hash chains */
47501+ role->next = NULL;
47502+
47503+ if (role->roletype & GR_ROLE_DOMAIN) {
47504+ for (i = 0; i < role->domain_child_num; i++)
47505+ __insert_acl_role_label(role, role->domain_children[i]);
47506+ } else
47507+ __insert_acl_role_label(role, role->uidgid);
47508+}
47509+
47510+static int
47511+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47512+{
47513+ struct name_entry **curr, *nentry;
47514+ struct inodev_entry *ientry;
47515+ unsigned int len = strlen(name);
47516+ unsigned int key = full_name_hash(name, len);
47517+ unsigned int index = key % name_set.n_size;
47518+
47519+ curr = &name_set.n_hash[index];
47520+
47521+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47522+ curr = &((*curr)->next);
47523+
47524+ if (*curr != NULL)
47525+ return 1;
47526+
47527+ nentry = acl_alloc(sizeof (struct name_entry));
47528+ if (nentry == NULL)
47529+ return 0;
47530+ ientry = acl_alloc(sizeof (struct inodev_entry));
47531+ if (ientry == NULL)
47532+ return 0;
47533+ ientry->nentry = nentry;
47534+
47535+ nentry->key = key;
47536+ nentry->name = name;
47537+ nentry->inode = inode;
47538+ nentry->device = device;
47539+ nentry->len = len;
47540+ nentry->deleted = deleted;
47541+
47542+ nentry->prev = NULL;
47543+ curr = &name_set.n_hash[index];
47544+ if (*curr != NULL)
47545+ (*curr)->prev = nentry;
47546+ nentry->next = *curr;
47547+ *curr = nentry;
47548+
47549+ /* insert us into the table searchable by inode/dev */
47550+ insert_inodev_entry(ientry);
47551+
47552+ return 1;
47553+}
47554+
47555+static void
47556+insert_acl_obj_label(struct acl_object_label *obj,
47557+ struct acl_subject_label *subj)
47558+{
47559+ unsigned int index =
47560+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47561+ struct acl_object_label **curr;
47562+
47563+
47564+ obj->prev = NULL;
47565+
47566+ curr = &subj->obj_hash[index];
47567+ if (*curr != NULL)
47568+ (*curr)->prev = obj;
47569+
47570+ obj->next = *curr;
47571+ *curr = obj;
47572+
47573+ return;
47574+}
47575+
47576+static void
47577+insert_acl_subj_label(struct acl_subject_label *obj,
47578+ struct acl_role_label *role)
47579+{
47580+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47581+ struct acl_subject_label **curr;
47582+
47583+ obj->prev = NULL;
47584+
47585+ curr = &role->subj_hash[index];
47586+ if (*curr != NULL)
47587+ (*curr)->prev = obj;
47588+
47589+ obj->next = *curr;
47590+ *curr = obj;
47591+
47592+ return;
47593+}
47594+
47595+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47596+
47597+static void *
47598+create_table(__u32 * len, int elementsize)
47599+{
47600+ unsigned int table_sizes[] = {
47601+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47602+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47603+ 4194301, 8388593, 16777213, 33554393, 67108859
47604+ };
47605+ void *newtable = NULL;
47606+ unsigned int pwr = 0;
47607+
47608+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47609+ table_sizes[pwr] <= *len)
47610+ pwr++;
47611+
47612+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47613+ return newtable;
47614+
47615+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47616+ newtable =
47617+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47618+ else
47619+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47620+
47621+ *len = table_sizes[pwr];
47622+
47623+ return newtable;
47624+}
47625+
47626+static int
47627+init_variables(const struct gr_arg *arg)
47628+{
47629+ struct task_struct *reaper = &init_task;
47630+ unsigned int stacksize;
47631+
47632+ subj_map_set.s_size = arg->role_db.num_subjects;
47633+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47634+ name_set.n_size = arg->role_db.num_objects;
47635+ inodev_set.i_size = arg->role_db.num_objects;
47636+
47637+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47638+ !name_set.n_size || !inodev_set.i_size)
47639+ return 1;
47640+
47641+ if (!gr_init_uidset())
47642+ return 1;
47643+
47644+ /* set up the stack that holds allocation info */
47645+
47646+ stacksize = arg->role_db.num_pointers + 5;
47647+
47648+ if (!acl_alloc_stack_init(stacksize))
47649+ return 1;
47650+
47651+ /* grab reference for the real root dentry and vfsmount */
47652+ get_fs_root(reaper->fs, &real_root);
47653+
47654+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47655+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
47656+#endif
47657+
47658+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47659+ if (fakefs_obj_rw == NULL)
47660+ return 1;
47661+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47662+
47663+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47664+ if (fakefs_obj_rwx == NULL)
47665+ return 1;
47666+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47667+
47668+ subj_map_set.s_hash =
47669+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47670+ acl_role_set.r_hash =
47671+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47672+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47673+ inodev_set.i_hash =
47674+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47675+
47676+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47677+ !name_set.n_hash || !inodev_set.i_hash)
47678+ return 1;
47679+
47680+ memset(subj_map_set.s_hash, 0,
47681+ sizeof(struct subject_map *) * subj_map_set.s_size);
47682+ memset(acl_role_set.r_hash, 0,
47683+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47684+ memset(name_set.n_hash, 0,
47685+ sizeof (struct name_entry *) * name_set.n_size);
47686+ memset(inodev_set.i_hash, 0,
47687+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47688+
47689+ return 0;
47690+}
47691+
47692+/* free information not needed after startup
47693+ currently contains user->kernel pointer mappings for subjects
47694+*/
47695+
47696+static void
47697+free_init_variables(void)
47698+{
47699+ __u32 i;
47700+
47701+ if (subj_map_set.s_hash) {
47702+ for (i = 0; i < subj_map_set.s_size; i++) {
47703+ if (subj_map_set.s_hash[i]) {
47704+ kfree(subj_map_set.s_hash[i]);
47705+ subj_map_set.s_hash[i] = NULL;
47706+ }
47707+ }
47708+
47709+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47710+ PAGE_SIZE)
47711+ kfree(subj_map_set.s_hash);
47712+ else
47713+ vfree(subj_map_set.s_hash);
47714+ }
47715+
47716+ return;
47717+}
47718+
47719+static void
47720+free_variables(void)
47721+{
47722+ struct acl_subject_label *s;
47723+ struct acl_role_label *r;
47724+ struct task_struct *task, *task2;
47725+ unsigned int x;
47726+
47727+ gr_clear_learn_entries();
47728+
47729+ read_lock(&tasklist_lock);
47730+ do_each_thread(task2, task) {
47731+ task->acl_sp_role = 0;
47732+ task->acl_role_id = 0;
47733+ task->acl = NULL;
47734+ task->role = NULL;
47735+ } while_each_thread(task2, task);
47736+ read_unlock(&tasklist_lock);
47737+
47738+ /* release the reference to the real root dentry and vfsmount */
47739+ path_put(&real_root);
47740+
47741+ /* free all object hash tables */
47742+
47743+ FOR_EACH_ROLE_START(r)
47744+ if (r->subj_hash == NULL)
47745+ goto next_role;
47746+ FOR_EACH_SUBJECT_START(r, s, x)
47747+ if (s->obj_hash == NULL)
47748+ break;
47749+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47750+ kfree(s->obj_hash);
47751+ else
47752+ vfree(s->obj_hash);
47753+ FOR_EACH_SUBJECT_END(s, x)
47754+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47755+ if (s->obj_hash == NULL)
47756+ break;
47757+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47758+ kfree(s->obj_hash);
47759+ else
47760+ vfree(s->obj_hash);
47761+ FOR_EACH_NESTED_SUBJECT_END(s)
47762+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47763+ kfree(r->subj_hash);
47764+ else
47765+ vfree(r->subj_hash);
47766+ r->subj_hash = NULL;
47767+next_role:
47768+ FOR_EACH_ROLE_END(r)
47769+
47770+ acl_free_all();
47771+
47772+ if (acl_role_set.r_hash) {
47773+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47774+ PAGE_SIZE)
47775+ kfree(acl_role_set.r_hash);
47776+ else
47777+ vfree(acl_role_set.r_hash);
47778+ }
47779+ if (name_set.n_hash) {
47780+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47781+ PAGE_SIZE)
47782+ kfree(name_set.n_hash);
47783+ else
47784+ vfree(name_set.n_hash);
47785+ }
47786+
47787+ if (inodev_set.i_hash) {
47788+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47789+ PAGE_SIZE)
47790+ kfree(inodev_set.i_hash);
47791+ else
47792+ vfree(inodev_set.i_hash);
47793+ }
47794+
47795+ gr_free_uidset();
47796+
47797+ memset(&name_set, 0, sizeof (struct name_db));
47798+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47799+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47800+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47801+
47802+ default_role = NULL;
47803+ role_list = NULL;
47804+
47805+ return;
47806+}
47807+
47808+static __u32
47809+count_user_objs(struct acl_object_label *userp)
47810+{
47811+ struct acl_object_label o_tmp;
47812+ __u32 num = 0;
47813+
47814+ while (userp) {
47815+ if (copy_from_user(&o_tmp, userp,
47816+ sizeof (struct acl_object_label)))
47817+ break;
47818+
47819+ userp = o_tmp.prev;
47820+ num++;
47821+ }
47822+
47823+ return num;
47824+}
47825+
47826+static struct acl_subject_label *
47827+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47828+
47829+static int
47830+copy_user_glob(struct acl_object_label *obj)
47831+{
47832+ struct acl_object_label *g_tmp, **guser;
47833+ unsigned int len;
47834+ char *tmp;
47835+
47836+ if (obj->globbed == NULL)
47837+ return 0;
47838+
47839+ guser = &obj->globbed;
47840+ while (*guser) {
47841+ g_tmp = (struct acl_object_label *)
47842+ acl_alloc(sizeof (struct acl_object_label));
47843+ if (g_tmp == NULL)
47844+ return -ENOMEM;
47845+
47846+ if (copy_from_user(g_tmp, *guser,
47847+ sizeof (struct acl_object_label)))
47848+ return -EFAULT;
47849+
47850+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47851+
47852+ if (!len || len >= PATH_MAX)
47853+ return -EINVAL;
47854+
47855+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47856+ return -ENOMEM;
47857+
47858+ if (copy_from_user(tmp, g_tmp->filename, len))
47859+ return -EFAULT;
47860+ tmp[len-1] = '\0';
47861+ g_tmp->filename = tmp;
47862+
47863+ *guser = g_tmp;
47864+ guser = &(g_tmp->next);
47865+ }
47866+
47867+ return 0;
47868+}
47869+
47870+static int
47871+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47872+ struct acl_role_label *role)
47873+{
47874+ struct acl_object_label *o_tmp;
47875+ unsigned int len;
47876+ int ret;
47877+ char *tmp;
47878+
47879+ while (userp) {
47880+ if ((o_tmp = (struct acl_object_label *)
47881+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47882+ return -ENOMEM;
47883+
47884+ if (copy_from_user(o_tmp, userp,
47885+ sizeof (struct acl_object_label)))
47886+ return -EFAULT;
47887+
47888+ userp = o_tmp->prev;
47889+
47890+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47891+
47892+ if (!len || len >= PATH_MAX)
47893+ return -EINVAL;
47894+
47895+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47896+ return -ENOMEM;
47897+
47898+ if (copy_from_user(tmp, o_tmp->filename, len))
47899+ return -EFAULT;
47900+ tmp[len-1] = '\0';
47901+ o_tmp->filename = tmp;
47902+
47903+ insert_acl_obj_label(o_tmp, subj);
47904+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47905+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47906+ return -ENOMEM;
47907+
47908+ ret = copy_user_glob(o_tmp);
47909+ if (ret)
47910+ return ret;
47911+
47912+ if (o_tmp->nested) {
47913+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47914+ if (IS_ERR(o_tmp->nested))
47915+ return PTR_ERR(o_tmp->nested);
47916+
47917+ /* insert into nested subject list */
47918+ o_tmp->nested->next = role->hash->first;
47919+ role->hash->first = o_tmp->nested;
47920+ }
47921+ }
47922+
47923+ return 0;
47924+}
47925+
47926+static __u32
47927+count_user_subjs(struct acl_subject_label *userp)
47928+{
47929+ struct acl_subject_label s_tmp;
47930+ __u32 num = 0;
47931+
47932+ while (userp) {
47933+ if (copy_from_user(&s_tmp, userp,
47934+ sizeof (struct acl_subject_label)))
47935+ break;
47936+
47937+ userp = s_tmp.prev;
47938+ /* do not count nested subjects against this count, since
47939+ they are not included in the hash table, but are
47940+ attached to objects. We have already counted
47941+ the subjects in userspace for the allocation
47942+ stack
47943+ */
47944+ if (!(s_tmp.mode & GR_NESTED))
47945+ num++;
47946+ }
47947+
47948+ return num;
47949+}
47950+
47951+static int
47952+copy_user_allowedips(struct acl_role_label *rolep)
47953+{
47954+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47955+
47956+ ruserip = rolep->allowed_ips;
47957+
47958+ while (ruserip) {
47959+ rlast = rtmp;
47960+
47961+ if ((rtmp = (struct role_allowed_ip *)
47962+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47963+ return -ENOMEM;
47964+
47965+ if (copy_from_user(rtmp, ruserip,
47966+ sizeof (struct role_allowed_ip)))
47967+ return -EFAULT;
47968+
47969+ ruserip = rtmp->prev;
47970+
47971+ if (!rlast) {
47972+ rtmp->prev = NULL;
47973+ rolep->allowed_ips = rtmp;
47974+ } else {
47975+ rlast->next = rtmp;
47976+ rtmp->prev = rlast;
47977+ }
47978+
47979+ if (!ruserip)
47980+ rtmp->next = NULL;
47981+ }
47982+
47983+ return 0;
47984+}
47985+
47986+static int
47987+copy_user_transitions(struct acl_role_label *rolep)
47988+{
47989+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
47990+
47991+ unsigned int len;
47992+ char *tmp;
47993+
47994+ rusertp = rolep->transitions;
47995+
47996+ while (rusertp) {
47997+ rlast = rtmp;
47998+
47999+ if ((rtmp = (struct role_transition *)
48000+ acl_alloc(sizeof (struct role_transition))) == NULL)
48001+ return -ENOMEM;
48002+
48003+ if (copy_from_user(rtmp, rusertp,
48004+ sizeof (struct role_transition)))
48005+ return -EFAULT;
48006+
48007+ rusertp = rtmp->prev;
48008+
48009+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48010+
48011+ if (!len || len >= GR_SPROLE_LEN)
48012+ return -EINVAL;
48013+
48014+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48015+ return -ENOMEM;
48016+
48017+ if (copy_from_user(tmp, rtmp->rolename, len))
48018+ return -EFAULT;
48019+ tmp[len-1] = '\0';
48020+ rtmp->rolename = tmp;
48021+
48022+ if (!rlast) {
48023+ rtmp->prev = NULL;
48024+ rolep->transitions = rtmp;
48025+ } else {
48026+ rlast->next = rtmp;
48027+ rtmp->prev = rlast;
48028+ }
48029+
48030+ if (!rusertp)
48031+ rtmp->next = NULL;
48032+ }
48033+
48034+ return 0;
48035+}
48036+
48037+static struct acl_subject_label *
48038+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48039+{
48040+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48041+ unsigned int len;
48042+ char *tmp;
48043+ __u32 num_objs;
48044+ struct acl_ip_label **i_tmp, *i_utmp2;
48045+ struct gr_hash_struct ghash;
48046+ struct subject_map *subjmap;
48047+ unsigned int i_num;
48048+ int err;
48049+
48050+ s_tmp = lookup_subject_map(userp);
48051+
48052+ /* we've already copied this subject into the kernel, just return
48053+ the reference to it, and don't copy it over again
48054+ */
48055+ if (s_tmp)
48056+ return(s_tmp);
48057+
48058+ if ((s_tmp = (struct acl_subject_label *)
48059+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48060+ return ERR_PTR(-ENOMEM);
48061+
48062+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48063+ if (subjmap == NULL)
48064+ return ERR_PTR(-ENOMEM);
48065+
48066+ subjmap->user = userp;
48067+ subjmap->kernel = s_tmp;
48068+ insert_subj_map_entry(subjmap);
48069+
48070+ if (copy_from_user(s_tmp, userp,
48071+ sizeof (struct acl_subject_label)))
48072+ return ERR_PTR(-EFAULT);
48073+
48074+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48075+
48076+ if (!len || len >= PATH_MAX)
48077+ return ERR_PTR(-EINVAL);
48078+
48079+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48080+ return ERR_PTR(-ENOMEM);
48081+
48082+ if (copy_from_user(tmp, s_tmp->filename, len))
48083+ return ERR_PTR(-EFAULT);
48084+ tmp[len-1] = '\0';
48085+ s_tmp->filename = tmp;
48086+
48087+ if (!strcmp(s_tmp->filename, "/"))
48088+ role->root_label = s_tmp;
48089+
48090+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48091+ return ERR_PTR(-EFAULT);
48092+
48093+ /* copy user and group transition tables */
48094+
48095+ if (s_tmp->user_trans_num) {
48096+ uid_t *uidlist;
48097+
48098+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48099+ if (uidlist == NULL)
48100+ return ERR_PTR(-ENOMEM);
48101+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48102+ return ERR_PTR(-EFAULT);
48103+
48104+ s_tmp->user_transitions = uidlist;
48105+ }
48106+
48107+ if (s_tmp->group_trans_num) {
48108+ gid_t *gidlist;
48109+
48110+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48111+ if (gidlist == NULL)
48112+ return ERR_PTR(-ENOMEM);
48113+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48114+ return ERR_PTR(-EFAULT);
48115+
48116+ s_tmp->group_transitions = gidlist;
48117+ }
48118+
48119+ /* set up object hash table */
48120+ num_objs = count_user_objs(ghash.first);
48121+
48122+ s_tmp->obj_hash_size = num_objs;
48123+ s_tmp->obj_hash =
48124+ (struct acl_object_label **)
48125+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48126+
48127+ if (!s_tmp->obj_hash)
48128+ return ERR_PTR(-ENOMEM);
48129+
48130+ memset(s_tmp->obj_hash, 0,
48131+ s_tmp->obj_hash_size *
48132+ sizeof (struct acl_object_label *));
48133+
48134+ /* add in objects */
48135+ err = copy_user_objs(ghash.first, s_tmp, role);
48136+
48137+ if (err)
48138+ return ERR_PTR(err);
48139+
48140+ /* set pointer for parent subject */
48141+ if (s_tmp->parent_subject) {
48142+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48143+
48144+ if (IS_ERR(s_tmp2))
48145+ return s_tmp2;
48146+
48147+ s_tmp->parent_subject = s_tmp2;
48148+ }
48149+
48150+ /* add in ip acls */
48151+
48152+ if (!s_tmp->ip_num) {
48153+ s_tmp->ips = NULL;
48154+ goto insert;
48155+ }
48156+
48157+ i_tmp =
48158+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48159+ sizeof (struct acl_ip_label *));
48160+
48161+ if (!i_tmp)
48162+ return ERR_PTR(-ENOMEM);
48163+
48164+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48165+ *(i_tmp + i_num) =
48166+ (struct acl_ip_label *)
48167+ acl_alloc(sizeof (struct acl_ip_label));
48168+ if (!*(i_tmp + i_num))
48169+ return ERR_PTR(-ENOMEM);
48170+
48171+ if (copy_from_user
48172+ (&i_utmp2, s_tmp->ips + i_num,
48173+ sizeof (struct acl_ip_label *)))
48174+ return ERR_PTR(-EFAULT);
48175+
48176+ if (copy_from_user
48177+ (*(i_tmp + i_num), i_utmp2,
48178+ sizeof (struct acl_ip_label)))
48179+ return ERR_PTR(-EFAULT);
48180+
48181+ if ((*(i_tmp + i_num))->iface == NULL)
48182+ continue;
48183+
48184+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48185+ if (!len || len >= IFNAMSIZ)
48186+ return ERR_PTR(-EINVAL);
48187+ tmp = acl_alloc(len);
48188+ if (tmp == NULL)
48189+ return ERR_PTR(-ENOMEM);
48190+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48191+ return ERR_PTR(-EFAULT);
48192+ (*(i_tmp + i_num))->iface = tmp;
48193+ }
48194+
48195+ s_tmp->ips = i_tmp;
48196+
48197+insert:
48198+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48199+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48200+ return ERR_PTR(-ENOMEM);
48201+
48202+ return s_tmp;
48203+}
48204+
48205+static int
48206+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48207+{
48208+ struct acl_subject_label s_pre;
48209+ struct acl_subject_label * ret;
48210+ int err;
48211+
48212+ while (userp) {
48213+ if (copy_from_user(&s_pre, userp,
48214+ sizeof (struct acl_subject_label)))
48215+ return -EFAULT;
48216+
48217+ /* do not add nested subjects here, add
48218+ while parsing objects
48219+ */
48220+
48221+ if (s_pre.mode & GR_NESTED) {
48222+ userp = s_pre.prev;
48223+ continue;
48224+ }
48225+
48226+ ret = do_copy_user_subj(userp, role);
48227+
48228+ err = PTR_ERR(ret);
48229+ if (IS_ERR(ret))
48230+ return err;
48231+
48232+ insert_acl_subj_label(ret, role);
48233+
48234+ userp = s_pre.prev;
48235+ }
48236+
48237+ return 0;
48238+}
48239+
48240+static int
48241+copy_user_acl(struct gr_arg *arg)
48242+{
48243+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48244+ struct sprole_pw *sptmp;
48245+ struct gr_hash_struct *ghash;
48246+ uid_t *domainlist;
48247+ unsigned int r_num;
48248+ unsigned int len;
48249+ char *tmp;
48250+ int err = 0;
48251+ __u16 i;
48252+ __u32 num_subjs;
48253+
48254+ /* we need a default and kernel role */
48255+ if (arg->role_db.num_roles < 2)
48256+ return -EINVAL;
48257+
48258+ /* copy special role authentication info from userspace */
48259+
48260+ num_sprole_pws = arg->num_sprole_pws;
48261+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48262+
48263+ if (!acl_special_roles) {
48264+ err = -ENOMEM;
48265+ goto cleanup;
48266+ }
48267+
48268+ for (i = 0; i < num_sprole_pws; i++) {
48269+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48270+ if (!sptmp) {
48271+ err = -ENOMEM;
48272+ goto cleanup;
48273+ }
48274+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48275+ sizeof (struct sprole_pw))) {
48276+ err = -EFAULT;
48277+ goto cleanup;
48278+ }
48279+
48280+ len =
48281+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48282+
48283+ if (!len || len >= GR_SPROLE_LEN) {
48284+ err = -EINVAL;
48285+ goto cleanup;
48286+ }
48287+
48288+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48289+ err = -ENOMEM;
48290+ goto cleanup;
48291+ }
48292+
48293+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48294+ err = -EFAULT;
48295+ goto cleanup;
48296+ }
48297+ tmp[len-1] = '\0';
48298+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48299+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48300+#endif
48301+ sptmp->rolename = tmp;
48302+ acl_special_roles[i] = sptmp;
48303+ }
48304+
48305+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48306+
48307+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48308+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48309+
48310+ if (!r_tmp) {
48311+ err = -ENOMEM;
48312+ goto cleanup;
48313+ }
48314+
48315+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48316+ sizeof (struct acl_role_label *))) {
48317+ err = -EFAULT;
48318+ goto cleanup;
48319+ }
48320+
48321+ if (copy_from_user(r_tmp, r_utmp2,
48322+ sizeof (struct acl_role_label))) {
48323+ err = -EFAULT;
48324+ goto cleanup;
48325+ }
48326+
48327+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48328+
48329+ if (!len || len >= PATH_MAX) {
48330+ err = -EINVAL;
48331+ goto cleanup;
48332+ }
48333+
48334+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48335+ err = -ENOMEM;
48336+ goto cleanup;
48337+ }
48338+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48339+ err = -EFAULT;
48340+ goto cleanup;
48341+ }
48342+ tmp[len-1] = '\0';
48343+ r_tmp->rolename = tmp;
48344+
48345+ if (!strcmp(r_tmp->rolename, "default")
48346+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48347+ default_role = r_tmp;
48348+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48349+ kernel_role = r_tmp;
48350+ }
48351+
48352+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48353+ err = -ENOMEM;
48354+ goto cleanup;
48355+ }
48356+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48357+ err = -EFAULT;
48358+ goto cleanup;
48359+ }
48360+
48361+ r_tmp->hash = ghash;
48362+
48363+ num_subjs = count_user_subjs(r_tmp->hash->first);
48364+
48365+ r_tmp->subj_hash_size = num_subjs;
48366+ r_tmp->subj_hash =
48367+ (struct acl_subject_label **)
48368+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48369+
48370+ if (!r_tmp->subj_hash) {
48371+ err = -ENOMEM;
48372+ goto cleanup;
48373+ }
48374+
48375+ err = copy_user_allowedips(r_tmp);
48376+ if (err)
48377+ goto cleanup;
48378+
48379+ /* copy domain info */
48380+ if (r_tmp->domain_children != NULL) {
48381+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48382+ if (domainlist == NULL) {
48383+ err = -ENOMEM;
48384+ goto cleanup;
48385+ }
48386+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48387+ err = -EFAULT;
48388+ goto cleanup;
48389+ }
48390+ r_tmp->domain_children = domainlist;
48391+ }
48392+
48393+ err = copy_user_transitions(r_tmp);
48394+ if (err)
48395+ goto cleanup;
48396+
48397+ memset(r_tmp->subj_hash, 0,
48398+ r_tmp->subj_hash_size *
48399+ sizeof (struct acl_subject_label *));
48400+
48401+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48402+
48403+ if (err)
48404+ goto cleanup;
48405+
48406+ /* set nested subject list to null */
48407+ r_tmp->hash->first = NULL;
48408+
48409+ insert_acl_role_label(r_tmp);
48410+ }
48411+
48412+ goto return_err;
48413+ cleanup:
48414+ free_variables();
48415+ return_err:
48416+ return err;
48417+
48418+}
48419+
48420+static int
48421+gracl_init(struct gr_arg *args)
48422+{
48423+ int error = 0;
48424+
48425+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48426+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48427+
48428+ if (init_variables(args)) {
48429+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48430+ error = -ENOMEM;
48431+ free_variables();
48432+ goto out;
48433+ }
48434+
48435+ error = copy_user_acl(args);
48436+ free_init_variables();
48437+ if (error) {
48438+ free_variables();
48439+ goto out;
48440+ }
48441+
48442+ if ((error = gr_set_acls(0))) {
48443+ free_variables();
48444+ goto out;
48445+ }
48446+
48447+ pax_open_kernel();
48448+ gr_status |= GR_READY;
48449+ pax_close_kernel();
48450+
48451+ out:
48452+ return error;
48453+}
48454+
48455+/* derived from glibc fnmatch() 0: match, 1: no match*/
48456+
48457+static int
48458+glob_match(const char *p, const char *n)
48459+{
48460+ char c;
48461+
48462+ while ((c = *p++) != '\0') {
48463+ switch (c) {
48464+ case '?':
48465+ if (*n == '\0')
48466+ return 1;
48467+ else if (*n == '/')
48468+ return 1;
48469+ break;
48470+ case '\\':
48471+ if (*n != c)
48472+ return 1;
48473+ break;
48474+ case '*':
48475+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48476+ if (*n == '/')
48477+ return 1;
48478+ else if (c == '?') {
48479+ if (*n == '\0')
48480+ return 1;
48481+ else
48482+ ++n;
48483+ }
48484+ }
48485+ if (c == '\0') {
48486+ return 0;
48487+ } else {
48488+ const char *endp;
48489+
48490+ if ((endp = strchr(n, '/')) == NULL)
48491+ endp = n + strlen(n);
48492+
48493+ if (c == '[') {
48494+ for (--p; n < endp; ++n)
48495+ if (!glob_match(p, n))
48496+ return 0;
48497+ } else if (c == '/') {
48498+ while (*n != '\0' && *n != '/')
48499+ ++n;
48500+ if (*n == '/' && !glob_match(p, n + 1))
48501+ return 0;
48502+ } else {
48503+ for (--p; n < endp; ++n)
48504+ if (*n == c && !glob_match(p, n))
48505+ return 0;
48506+ }
48507+
48508+ return 1;
48509+ }
48510+ case '[':
48511+ {
48512+ int not;
48513+ char cold;
48514+
48515+ if (*n == '\0' || *n == '/')
48516+ return 1;
48517+
48518+ not = (*p == '!' || *p == '^');
48519+ if (not)
48520+ ++p;
48521+
48522+ c = *p++;
48523+ for (;;) {
48524+ unsigned char fn = (unsigned char)*n;
48525+
48526+ if (c == '\0')
48527+ return 1;
48528+ else {
48529+ if (c == fn)
48530+ goto matched;
48531+ cold = c;
48532+ c = *p++;
48533+
48534+ if (c == '-' && *p != ']') {
48535+ unsigned char cend = *p++;
48536+
48537+ if (cend == '\0')
48538+ return 1;
48539+
48540+ if (cold <= fn && fn <= cend)
48541+ goto matched;
48542+
48543+ c = *p++;
48544+ }
48545+ }
48546+
48547+ if (c == ']')
48548+ break;
48549+ }
48550+ if (!not)
48551+ return 1;
48552+ break;
48553+ matched:
48554+ while (c != ']') {
48555+ if (c == '\0')
48556+ return 1;
48557+
48558+ c = *p++;
48559+ }
48560+ if (not)
48561+ return 1;
48562+ }
48563+ break;
48564+ default:
48565+ if (c != *n)
48566+ return 1;
48567+ }
48568+
48569+ ++n;
48570+ }
48571+
48572+ if (*n == '\0')
48573+ return 0;
48574+
48575+ if (*n == '/')
48576+ return 0;
48577+
48578+ return 1;
48579+}
48580+
48581+static struct acl_object_label *
48582+chk_glob_label(struct acl_object_label *globbed,
48583+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48584+{
48585+ struct acl_object_label *tmp;
48586+
48587+ if (*path == NULL)
48588+ *path = gr_to_filename_nolock(dentry, mnt);
48589+
48590+ tmp = globbed;
48591+
48592+ while (tmp) {
48593+ if (!glob_match(tmp->filename, *path))
48594+ return tmp;
48595+ tmp = tmp->next;
48596+ }
48597+
48598+ return NULL;
48599+}
48600+
48601+static struct acl_object_label *
48602+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48603+ const ino_t curr_ino, const dev_t curr_dev,
48604+ const struct acl_subject_label *subj, char **path, const int checkglob)
48605+{
48606+ struct acl_subject_label *tmpsubj;
48607+ struct acl_object_label *retval;
48608+ struct acl_object_label *retval2;
48609+
48610+ tmpsubj = (struct acl_subject_label *) subj;
48611+ read_lock(&gr_inode_lock);
48612+ do {
48613+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48614+ if (retval) {
48615+ if (checkglob && retval->globbed) {
48616+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48617+ (struct vfsmount *)orig_mnt, path);
48618+ if (retval2)
48619+ retval = retval2;
48620+ }
48621+ break;
48622+ }
48623+ } while ((tmpsubj = tmpsubj->parent_subject));
48624+ read_unlock(&gr_inode_lock);
48625+
48626+ return retval;
48627+}
48628+
48629+static __inline__ struct acl_object_label *
48630+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48631+ struct dentry *curr_dentry,
48632+ const struct acl_subject_label *subj, char **path, const int checkglob)
48633+{
48634+ int newglob = checkglob;
48635+ ino_t inode;
48636+ dev_t device;
48637+
48638+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
48639+ as we don't want a / * rule to match instead of the / object
48640+ don't do this for create lookups that call this function though, since they're looking up
48641+ on the parent and thus need globbing checks on all paths
48642+ */
48643+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48644+ newglob = GR_NO_GLOB;
48645+
48646+ spin_lock(&curr_dentry->d_lock);
48647+ inode = curr_dentry->d_inode->i_ino;
48648+ device = __get_dev(curr_dentry);
48649+ spin_unlock(&curr_dentry->d_lock);
48650+
48651+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
48652+}
48653+
48654+static struct acl_object_label *
48655+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48656+ const struct acl_subject_label *subj, char *path, const int checkglob)
48657+{
48658+ struct dentry *dentry = (struct dentry *) l_dentry;
48659+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48660+ struct acl_object_label *retval;
48661+ struct dentry *parent;
48662+
48663+ write_seqlock(&rename_lock);
48664+ br_read_lock(vfsmount_lock);
48665+
48666+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48667+#ifdef CONFIG_NET
48668+ mnt == sock_mnt ||
48669+#endif
48670+#ifdef CONFIG_HUGETLBFS
48671+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48672+#endif
48673+ /* ignore Eric Biederman */
48674+ IS_PRIVATE(l_dentry->d_inode))) {
48675+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48676+ goto out;
48677+ }
48678+
48679+ for (;;) {
48680+ if (dentry == real_root.dentry && mnt == real_root.mnt)
48681+ break;
48682+
48683+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48684+ if (mnt->mnt_parent == mnt)
48685+ break;
48686+
48687+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48688+ if (retval != NULL)
48689+ goto out;
48690+
48691+ dentry = mnt->mnt_mountpoint;
48692+ mnt = mnt->mnt_parent;
48693+ continue;
48694+ }
48695+
48696+ parent = dentry->d_parent;
48697+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48698+ if (retval != NULL)
48699+ goto out;
48700+
48701+ dentry = parent;
48702+ }
48703+
48704+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48705+
48706+ /* real_root is pinned so we don't have to hold a reference */
48707+ if (retval == NULL)
48708+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
48709+out:
48710+ br_read_unlock(vfsmount_lock);
48711+ write_sequnlock(&rename_lock);
48712+
48713+ BUG_ON(retval == NULL);
48714+
48715+ return retval;
48716+}
48717+
48718+static __inline__ struct acl_object_label *
48719+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48720+ const struct acl_subject_label *subj)
48721+{
48722+ char *path = NULL;
48723+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48724+}
48725+
48726+static __inline__ struct acl_object_label *
48727+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48728+ const struct acl_subject_label *subj)
48729+{
48730+ char *path = NULL;
48731+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48732+}
48733+
48734+static __inline__ struct acl_object_label *
48735+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48736+ const struct acl_subject_label *subj, char *path)
48737+{
48738+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48739+}
48740+
48741+static struct acl_subject_label *
48742+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48743+ const struct acl_role_label *role)
48744+{
48745+ struct dentry *dentry = (struct dentry *) l_dentry;
48746+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48747+ struct acl_subject_label *retval;
48748+ struct dentry *parent;
48749+
48750+ write_seqlock(&rename_lock);
48751+ br_read_lock(vfsmount_lock);
48752+
48753+ for (;;) {
48754+ if (dentry == real_root.dentry && mnt == real_root.mnt)
48755+ break;
48756+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48757+ if (mnt->mnt_parent == mnt)
48758+ break;
48759+
48760+ spin_lock(&dentry->d_lock);
48761+ read_lock(&gr_inode_lock);
48762+ retval =
48763+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48764+ __get_dev(dentry), role);
48765+ read_unlock(&gr_inode_lock);
48766+ spin_unlock(&dentry->d_lock);
48767+ if (retval != NULL)
48768+ goto out;
48769+
48770+ dentry = mnt->mnt_mountpoint;
48771+ mnt = mnt->mnt_parent;
48772+ continue;
48773+ }
48774+
48775+ spin_lock(&dentry->d_lock);
48776+ read_lock(&gr_inode_lock);
48777+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48778+ __get_dev(dentry), role);
48779+ read_unlock(&gr_inode_lock);
48780+ parent = dentry->d_parent;
48781+ spin_unlock(&dentry->d_lock);
48782+
48783+ if (retval != NULL)
48784+ goto out;
48785+
48786+ dentry = parent;
48787+ }
48788+
48789+ spin_lock(&dentry->d_lock);
48790+ read_lock(&gr_inode_lock);
48791+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48792+ __get_dev(dentry), role);
48793+ read_unlock(&gr_inode_lock);
48794+ spin_unlock(&dentry->d_lock);
48795+
48796+ if (unlikely(retval == NULL)) {
48797+ /* real_root is pinned, we don't need to hold a reference */
48798+ read_lock(&gr_inode_lock);
48799+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
48800+ __get_dev(real_root.dentry), role);
48801+ read_unlock(&gr_inode_lock);
48802+ }
48803+out:
48804+ br_read_unlock(vfsmount_lock);
48805+ write_sequnlock(&rename_lock);
48806+
48807+ BUG_ON(retval == NULL);
48808+
48809+ return retval;
48810+}
48811+
48812+static void
48813+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48814+{
48815+ struct task_struct *task = current;
48816+ const struct cred *cred = current_cred();
48817+
48818+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48819+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48820+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48821+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48822+
48823+ return;
48824+}
48825+
48826+static void
48827+gr_log_learn_sysctl(const char *path, const __u32 mode)
48828+{
48829+ struct task_struct *task = current;
48830+ const struct cred *cred = current_cred();
48831+
48832+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48833+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48834+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48835+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48836+
48837+ return;
48838+}
48839+
48840+static void
48841+gr_log_learn_id_change(const char type, const unsigned int real,
48842+ const unsigned int effective, const unsigned int fs)
48843+{
48844+ struct task_struct *task = current;
48845+ const struct cred *cred = current_cred();
48846+
48847+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48848+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48849+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48850+ type, real, effective, fs, &task->signal->saved_ip);
48851+
48852+ return;
48853+}
48854+
48855+__u32
48856+gr_search_file(const struct dentry * dentry, const __u32 mode,
48857+ const struct vfsmount * mnt)
48858+{
48859+ __u32 retval = mode;
48860+ struct acl_subject_label *curracl;
48861+ struct acl_object_label *currobj;
48862+
48863+ if (unlikely(!(gr_status & GR_READY)))
48864+ return (mode & ~GR_AUDITS);
48865+
48866+ curracl = current->acl;
48867+
48868+ currobj = chk_obj_label(dentry, mnt, curracl);
48869+ retval = currobj->mode & mode;
48870+
48871+ /* if we're opening a specified transfer file for writing
48872+ (e.g. /dev/initctl), then transfer our role to init
48873+ */
48874+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48875+ current->role->roletype & GR_ROLE_PERSIST)) {
48876+ struct task_struct *task = init_pid_ns.child_reaper;
48877+
48878+ if (task->role != current->role) {
48879+ task->acl_sp_role = 0;
48880+ task->acl_role_id = current->acl_role_id;
48881+ task->role = current->role;
48882+ rcu_read_lock();
48883+ read_lock(&grsec_exec_file_lock);
48884+ gr_apply_subject_to_task(task);
48885+ read_unlock(&grsec_exec_file_lock);
48886+ rcu_read_unlock();
48887+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48888+ }
48889+ }
48890+
48891+ if (unlikely
48892+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48893+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48894+ __u32 new_mode = mode;
48895+
48896+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48897+
48898+ retval = new_mode;
48899+
48900+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48901+ new_mode |= GR_INHERIT;
48902+
48903+ if (!(mode & GR_NOLEARN))
48904+ gr_log_learn(dentry, mnt, new_mode);
48905+ }
48906+
48907+ return retval;
48908+}
48909+
48910+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
48911+ const struct dentry *parent,
48912+ const struct vfsmount *mnt)
48913+{
48914+ struct name_entry *match;
48915+ struct acl_object_label *matchpo;
48916+ struct acl_subject_label *curracl;
48917+ char *path;
48918+
48919+ if (unlikely(!(gr_status & GR_READY)))
48920+ return NULL;
48921+
48922+ preempt_disable();
48923+ path = gr_to_filename_rbac(new_dentry, mnt);
48924+ match = lookup_name_entry_create(path);
48925+
48926+ curracl = current->acl;
48927+
48928+ if (match) {
48929+ read_lock(&gr_inode_lock);
48930+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48931+ read_unlock(&gr_inode_lock);
48932+
48933+ if (matchpo) {
48934+ preempt_enable();
48935+ return matchpo;
48936+ }
48937+ }
48938+
48939+ // lookup parent
48940+
48941+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48942+
48943+ preempt_enable();
48944+ return matchpo;
48945+}
48946+
48947+__u32
48948+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48949+ const struct vfsmount * mnt, const __u32 mode)
48950+{
48951+ struct acl_object_label *matchpo;
48952+ __u32 retval;
48953+
48954+ if (unlikely(!(gr_status & GR_READY)))
48955+ return (mode & ~GR_AUDITS);
48956+
48957+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
48958+
48959+ retval = matchpo->mode & mode;
48960+
48961+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48962+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48963+ __u32 new_mode = mode;
48964+
48965+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48966+
48967+ gr_log_learn(new_dentry, mnt, new_mode);
48968+ return new_mode;
48969+ }
48970+
48971+ return retval;
48972+}
48973+
48974+__u32
48975+gr_check_link(const struct dentry * new_dentry,
48976+ const struct dentry * parent_dentry,
48977+ const struct vfsmount * parent_mnt,
48978+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48979+{
48980+ struct acl_object_label *obj;
48981+ __u32 oldmode, newmode;
48982+ __u32 needmode;
48983+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
48984+ GR_DELETE | GR_INHERIT;
48985+
48986+ if (unlikely(!(gr_status & GR_READY)))
48987+ return (GR_CREATE | GR_LINK);
48988+
48989+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48990+ oldmode = obj->mode;
48991+
48992+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
48993+ newmode = obj->mode;
48994+
48995+ needmode = newmode & checkmodes;
48996+
48997+ // old name for hardlink must have at least the permissions of the new name
48998+ if ((oldmode & needmode) != needmode)
48999+ goto bad;
49000+
49001+ // if old name had restrictions/auditing, make sure the new name does as well
49002+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49003+
49004+ // don't allow hardlinking of suid/sgid files without permission
49005+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49006+ needmode |= GR_SETID;
49007+
49008+ if ((newmode & needmode) != needmode)
49009+ goto bad;
49010+
49011+ // enforce minimum permissions
49012+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49013+ return newmode;
49014+bad:
49015+ needmode = oldmode;
49016+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49017+ needmode |= GR_SETID;
49018+
49019+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49020+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49021+ return (GR_CREATE | GR_LINK);
49022+ } else if (newmode & GR_SUPPRESS)
49023+ return GR_SUPPRESS;
49024+ else
49025+ return 0;
49026+}
49027+
49028+int
49029+gr_check_hidden_task(const struct task_struct *task)
49030+{
49031+ if (unlikely(!(gr_status & GR_READY)))
49032+ return 0;
49033+
49034+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49035+ return 1;
49036+
49037+ return 0;
49038+}
49039+
49040+int
49041+gr_check_protected_task(const struct task_struct *task)
49042+{
49043+ if (unlikely(!(gr_status & GR_READY) || !task))
49044+ return 0;
49045+
49046+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49047+ task->acl != current->acl)
49048+ return 1;
49049+
49050+ return 0;
49051+}
49052+
49053+int
49054+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49055+{
49056+ struct task_struct *p;
49057+ int ret = 0;
49058+
49059+ if (unlikely(!(gr_status & GR_READY) || !pid))
49060+ return ret;
49061+
49062+ read_lock(&tasklist_lock);
49063+ do_each_pid_task(pid, type, p) {
49064+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49065+ p->acl != current->acl) {
49066+ ret = 1;
49067+ goto out;
49068+ }
49069+ } while_each_pid_task(pid, type, p);
49070+out:
49071+ read_unlock(&tasklist_lock);
49072+
49073+ return ret;
49074+}
49075+
49076+void
49077+gr_copy_label(struct task_struct *tsk)
49078+{
49079+ tsk->signal->used_accept = 0;
49080+ tsk->acl_sp_role = 0;
49081+ tsk->acl_role_id = current->acl_role_id;
49082+ tsk->acl = current->acl;
49083+ tsk->role = current->role;
49084+ tsk->signal->curr_ip = current->signal->curr_ip;
49085+ tsk->signal->saved_ip = current->signal->saved_ip;
49086+ if (current->exec_file)
49087+ get_file(current->exec_file);
49088+ tsk->exec_file = current->exec_file;
49089+ tsk->is_writable = current->is_writable;
49090+ if (unlikely(current->signal->used_accept)) {
49091+ current->signal->curr_ip = 0;
49092+ current->signal->saved_ip = 0;
49093+ }
49094+
49095+ return;
49096+}
49097+
49098+static void
49099+gr_set_proc_res(struct task_struct *task)
49100+{
49101+ struct acl_subject_label *proc;
49102+ unsigned short i;
49103+
49104+ proc = task->acl;
49105+
49106+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49107+ return;
49108+
49109+ for (i = 0; i < RLIM_NLIMITS; i++) {
49110+ if (!(proc->resmask & (1 << i)))
49111+ continue;
49112+
49113+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49114+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49115+ }
49116+
49117+ return;
49118+}
49119+
49120+extern int __gr_process_user_ban(struct user_struct *user);
49121+
49122+int
49123+gr_check_user_change(int real, int effective, int fs)
49124+{
49125+ unsigned int i;
49126+ __u16 num;
49127+ uid_t *uidlist;
49128+ int curuid;
49129+ int realok = 0;
49130+ int effectiveok = 0;
49131+ int fsok = 0;
49132+
49133+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49134+ struct user_struct *user;
49135+
49136+ if (real == -1)
49137+ goto skipit;
49138+
49139+ user = find_user(real);
49140+ if (user == NULL)
49141+ goto skipit;
49142+
49143+ if (__gr_process_user_ban(user)) {
49144+ /* for find_user */
49145+ free_uid(user);
49146+ return 1;
49147+ }
49148+
49149+ /* for find_user */
49150+ free_uid(user);
49151+
49152+skipit:
49153+#endif
49154+
49155+ if (unlikely(!(gr_status & GR_READY)))
49156+ return 0;
49157+
49158+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49159+ gr_log_learn_id_change('u', real, effective, fs);
49160+
49161+ num = current->acl->user_trans_num;
49162+ uidlist = current->acl->user_transitions;
49163+
49164+ if (uidlist == NULL)
49165+ return 0;
49166+
49167+ if (real == -1)
49168+ realok = 1;
49169+ if (effective == -1)
49170+ effectiveok = 1;
49171+ if (fs == -1)
49172+ fsok = 1;
49173+
49174+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49175+ for (i = 0; i < num; i++) {
49176+ curuid = (int)uidlist[i];
49177+ if (real == curuid)
49178+ realok = 1;
49179+ if (effective == curuid)
49180+ effectiveok = 1;
49181+ if (fs == curuid)
49182+ fsok = 1;
49183+ }
49184+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49185+ for (i = 0; i < num; i++) {
49186+ curuid = (int)uidlist[i];
49187+ if (real == curuid)
49188+ break;
49189+ if (effective == curuid)
49190+ break;
49191+ if (fs == curuid)
49192+ break;
49193+ }
49194+ /* not in deny list */
49195+ if (i == num) {
49196+ realok = 1;
49197+ effectiveok = 1;
49198+ fsok = 1;
49199+ }
49200+ }
49201+
49202+ if (realok && effectiveok && fsok)
49203+ return 0;
49204+ else {
49205+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49206+ return 1;
49207+ }
49208+}
49209+
49210+int
49211+gr_check_group_change(int real, int effective, int fs)
49212+{
49213+ unsigned int i;
49214+ __u16 num;
49215+ gid_t *gidlist;
49216+ int curgid;
49217+ int realok = 0;
49218+ int effectiveok = 0;
49219+ int fsok = 0;
49220+
49221+ if (unlikely(!(gr_status & GR_READY)))
49222+ return 0;
49223+
49224+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49225+ gr_log_learn_id_change('g', real, effective, fs);
49226+
49227+ num = current->acl->group_trans_num;
49228+ gidlist = current->acl->group_transitions;
49229+
49230+ if (gidlist == NULL)
49231+ return 0;
49232+
49233+ if (real == -1)
49234+ realok = 1;
49235+ if (effective == -1)
49236+ effectiveok = 1;
49237+ if (fs == -1)
49238+ fsok = 1;
49239+
49240+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49241+ for (i = 0; i < num; i++) {
49242+ curgid = (int)gidlist[i];
49243+ if (real == curgid)
49244+ realok = 1;
49245+ if (effective == curgid)
49246+ effectiveok = 1;
49247+ if (fs == curgid)
49248+ fsok = 1;
49249+ }
49250+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49251+ for (i = 0; i < num; i++) {
49252+ curgid = (int)gidlist[i];
49253+ if (real == curgid)
49254+ break;
49255+ if (effective == curgid)
49256+ break;
49257+ if (fs == curgid)
49258+ break;
49259+ }
49260+ /* not in deny list */
49261+ if (i == num) {
49262+ realok = 1;
49263+ effectiveok = 1;
49264+ fsok = 1;
49265+ }
49266+ }
49267+
49268+ if (realok && effectiveok && fsok)
49269+ return 0;
49270+ else {
49271+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49272+ return 1;
49273+ }
49274+}
49275+
49276+void
49277+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49278+{
49279+ struct acl_role_label *role = task->role;
49280+ struct acl_subject_label *subj = NULL;
49281+ struct acl_object_label *obj;
49282+ struct file *filp;
49283+
49284+ if (unlikely(!(gr_status & GR_READY)))
49285+ return;
49286+
49287+ filp = task->exec_file;
49288+
49289+ /* kernel process, we'll give them the kernel role */
49290+ if (unlikely(!filp)) {
49291+ task->role = kernel_role;
49292+ task->acl = kernel_role->root_label;
49293+ return;
49294+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49295+ role = lookup_acl_role_label(task, uid, gid);
49296+
49297+ /* perform subject lookup in possibly new role
49298+ we can use this result below in the case where role == task->role
49299+ */
49300+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49301+
49302+ /* if we changed uid/gid, but result in the same role
49303+ and are using inheritance, don't lose the inherited subject
49304+ if current subject is other than what normal lookup
49305+ would result in, we arrived via inheritance, don't
49306+ lose subject
49307+ */
49308+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49309+ (subj == task->acl)))
49310+ task->acl = subj;
49311+
49312+ task->role = role;
49313+
49314+ task->is_writable = 0;
49315+
49316+ /* ignore additional mmap checks for processes that are writable
49317+ by the default ACL */
49318+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49319+ if (unlikely(obj->mode & GR_WRITE))
49320+ task->is_writable = 1;
49321+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49322+ if (unlikely(obj->mode & GR_WRITE))
49323+ task->is_writable = 1;
49324+
49325+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49326+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49327+#endif
49328+
49329+ gr_set_proc_res(task);
49330+
49331+ return;
49332+}
49333+
49334+int
49335+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49336+ const int unsafe_share)
49337+{
49338+ struct task_struct *task = current;
49339+ struct acl_subject_label *newacl;
49340+ struct acl_object_label *obj;
49341+ __u32 retmode;
49342+
49343+ if (unlikely(!(gr_status & GR_READY)))
49344+ return 0;
49345+
49346+ newacl = chk_subj_label(dentry, mnt, task->role);
49347+
49348+ task_lock(task);
49349+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49350+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49351+ !(task->role->roletype & GR_ROLE_GOD) &&
49352+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49353+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49354+ task_unlock(task);
49355+ if (unsafe_share)
49356+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49357+ else
49358+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49359+ return -EACCES;
49360+ }
49361+ task_unlock(task);
49362+
49363+ obj = chk_obj_label(dentry, mnt, task->acl);
49364+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49365+
49366+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49367+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49368+ if (obj->nested)
49369+ task->acl = obj->nested;
49370+ else
49371+ task->acl = newacl;
49372+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49373+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49374+
49375+ task->is_writable = 0;
49376+
49377+ /* ignore additional mmap checks for processes that are writable
49378+ by the default ACL */
49379+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49380+ if (unlikely(obj->mode & GR_WRITE))
49381+ task->is_writable = 1;
49382+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49383+ if (unlikely(obj->mode & GR_WRITE))
49384+ task->is_writable = 1;
49385+
49386+ gr_set_proc_res(task);
49387+
49388+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49389+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49390+#endif
49391+ return 0;
49392+}
49393+
49394+/* always called with valid inodev ptr */
49395+static void
49396+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49397+{
49398+ struct acl_object_label *matchpo;
49399+ struct acl_subject_label *matchps;
49400+ struct acl_subject_label *subj;
49401+ struct acl_role_label *role;
49402+ unsigned int x;
49403+
49404+ FOR_EACH_ROLE_START(role)
49405+ FOR_EACH_SUBJECT_START(role, subj, x)
49406+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49407+ matchpo->mode |= GR_DELETED;
49408+ FOR_EACH_SUBJECT_END(subj,x)
49409+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49410+ if (subj->inode == ino && subj->device == dev)
49411+ subj->mode |= GR_DELETED;
49412+ FOR_EACH_NESTED_SUBJECT_END(subj)
49413+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49414+ matchps->mode |= GR_DELETED;
49415+ FOR_EACH_ROLE_END(role)
49416+
49417+ inodev->nentry->deleted = 1;
49418+
49419+ return;
49420+}
49421+
49422+void
49423+gr_handle_delete(const ino_t ino, const dev_t dev)
49424+{
49425+ struct inodev_entry *inodev;
49426+
49427+ if (unlikely(!(gr_status & GR_READY)))
49428+ return;
49429+
49430+ write_lock(&gr_inode_lock);
49431+ inodev = lookup_inodev_entry(ino, dev);
49432+ if (inodev != NULL)
49433+ do_handle_delete(inodev, ino, dev);
49434+ write_unlock(&gr_inode_lock);
49435+
49436+ return;
49437+}
49438+
49439+static void
49440+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49441+ const ino_t newinode, const dev_t newdevice,
49442+ struct acl_subject_label *subj)
49443+{
49444+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49445+ struct acl_object_label *match;
49446+
49447+ match = subj->obj_hash[index];
49448+
49449+ while (match && (match->inode != oldinode ||
49450+ match->device != olddevice ||
49451+ !(match->mode & GR_DELETED)))
49452+ match = match->next;
49453+
49454+ if (match && (match->inode == oldinode)
49455+ && (match->device == olddevice)
49456+ && (match->mode & GR_DELETED)) {
49457+ if (match->prev == NULL) {
49458+ subj->obj_hash[index] = match->next;
49459+ if (match->next != NULL)
49460+ match->next->prev = NULL;
49461+ } else {
49462+ match->prev->next = match->next;
49463+ if (match->next != NULL)
49464+ match->next->prev = match->prev;
49465+ }
49466+ match->prev = NULL;
49467+ match->next = NULL;
49468+ match->inode = newinode;
49469+ match->device = newdevice;
49470+ match->mode &= ~GR_DELETED;
49471+
49472+ insert_acl_obj_label(match, subj);
49473+ }
49474+
49475+ return;
49476+}
49477+
49478+static void
49479+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49480+ const ino_t newinode, const dev_t newdevice,
49481+ struct acl_role_label *role)
49482+{
49483+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49484+ struct acl_subject_label *match;
49485+
49486+ match = role->subj_hash[index];
49487+
49488+ while (match && (match->inode != oldinode ||
49489+ match->device != olddevice ||
49490+ !(match->mode & GR_DELETED)))
49491+ match = match->next;
49492+
49493+ if (match && (match->inode == oldinode)
49494+ && (match->device == olddevice)
49495+ && (match->mode & GR_DELETED)) {
49496+ if (match->prev == NULL) {
49497+ role->subj_hash[index] = match->next;
49498+ if (match->next != NULL)
49499+ match->next->prev = NULL;
49500+ } else {
49501+ match->prev->next = match->next;
49502+ if (match->next != NULL)
49503+ match->next->prev = match->prev;
49504+ }
49505+ match->prev = NULL;
49506+ match->next = NULL;
49507+ match->inode = newinode;
49508+ match->device = newdevice;
49509+ match->mode &= ~GR_DELETED;
49510+
49511+ insert_acl_subj_label(match, role);
49512+ }
49513+
49514+ return;
49515+}
49516+
49517+static void
49518+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49519+ const ino_t newinode, const dev_t newdevice)
49520+{
49521+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49522+ struct inodev_entry *match;
49523+
49524+ match = inodev_set.i_hash[index];
49525+
49526+ while (match && (match->nentry->inode != oldinode ||
49527+ match->nentry->device != olddevice || !match->nentry->deleted))
49528+ match = match->next;
49529+
49530+ if (match && (match->nentry->inode == oldinode)
49531+ && (match->nentry->device == olddevice) &&
49532+ match->nentry->deleted) {
49533+ if (match->prev == NULL) {
49534+ inodev_set.i_hash[index] = match->next;
49535+ if (match->next != NULL)
49536+ match->next->prev = NULL;
49537+ } else {
49538+ match->prev->next = match->next;
49539+ if (match->next != NULL)
49540+ match->next->prev = match->prev;
49541+ }
49542+ match->prev = NULL;
49543+ match->next = NULL;
49544+ match->nentry->inode = newinode;
49545+ match->nentry->device = newdevice;
49546+ match->nentry->deleted = 0;
49547+
49548+ insert_inodev_entry(match);
49549+ }
49550+
49551+ return;
49552+}
49553+
49554+static void
49555+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
49556+{
49557+ struct acl_subject_label *subj;
49558+ struct acl_role_label *role;
49559+ unsigned int x;
49560+
49561+ FOR_EACH_ROLE_START(role)
49562+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
49563+
49564+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49565+ if ((subj->inode == ino) && (subj->device == dev)) {
49566+ subj->inode = ino;
49567+ subj->device = dev;
49568+ }
49569+ FOR_EACH_NESTED_SUBJECT_END(subj)
49570+ FOR_EACH_SUBJECT_START(role, subj, x)
49571+ update_acl_obj_label(matchn->inode, matchn->device,
49572+ ino, dev, subj);
49573+ FOR_EACH_SUBJECT_END(subj,x)
49574+ FOR_EACH_ROLE_END(role)
49575+
49576+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
49577+
49578+ return;
49579+}
49580+
49581+static void
49582+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49583+ const struct vfsmount *mnt)
49584+{
49585+ ino_t ino = dentry->d_inode->i_ino;
49586+ dev_t dev = __get_dev(dentry);
49587+
49588+ __do_handle_create(matchn, ino, dev);
49589+
49590+ return;
49591+}
49592+
49593+void
49594+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49595+{
49596+ struct name_entry *matchn;
49597+
49598+ if (unlikely(!(gr_status & GR_READY)))
49599+ return;
49600+
49601+ preempt_disable();
49602+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49603+
49604+ if (unlikely((unsigned long)matchn)) {
49605+ write_lock(&gr_inode_lock);
49606+ do_handle_create(matchn, dentry, mnt);
49607+ write_unlock(&gr_inode_lock);
49608+ }
49609+ preempt_enable();
49610+
49611+ return;
49612+}
49613+
49614+void
49615+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
49616+{
49617+ struct name_entry *matchn;
49618+
49619+ if (unlikely(!(gr_status & GR_READY)))
49620+ return;
49621+
49622+ preempt_disable();
49623+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
49624+
49625+ if (unlikely((unsigned long)matchn)) {
49626+ write_lock(&gr_inode_lock);
49627+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
49628+ write_unlock(&gr_inode_lock);
49629+ }
49630+ preempt_enable();
49631+
49632+ return;
49633+}
49634+
49635+void
49636+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49637+ struct dentry *old_dentry,
49638+ struct dentry *new_dentry,
49639+ struct vfsmount *mnt, const __u8 replace)
49640+{
49641+ struct name_entry *matchn;
49642+ struct inodev_entry *inodev;
49643+ ino_t old_ino = old_dentry->d_inode->i_ino;
49644+ dev_t old_dev = __get_dev(old_dentry);
49645+
49646+ /* vfs_rename swaps the name and parent link for old_dentry and
49647+ new_dentry
49648+ at this point, old_dentry has the new name, parent link, and inode
49649+ for the renamed file
49650+ if a file is being replaced by a rename, new_dentry has the inode
49651+ and name for the replaced file
49652+ */
49653+
49654+ if (unlikely(!(gr_status & GR_READY)))
49655+ return;
49656+
49657+ preempt_disable();
49658+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49659+
49660+ /* we wouldn't have to check d_inode if it weren't for
49661+ NFS silly-renaming
49662+ */
49663+
49664+ write_lock(&gr_inode_lock);
49665+ if (unlikely(replace && new_dentry->d_inode)) {
49666+ ino_t new_ino = new_dentry->d_inode->i_ino;
49667+ dev_t new_dev = __get_dev(new_dentry);
49668+
49669+ inodev = lookup_inodev_entry(new_ino, new_dev);
49670+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49671+ do_handle_delete(inodev, new_ino, new_dev);
49672+ }
49673+
49674+ inodev = lookup_inodev_entry(old_ino, old_dev);
49675+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49676+ do_handle_delete(inodev, old_ino, old_dev);
49677+
49678+ if (unlikely((unsigned long)matchn))
49679+ do_handle_create(matchn, old_dentry, mnt);
49680+
49681+ write_unlock(&gr_inode_lock);
49682+ preempt_enable();
49683+
49684+ return;
49685+}
49686+
49687+static int
49688+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49689+ unsigned char **sum)
49690+{
49691+ struct acl_role_label *r;
49692+ struct role_allowed_ip *ipp;
49693+ struct role_transition *trans;
49694+ unsigned int i;
49695+ int found = 0;
49696+ u32 curr_ip = current->signal->curr_ip;
49697+
49698+ current->signal->saved_ip = curr_ip;
49699+
49700+ /* check transition table */
49701+
49702+ for (trans = current->role->transitions; trans; trans = trans->next) {
49703+ if (!strcmp(rolename, trans->rolename)) {
49704+ found = 1;
49705+ break;
49706+ }
49707+ }
49708+
49709+ if (!found)
49710+ return 0;
49711+
49712+ /* handle special roles that do not require authentication
49713+ and check ip */
49714+
49715+ FOR_EACH_ROLE_START(r)
49716+ if (!strcmp(rolename, r->rolename) &&
49717+ (r->roletype & GR_ROLE_SPECIAL)) {
49718+ found = 0;
49719+ if (r->allowed_ips != NULL) {
49720+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49721+ if ((ntohl(curr_ip) & ipp->netmask) ==
49722+ (ntohl(ipp->addr) & ipp->netmask))
49723+ found = 1;
49724+ }
49725+ } else
49726+ found = 2;
49727+ if (!found)
49728+ return 0;
49729+
49730+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49731+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49732+ *salt = NULL;
49733+ *sum = NULL;
49734+ return 1;
49735+ }
49736+ }
49737+ FOR_EACH_ROLE_END(r)
49738+
49739+ for (i = 0; i < num_sprole_pws; i++) {
49740+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49741+ *salt = acl_special_roles[i]->salt;
49742+ *sum = acl_special_roles[i]->sum;
49743+ return 1;
49744+ }
49745+ }
49746+
49747+ return 0;
49748+}
49749+
49750+static void
49751+assign_special_role(char *rolename)
49752+{
49753+ struct acl_object_label *obj;
49754+ struct acl_role_label *r;
49755+ struct acl_role_label *assigned = NULL;
49756+ struct task_struct *tsk;
49757+ struct file *filp;
49758+
49759+ FOR_EACH_ROLE_START(r)
49760+ if (!strcmp(rolename, r->rolename) &&
49761+ (r->roletype & GR_ROLE_SPECIAL)) {
49762+ assigned = r;
49763+ break;
49764+ }
49765+ FOR_EACH_ROLE_END(r)
49766+
49767+ if (!assigned)
49768+ return;
49769+
49770+ read_lock(&tasklist_lock);
49771+ read_lock(&grsec_exec_file_lock);
49772+
49773+ tsk = current->real_parent;
49774+ if (tsk == NULL)
49775+ goto out_unlock;
49776+
49777+ filp = tsk->exec_file;
49778+ if (filp == NULL)
49779+ goto out_unlock;
49780+
49781+ tsk->is_writable = 0;
49782+
49783+ tsk->acl_sp_role = 1;
49784+ tsk->acl_role_id = ++acl_sp_role_value;
49785+ tsk->role = assigned;
49786+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49787+
49788+ /* ignore additional mmap checks for processes that are writable
49789+ by the default ACL */
49790+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49791+ if (unlikely(obj->mode & GR_WRITE))
49792+ tsk->is_writable = 1;
49793+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49794+ if (unlikely(obj->mode & GR_WRITE))
49795+ tsk->is_writable = 1;
49796+
49797+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49798+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49799+#endif
49800+
49801+out_unlock:
49802+ read_unlock(&grsec_exec_file_lock);
49803+ read_unlock(&tasklist_lock);
49804+ return;
49805+}
49806+
49807+int gr_check_secure_terminal(struct task_struct *task)
49808+{
49809+ struct task_struct *p, *p2, *p3;
49810+ struct files_struct *files;
49811+ struct fdtable *fdt;
49812+ struct file *our_file = NULL, *file;
49813+ int i;
49814+
49815+ if (task->signal->tty == NULL)
49816+ return 1;
49817+
49818+ files = get_files_struct(task);
49819+ if (files != NULL) {
49820+ rcu_read_lock();
49821+ fdt = files_fdtable(files);
49822+ for (i=0; i < fdt->max_fds; i++) {
49823+ file = fcheck_files(files, i);
49824+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49825+ get_file(file);
49826+ our_file = file;
49827+ }
49828+ }
49829+ rcu_read_unlock();
49830+ put_files_struct(files);
49831+ }
49832+
49833+ if (our_file == NULL)
49834+ return 1;
49835+
49836+ read_lock(&tasklist_lock);
49837+ do_each_thread(p2, p) {
49838+ files = get_files_struct(p);
49839+ if (files == NULL ||
49840+ (p->signal && p->signal->tty == task->signal->tty)) {
49841+ if (files != NULL)
49842+ put_files_struct(files);
49843+ continue;
49844+ }
49845+ rcu_read_lock();
49846+ fdt = files_fdtable(files);
49847+ for (i=0; i < fdt->max_fds; i++) {
49848+ file = fcheck_files(files, i);
49849+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49850+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49851+ p3 = task;
49852+ while (p3->pid > 0) {
49853+ if (p3 == p)
49854+ break;
49855+ p3 = p3->real_parent;
49856+ }
49857+ if (p3 == p)
49858+ break;
49859+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49860+ gr_handle_alertkill(p);
49861+ rcu_read_unlock();
49862+ put_files_struct(files);
49863+ read_unlock(&tasklist_lock);
49864+ fput(our_file);
49865+ return 0;
49866+ }
49867+ }
49868+ rcu_read_unlock();
49869+ put_files_struct(files);
49870+ } while_each_thread(p2, p);
49871+ read_unlock(&tasklist_lock);
49872+
49873+ fput(our_file);
49874+ return 1;
49875+}
49876+
49877+ssize_t
49878+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49879+{
49880+ struct gr_arg_wrapper uwrap;
49881+ unsigned char *sprole_salt = NULL;
49882+ unsigned char *sprole_sum = NULL;
49883+ int error = sizeof (struct gr_arg_wrapper);
49884+ int error2 = 0;
49885+
49886+ mutex_lock(&gr_dev_mutex);
49887+
49888+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49889+ error = -EPERM;
49890+ goto out;
49891+ }
49892+
49893+ if (count != sizeof (struct gr_arg_wrapper)) {
49894+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49895+ error = -EINVAL;
49896+ goto out;
49897+ }
49898+
49899+
49900+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49901+ gr_auth_expires = 0;
49902+ gr_auth_attempts = 0;
49903+ }
49904+
49905+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49906+ error = -EFAULT;
49907+ goto out;
49908+ }
49909+
49910+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49911+ error = -EINVAL;
49912+ goto out;
49913+ }
49914+
49915+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49916+ error = -EFAULT;
49917+ goto out;
49918+ }
49919+
49920+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49921+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49922+ time_after(gr_auth_expires, get_seconds())) {
49923+ error = -EBUSY;
49924+ goto out;
49925+ }
49926+
49927+ /* if non-root trying to do anything other than use a special role,
49928+ do not attempt authentication, do not count towards authentication
49929+ locking
49930+ */
49931+
49932+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49933+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49934+ current_uid()) {
49935+ error = -EPERM;
49936+ goto out;
49937+ }
49938+
49939+ /* ensure pw and special role name are null terminated */
49940+
49941+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49942+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49943+
49944+ /* Okay.
49945+ * We have our enough of the argument structure..(we have yet
49946+ * to copy_from_user the tables themselves) . Copy the tables
49947+ * only if we need them, i.e. for loading operations. */
49948+
49949+ switch (gr_usermode->mode) {
49950+ case GR_STATUS:
49951+ if (gr_status & GR_READY) {
49952+ error = 1;
49953+ if (!gr_check_secure_terminal(current))
49954+ error = 3;
49955+ } else
49956+ error = 2;
49957+ goto out;
49958+ case GR_SHUTDOWN:
49959+ if ((gr_status & GR_READY)
49960+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49961+ pax_open_kernel();
49962+ gr_status &= ~GR_READY;
49963+ pax_close_kernel();
49964+
49965+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49966+ free_variables();
49967+ memset(gr_usermode, 0, sizeof (struct gr_arg));
49968+ memset(gr_system_salt, 0, GR_SALT_LEN);
49969+ memset(gr_system_sum, 0, GR_SHA_LEN);
49970+ } else if (gr_status & GR_READY) {
49971+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49972+ error = -EPERM;
49973+ } else {
49974+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49975+ error = -EAGAIN;
49976+ }
49977+ break;
49978+ case GR_ENABLE:
49979+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49980+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49981+ else {
49982+ if (gr_status & GR_READY)
49983+ error = -EAGAIN;
49984+ else
49985+ error = error2;
49986+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49987+ }
49988+ break;
49989+ case GR_RELOAD:
49990+ if (!(gr_status & GR_READY)) {
49991+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49992+ error = -EAGAIN;
49993+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49994+ preempt_disable();
49995+
49996+ pax_open_kernel();
49997+ gr_status &= ~GR_READY;
49998+ pax_close_kernel();
49999+
50000+ free_variables();
50001+ if (!(error2 = gracl_init(gr_usermode))) {
50002+ preempt_enable();
50003+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50004+ } else {
50005+ preempt_enable();
50006+ error = error2;
50007+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50008+ }
50009+ } else {
50010+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50011+ error = -EPERM;
50012+ }
50013+ break;
50014+ case GR_SEGVMOD:
50015+ if (unlikely(!(gr_status & GR_READY))) {
50016+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50017+ error = -EAGAIN;
50018+ break;
50019+ }
50020+
50021+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50022+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50023+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50024+ struct acl_subject_label *segvacl;
50025+ segvacl =
50026+ lookup_acl_subj_label(gr_usermode->segv_inode,
50027+ gr_usermode->segv_device,
50028+ current->role);
50029+ if (segvacl) {
50030+ segvacl->crashes = 0;
50031+ segvacl->expires = 0;
50032+ }
50033+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50034+ gr_remove_uid(gr_usermode->segv_uid);
50035+ }
50036+ } else {
50037+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50038+ error = -EPERM;
50039+ }
50040+ break;
50041+ case GR_SPROLE:
50042+ case GR_SPROLEPAM:
50043+ if (unlikely(!(gr_status & GR_READY))) {
50044+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50045+ error = -EAGAIN;
50046+ break;
50047+ }
50048+
50049+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50050+ current->role->expires = 0;
50051+ current->role->auth_attempts = 0;
50052+ }
50053+
50054+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50055+ time_after(current->role->expires, get_seconds())) {
50056+ error = -EBUSY;
50057+ goto out;
50058+ }
50059+
50060+ if (lookup_special_role_auth
50061+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50062+ && ((!sprole_salt && !sprole_sum)
50063+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50064+ char *p = "";
50065+ assign_special_role(gr_usermode->sp_role);
50066+ read_lock(&tasklist_lock);
50067+ if (current->real_parent)
50068+ p = current->real_parent->role->rolename;
50069+ read_unlock(&tasklist_lock);
50070+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50071+ p, acl_sp_role_value);
50072+ } else {
50073+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50074+ error = -EPERM;
50075+ if(!(current->role->auth_attempts++))
50076+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50077+
50078+ goto out;
50079+ }
50080+ break;
50081+ case GR_UNSPROLE:
50082+ if (unlikely(!(gr_status & GR_READY))) {
50083+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50084+ error = -EAGAIN;
50085+ break;
50086+ }
50087+
50088+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50089+ char *p = "";
50090+ int i = 0;
50091+
50092+ read_lock(&tasklist_lock);
50093+ if (current->real_parent) {
50094+ p = current->real_parent->role->rolename;
50095+ i = current->real_parent->acl_role_id;
50096+ }
50097+ read_unlock(&tasklist_lock);
50098+
50099+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50100+ gr_set_acls(1);
50101+ } else {
50102+ error = -EPERM;
50103+ goto out;
50104+ }
50105+ break;
50106+ default:
50107+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50108+ error = -EINVAL;
50109+ break;
50110+ }
50111+
50112+ if (error != -EPERM)
50113+ goto out;
50114+
50115+ if(!(gr_auth_attempts++))
50116+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50117+
50118+ out:
50119+ mutex_unlock(&gr_dev_mutex);
50120+ return error;
50121+}
50122+
50123+/* must be called with
50124+ rcu_read_lock();
50125+ read_lock(&tasklist_lock);
50126+ read_lock(&grsec_exec_file_lock);
50127+*/
50128+int gr_apply_subject_to_task(struct task_struct *task)
50129+{
50130+ struct acl_object_label *obj;
50131+ char *tmpname;
50132+ struct acl_subject_label *tmpsubj;
50133+ struct file *filp;
50134+ struct name_entry *nmatch;
50135+
50136+ filp = task->exec_file;
50137+ if (filp == NULL)
50138+ return 0;
50139+
50140+ /* the following is to apply the correct subject
50141+ on binaries running when the RBAC system
50142+ is enabled, when the binaries have been
50143+ replaced or deleted since their execution
50144+ -----
50145+ when the RBAC system starts, the inode/dev
50146+ from exec_file will be one the RBAC system
50147+ is unaware of. It only knows the inode/dev
50148+ of the present file on disk, or the absence
50149+ of it.
50150+ */
50151+ preempt_disable();
50152+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50153+
50154+ nmatch = lookup_name_entry(tmpname);
50155+ preempt_enable();
50156+ tmpsubj = NULL;
50157+ if (nmatch) {
50158+ if (nmatch->deleted)
50159+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50160+ else
50161+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50162+ if (tmpsubj != NULL)
50163+ task->acl = tmpsubj;
50164+ }
50165+ if (tmpsubj == NULL)
50166+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50167+ task->role);
50168+ if (task->acl) {
50169+ task->is_writable = 0;
50170+ /* ignore additional mmap checks for processes that are writable
50171+ by the default ACL */
50172+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50173+ if (unlikely(obj->mode & GR_WRITE))
50174+ task->is_writable = 1;
50175+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50176+ if (unlikely(obj->mode & GR_WRITE))
50177+ task->is_writable = 1;
50178+
50179+ gr_set_proc_res(task);
50180+
50181+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50182+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50183+#endif
50184+ } else {
50185+ return 1;
50186+ }
50187+
50188+ return 0;
50189+}
50190+
50191+int
50192+gr_set_acls(const int type)
50193+{
50194+ struct task_struct *task, *task2;
50195+ struct acl_role_label *role = current->role;
50196+ __u16 acl_role_id = current->acl_role_id;
50197+ const struct cred *cred;
50198+ int ret;
50199+
50200+ rcu_read_lock();
50201+ read_lock(&tasklist_lock);
50202+ read_lock(&grsec_exec_file_lock);
50203+ do_each_thread(task2, task) {
50204+ /* check to see if we're called from the exit handler,
50205+ if so, only replace ACLs that have inherited the admin
50206+ ACL */
50207+
50208+ if (type && (task->role != role ||
50209+ task->acl_role_id != acl_role_id))
50210+ continue;
50211+
50212+ task->acl_role_id = 0;
50213+ task->acl_sp_role = 0;
50214+
50215+ if (task->exec_file) {
50216+ cred = __task_cred(task);
50217+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50218+ ret = gr_apply_subject_to_task(task);
50219+ if (ret) {
50220+ read_unlock(&grsec_exec_file_lock);
50221+ read_unlock(&tasklist_lock);
50222+ rcu_read_unlock();
50223+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50224+ return ret;
50225+ }
50226+ } else {
50227+ // it's a kernel process
50228+ task->role = kernel_role;
50229+ task->acl = kernel_role->root_label;
50230+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50231+ task->acl->mode &= ~GR_PROCFIND;
50232+#endif
50233+ }
50234+ } while_each_thread(task2, task);
50235+ read_unlock(&grsec_exec_file_lock);
50236+ read_unlock(&tasklist_lock);
50237+ rcu_read_unlock();
50238+
50239+ return 0;
50240+}
50241+
50242+void
50243+gr_learn_resource(const struct task_struct *task,
50244+ const int res, const unsigned long wanted, const int gt)
50245+{
50246+ struct acl_subject_label *acl;
50247+ const struct cred *cred;
50248+
50249+ if (unlikely((gr_status & GR_READY) &&
50250+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50251+ goto skip_reslog;
50252+
50253+#ifdef CONFIG_GRKERNSEC_RESLOG
50254+ gr_log_resource(task, res, wanted, gt);
50255+#endif
50256+ skip_reslog:
50257+
50258+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50259+ return;
50260+
50261+ acl = task->acl;
50262+
50263+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50264+ !(acl->resmask & (1 << (unsigned short) res))))
50265+ return;
50266+
50267+ if (wanted >= acl->res[res].rlim_cur) {
50268+ unsigned long res_add;
50269+
50270+ res_add = wanted;
50271+ switch (res) {
50272+ case RLIMIT_CPU:
50273+ res_add += GR_RLIM_CPU_BUMP;
50274+ break;
50275+ case RLIMIT_FSIZE:
50276+ res_add += GR_RLIM_FSIZE_BUMP;
50277+ break;
50278+ case RLIMIT_DATA:
50279+ res_add += GR_RLIM_DATA_BUMP;
50280+ break;
50281+ case RLIMIT_STACK:
50282+ res_add += GR_RLIM_STACK_BUMP;
50283+ break;
50284+ case RLIMIT_CORE:
50285+ res_add += GR_RLIM_CORE_BUMP;
50286+ break;
50287+ case RLIMIT_RSS:
50288+ res_add += GR_RLIM_RSS_BUMP;
50289+ break;
50290+ case RLIMIT_NPROC:
50291+ res_add += GR_RLIM_NPROC_BUMP;
50292+ break;
50293+ case RLIMIT_NOFILE:
50294+ res_add += GR_RLIM_NOFILE_BUMP;
50295+ break;
50296+ case RLIMIT_MEMLOCK:
50297+ res_add += GR_RLIM_MEMLOCK_BUMP;
50298+ break;
50299+ case RLIMIT_AS:
50300+ res_add += GR_RLIM_AS_BUMP;
50301+ break;
50302+ case RLIMIT_LOCKS:
50303+ res_add += GR_RLIM_LOCKS_BUMP;
50304+ break;
50305+ case RLIMIT_SIGPENDING:
50306+ res_add += GR_RLIM_SIGPENDING_BUMP;
50307+ break;
50308+ case RLIMIT_MSGQUEUE:
50309+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50310+ break;
50311+ case RLIMIT_NICE:
50312+ res_add += GR_RLIM_NICE_BUMP;
50313+ break;
50314+ case RLIMIT_RTPRIO:
50315+ res_add += GR_RLIM_RTPRIO_BUMP;
50316+ break;
50317+ case RLIMIT_RTTIME:
50318+ res_add += GR_RLIM_RTTIME_BUMP;
50319+ break;
50320+ }
50321+
50322+ acl->res[res].rlim_cur = res_add;
50323+
50324+ if (wanted > acl->res[res].rlim_max)
50325+ acl->res[res].rlim_max = res_add;
50326+
50327+ /* only log the subject filename, since resource logging is supported for
50328+ single-subject learning only */
50329+ rcu_read_lock();
50330+ cred = __task_cred(task);
50331+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50332+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50333+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50334+ "", (unsigned long) res, &task->signal->saved_ip);
50335+ rcu_read_unlock();
50336+ }
50337+
50338+ return;
50339+}
50340+
50341+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50342+void
50343+pax_set_initial_flags(struct linux_binprm *bprm)
50344+{
50345+ struct task_struct *task = current;
50346+ struct acl_subject_label *proc;
50347+ unsigned long flags;
50348+
50349+ if (unlikely(!(gr_status & GR_READY)))
50350+ return;
50351+
50352+ flags = pax_get_flags(task);
50353+
50354+ proc = task->acl;
50355+
50356+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50357+ flags &= ~MF_PAX_PAGEEXEC;
50358+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50359+ flags &= ~MF_PAX_SEGMEXEC;
50360+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50361+ flags &= ~MF_PAX_RANDMMAP;
50362+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50363+ flags &= ~MF_PAX_EMUTRAMP;
50364+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50365+ flags &= ~MF_PAX_MPROTECT;
50366+
50367+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50368+ flags |= MF_PAX_PAGEEXEC;
50369+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50370+ flags |= MF_PAX_SEGMEXEC;
50371+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50372+ flags |= MF_PAX_RANDMMAP;
50373+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50374+ flags |= MF_PAX_EMUTRAMP;
50375+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50376+ flags |= MF_PAX_MPROTECT;
50377+
50378+ pax_set_flags(task, flags);
50379+
50380+ return;
50381+}
50382+#endif
50383+
50384+#ifdef CONFIG_SYSCTL
50385+/* Eric Biederman likes breaking userland ABI and every inode-based security
50386+ system to save 35kb of memory */
50387+
50388+/* we modify the passed in filename, but adjust it back before returning */
50389+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50390+{
50391+ struct name_entry *nmatch;
50392+ char *p, *lastp = NULL;
50393+ struct acl_object_label *obj = NULL, *tmp;
50394+ struct acl_subject_label *tmpsubj;
50395+ char c = '\0';
50396+
50397+ read_lock(&gr_inode_lock);
50398+
50399+ p = name + len - 1;
50400+ do {
50401+ nmatch = lookup_name_entry(name);
50402+ if (lastp != NULL)
50403+ *lastp = c;
50404+
50405+ if (nmatch == NULL)
50406+ goto next_component;
50407+ tmpsubj = current->acl;
50408+ do {
50409+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50410+ if (obj != NULL) {
50411+ tmp = obj->globbed;
50412+ while (tmp) {
50413+ if (!glob_match(tmp->filename, name)) {
50414+ obj = tmp;
50415+ goto found_obj;
50416+ }
50417+ tmp = tmp->next;
50418+ }
50419+ goto found_obj;
50420+ }
50421+ } while ((tmpsubj = tmpsubj->parent_subject));
50422+next_component:
50423+ /* end case */
50424+ if (p == name)
50425+ break;
50426+
50427+ while (*p != '/')
50428+ p--;
50429+ if (p == name)
50430+ lastp = p + 1;
50431+ else {
50432+ lastp = p;
50433+ p--;
50434+ }
50435+ c = *lastp;
50436+ *lastp = '\0';
50437+ } while (1);
50438+found_obj:
50439+ read_unlock(&gr_inode_lock);
50440+ /* obj returned will always be non-null */
50441+ return obj;
50442+}
50443+
50444+/* returns 0 when allowing, non-zero on error
50445+ op of 0 is used for readdir, so we don't log the names of hidden files
50446+*/
50447+__u32
50448+gr_handle_sysctl(const struct ctl_table *table, const int op)
50449+{
50450+ struct ctl_table *tmp;
50451+ const char *proc_sys = "/proc/sys";
50452+ char *path;
50453+ struct acl_object_label *obj;
50454+ unsigned short len = 0, pos = 0, depth = 0, i;
50455+ __u32 err = 0;
50456+ __u32 mode = 0;
50457+
50458+ if (unlikely(!(gr_status & GR_READY)))
50459+ return 0;
50460+
50461+ /* for now, ignore operations on non-sysctl entries if it's not a
50462+ readdir*/
50463+ if (table->child != NULL && op != 0)
50464+ return 0;
50465+
50466+ mode |= GR_FIND;
50467+ /* it's only a read if it's an entry, read on dirs is for readdir */
50468+ if (op & MAY_READ)
50469+ mode |= GR_READ;
50470+ if (op & MAY_WRITE)
50471+ mode |= GR_WRITE;
50472+
50473+ preempt_disable();
50474+
50475+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50476+
50477+ /* it's only a read/write if it's an actual entry, not a dir
50478+ (which are opened for readdir)
50479+ */
50480+
50481+ /* convert the requested sysctl entry into a pathname */
50482+
50483+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50484+ len += strlen(tmp->procname);
50485+ len++;
50486+ depth++;
50487+ }
50488+
50489+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50490+ /* deny */
50491+ goto out;
50492+ }
50493+
50494+ memset(path, 0, PAGE_SIZE);
50495+
50496+ memcpy(path, proc_sys, strlen(proc_sys));
50497+
50498+ pos += strlen(proc_sys);
50499+
50500+ for (; depth > 0; depth--) {
50501+ path[pos] = '/';
50502+ pos++;
50503+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50504+ if (depth == i) {
50505+ memcpy(path + pos, tmp->procname,
50506+ strlen(tmp->procname));
50507+ pos += strlen(tmp->procname);
50508+ }
50509+ i++;
50510+ }
50511+ }
50512+
50513+ obj = gr_lookup_by_name(path, pos);
50514+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50515+
50516+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50517+ ((err & mode) != mode))) {
50518+ __u32 new_mode = mode;
50519+
50520+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50521+
50522+ err = 0;
50523+ gr_log_learn_sysctl(path, new_mode);
50524+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50525+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50526+ err = -ENOENT;
50527+ } else if (!(err & GR_FIND)) {
50528+ err = -ENOENT;
50529+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50530+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50531+ path, (mode & GR_READ) ? " reading" : "",
50532+ (mode & GR_WRITE) ? " writing" : "");
50533+ err = -EACCES;
50534+ } else if ((err & mode) != mode) {
50535+ err = -EACCES;
50536+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50537+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50538+ path, (mode & GR_READ) ? " reading" : "",
50539+ (mode & GR_WRITE) ? " writing" : "");
50540+ err = 0;
50541+ } else
50542+ err = 0;
50543+
50544+ out:
50545+ preempt_enable();
50546+
50547+ return err;
50548+}
50549+#endif
50550+
50551+int
50552+gr_handle_proc_ptrace(struct task_struct *task)
50553+{
50554+ struct file *filp;
50555+ struct task_struct *tmp = task;
50556+ struct task_struct *curtemp = current;
50557+ __u32 retmode;
50558+
50559+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50560+ if (unlikely(!(gr_status & GR_READY)))
50561+ return 0;
50562+#endif
50563+
50564+ read_lock(&tasklist_lock);
50565+ read_lock(&grsec_exec_file_lock);
50566+ filp = task->exec_file;
50567+
50568+ while (tmp->pid > 0) {
50569+ if (tmp == curtemp)
50570+ break;
50571+ tmp = tmp->real_parent;
50572+ }
50573+
50574+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50575+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50576+ read_unlock(&grsec_exec_file_lock);
50577+ read_unlock(&tasklist_lock);
50578+ return 1;
50579+ }
50580+
50581+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50582+ if (!(gr_status & GR_READY)) {
50583+ read_unlock(&grsec_exec_file_lock);
50584+ read_unlock(&tasklist_lock);
50585+ return 0;
50586+ }
50587+#endif
50588+
50589+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50590+ read_unlock(&grsec_exec_file_lock);
50591+ read_unlock(&tasklist_lock);
50592+
50593+ if (retmode & GR_NOPTRACE)
50594+ return 1;
50595+
50596+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50597+ && (current->acl != task->acl || (current->acl != current->role->root_label
50598+ && current->pid != task->pid)))
50599+ return 1;
50600+
50601+ return 0;
50602+}
50603+
50604+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50605+{
50606+ if (unlikely(!(gr_status & GR_READY)))
50607+ return;
50608+
50609+ if (!(current->role->roletype & GR_ROLE_GOD))
50610+ return;
50611+
50612+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50613+ p->role->rolename, gr_task_roletype_to_char(p),
50614+ p->acl->filename);
50615+}
50616+
50617+int
50618+gr_handle_ptrace(struct task_struct *task, const long request)
50619+{
50620+ struct task_struct *tmp = task;
50621+ struct task_struct *curtemp = current;
50622+ __u32 retmode;
50623+
50624+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50625+ if (unlikely(!(gr_status & GR_READY)))
50626+ return 0;
50627+#endif
50628+
50629+ read_lock(&tasklist_lock);
50630+ while (tmp->pid > 0) {
50631+ if (tmp == curtemp)
50632+ break;
50633+ tmp = tmp->real_parent;
50634+ }
50635+
50636+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50637+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50638+ read_unlock(&tasklist_lock);
50639+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50640+ return 1;
50641+ }
50642+ read_unlock(&tasklist_lock);
50643+
50644+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50645+ if (!(gr_status & GR_READY))
50646+ return 0;
50647+#endif
50648+
50649+ read_lock(&grsec_exec_file_lock);
50650+ if (unlikely(!task->exec_file)) {
50651+ read_unlock(&grsec_exec_file_lock);
50652+ return 0;
50653+ }
50654+
50655+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50656+ read_unlock(&grsec_exec_file_lock);
50657+
50658+ if (retmode & GR_NOPTRACE) {
50659+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50660+ return 1;
50661+ }
50662+
50663+ if (retmode & GR_PTRACERD) {
50664+ switch (request) {
50665+ case PTRACE_POKETEXT:
50666+ case PTRACE_POKEDATA:
50667+ case PTRACE_POKEUSR:
50668+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50669+ case PTRACE_SETREGS:
50670+ case PTRACE_SETFPREGS:
50671+#endif
50672+#ifdef CONFIG_X86
50673+ case PTRACE_SETFPXREGS:
50674+#endif
50675+#ifdef CONFIG_ALTIVEC
50676+ case PTRACE_SETVRREGS:
50677+#endif
50678+ return 1;
50679+ default:
50680+ return 0;
50681+ }
50682+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50683+ !(current->role->roletype & GR_ROLE_GOD) &&
50684+ (current->acl != task->acl)) {
50685+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50686+ return 1;
50687+ }
50688+
50689+ return 0;
50690+}
50691+
50692+static int is_writable_mmap(const struct file *filp)
50693+{
50694+ struct task_struct *task = current;
50695+ struct acl_object_label *obj, *obj2;
50696+
50697+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50698+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50699+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50700+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50701+ task->role->root_label);
50702+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50703+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50704+ return 1;
50705+ }
50706+ }
50707+ return 0;
50708+}
50709+
50710+int
50711+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50712+{
50713+ __u32 mode;
50714+
50715+ if (unlikely(!file || !(prot & PROT_EXEC)))
50716+ return 1;
50717+
50718+ if (is_writable_mmap(file))
50719+ return 0;
50720+
50721+ mode =
50722+ gr_search_file(file->f_path.dentry,
50723+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50724+ file->f_path.mnt);
50725+
50726+ if (!gr_tpe_allow(file))
50727+ return 0;
50728+
50729+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50730+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50731+ return 0;
50732+ } else if (unlikely(!(mode & GR_EXEC))) {
50733+ return 0;
50734+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50735+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50736+ return 1;
50737+ }
50738+
50739+ return 1;
50740+}
50741+
50742+int
50743+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50744+{
50745+ __u32 mode;
50746+
50747+ if (unlikely(!file || !(prot & PROT_EXEC)))
50748+ return 1;
50749+
50750+ if (is_writable_mmap(file))
50751+ return 0;
50752+
50753+ mode =
50754+ gr_search_file(file->f_path.dentry,
50755+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50756+ file->f_path.mnt);
50757+
50758+ if (!gr_tpe_allow(file))
50759+ return 0;
50760+
50761+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50762+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50763+ return 0;
50764+ } else if (unlikely(!(mode & GR_EXEC))) {
50765+ return 0;
50766+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50767+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50768+ return 1;
50769+ }
50770+
50771+ return 1;
50772+}
50773+
50774+void
50775+gr_acl_handle_psacct(struct task_struct *task, const long code)
50776+{
50777+ unsigned long runtime;
50778+ unsigned long cputime;
50779+ unsigned int wday, cday;
50780+ __u8 whr, chr;
50781+ __u8 wmin, cmin;
50782+ __u8 wsec, csec;
50783+ struct timespec timeval;
50784+
50785+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50786+ !(task->acl->mode & GR_PROCACCT)))
50787+ return;
50788+
50789+ do_posix_clock_monotonic_gettime(&timeval);
50790+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50791+ wday = runtime / (3600 * 24);
50792+ runtime -= wday * (3600 * 24);
50793+ whr = runtime / 3600;
50794+ runtime -= whr * 3600;
50795+ wmin = runtime / 60;
50796+ runtime -= wmin * 60;
50797+ wsec = runtime;
50798+
50799+ cputime = (task->utime + task->stime) / HZ;
50800+ cday = cputime / (3600 * 24);
50801+ cputime -= cday * (3600 * 24);
50802+ chr = cputime / 3600;
50803+ cputime -= chr * 3600;
50804+ cmin = cputime / 60;
50805+ cputime -= cmin * 60;
50806+ csec = cputime;
50807+
50808+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50809+
50810+ return;
50811+}
50812+
50813+void gr_set_kernel_label(struct task_struct *task)
50814+{
50815+ if (gr_status & GR_READY) {
50816+ task->role = kernel_role;
50817+ task->acl = kernel_role->root_label;
50818+ }
50819+ return;
50820+}
50821+
50822+#ifdef CONFIG_TASKSTATS
50823+int gr_is_taskstats_denied(int pid)
50824+{
50825+ struct task_struct *task;
50826+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50827+ const struct cred *cred;
50828+#endif
50829+ int ret = 0;
50830+
50831+ /* restrict taskstats viewing to un-chrooted root users
50832+ who have the 'view' subject flag if the RBAC system is enabled
50833+ */
50834+
50835+ rcu_read_lock();
50836+ read_lock(&tasklist_lock);
50837+ task = find_task_by_vpid(pid);
50838+ if (task) {
50839+#ifdef CONFIG_GRKERNSEC_CHROOT
50840+ if (proc_is_chrooted(task))
50841+ ret = -EACCES;
50842+#endif
50843+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50844+ cred = __task_cred(task);
50845+#ifdef CONFIG_GRKERNSEC_PROC_USER
50846+ if (cred->uid != 0)
50847+ ret = -EACCES;
50848+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50849+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50850+ ret = -EACCES;
50851+#endif
50852+#endif
50853+ if (gr_status & GR_READY) {
50854+ if (!(task->acl->mode & GR_VIEW))
50855+ ret = -EACCES;
50856+ }
50857+ } else
50858+ ret = -ENOENT;
50859+
50860+ read_unlock(&tasklist_lock);
50861+ rcu_read_unlock();
50862+
50863+ return ret;
50864+}
50865+#endif
50866+
50867+/* AUXV entries are filled via a descendant of search_binary_handler
50868+ after we've already applied the subject for the target
50869+*/
50870+int gr_acl_enable_at_secure(void)
50871+{
50872+ if (unlikely(!(gr_status & GR_READY)))
50873+ return 0;
50874+
50875+ if (current->acl->mode & GR_ATSECURE)
50876+ return 1;
50877+
50878+ return 0;
50879+}
50880+
50881+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50882+{
50883+ struct task_struct *task = current;
50884+ struct dentry *dentry = file->f_path.dentry;
50885+ struct vfsmount *mnt = file->f_path.mnt;
50886+ struct acl_object_label *obj, *tmp;
50887+ struct acl_subject_label *subj;
50888+ unsigned int bufsize;
50889+ int is_not_root;
50890+ char *path;
50891+ dev_t dev = __get_dev(dentry);
50892+
50893+ if (unlikely(!(gr_status & GR_READY)))
50894+ return 1;
50895+
50896+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50897+ return 1;
50898+
50899+ /* ignore Eric Biederman */
50900+ if (IS_PRIVATE(dentry->d_inode))
50901+ return 1;
50902+
50903+ subj = task->acl;
50904+ do {
50905+ obj = lookup_acl_obj_label(ino, dev, subj);
50906+ if (obj != NULL)
50907+ return (obj->mode & GR_FIND) ? 1 : 0;
50908+ } while ((subj = subj->parent_subject));
50909+
50910+ /* this is purely an optimization since we're looking for an object
50911+ for the directory we're doing a readdir on
50912+ if it's possible for any globbed object to match the entry we're
50913+ filling into the directory, then the object we find here will be
50914+ an anchor point with attached globbed objects
50915+ */
50916+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50917+ if (obj->globbed == NULL)
50918+ return (obj->mode & GR_FIND) ? 1 : 0;
50919+
50920+ is_not_root = ((obj->filename[0] == '/') &&
50921+ (obj->filename[1] == '\0')) ? 0 : 1;
50922+ bufsize = PAGE_SIZE - namelen - is_not_root;
50923+
50924+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50925+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50926+ return 1;
50927+
50928+ preempt_disable();
50929+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50930+ bufsize);
50931+
50932+ bufsize = strlen(path);
50933+
50934+ /* if base is "/", don't append an additional slash */
50935+ if (is_not_root)
50936+ *(path + bufsize) = '/';
50937+ memcpy(path + bufsize + is_not_root, name, namelen);
50938+ *(path + bufsize + namelen + is_not_root) = '\0';
50939+
50940+ tmp = obj->globbed;
50941+ while (tmp) {
50942+ if (!glob_match(tmp->filename, path)) {
50943+ preempt_enable();
50944+ return (tmp->mode & GR_FIND) ? 1 : 0;
50945+ }
50946+ tmp = tmp->next;
50947+ }
50948+ preempt_enable();
50949+ return (obj->mode & GR_FIND) ? 1 : 0;
50950+}
50951+
50952+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50953+EXPORT_SYMBOL(gr_acl_is_enabled);
50954+#endif
50955+EXPORT_SYMBOL(gr_learn_resource);
50956+EXPORT_SYMBOL(gr_set_kernel_label);
50957+#ifdef CONFIG_SECURITY
50958+EXPORT_SYMBOL(gr_check_user_change);
50959+EXPORT_SYMBOL(gr_check_group_change);
50960+#endif
50961+
50962diff -urNp linux-3.0.7/grsecurity/gracl_cap.c linux-3.0.7/grsecurity/gracl_cap.c
50963--- linux-3.0.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50964+++ linux-3.0.7/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
50965@@ -0,0 +1,101 @@
50966+#include <linux/kernel.h>
50967+#include <linux/module.h>
50968+#include <linux/sched.h>
50969+#include <linux/gracl.h>
50970+#include <linux/grsecurity.h>
50971+#include <linux/grinternal.h>
50972+
50973+extern const char *captab_log[];
50974+extern int captab_log_entries;
50975+
50976+int
50977+gr_acl_is_capable(const int cap)
50978+{
50979+ struct task_struct *task = current;
50980+ const struct cred *cred = current_cred();
50981+ struct acl_subject_label *curracl;
50982+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50983+ kernel_cap_t cap_audit = __cap_empty_set;
50984+
50985+ if (!gr_acl_is_enabled())
50986+ return 1;
50987+
50988+ curracl = task->acl;
50989+
50990+ cap_drop = curracl->cap_lower;
50991+ cap_mask = curracl->cap_mask;
50992+ cap_audit = curracl->cap_invert_audit;
50993+
50994+ while ((curracl = curracl->parent_subject)) {
50995+ /* if the cap isn't specified in the current computed mask but is specified in the
50996+ current level subject, and is lowered in the current level subject, then add
50997+ it to the set of dropped capabilities
50998+ otherwise, add the current level subject's mask to the current computed mask
50999+ */
51000+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51001+ cap_raise(cap_mask, cap);
51002+ if (cap_raised(curracl->cap_lower, cap))
51003+ cap_raise(cap_drop, cap);
51004+ if (cap_raised(curracl->cap_invert_audit, cap))
51005+ cap_raise(cap_audit, cap);
51006+ }
51007+ }
51008+
51009+ if (!cap_raised(cap_drop, cap)) {
51010+ if (cap_raised(cap_audit, cap))
51011+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51012+ return 1;
51013+ }
51014+
51015+ curracl = task->acl;
51016+
51017+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51018+ && cap_raised(cred->cap_effective, cap)) {
51019+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51020+ task->role->roletype, cred->uid,
51021+ cred->gid, task->exec_file ?
51022+ gr_to_filename(task->exec_file->f_path.dentry,
51023+ task->exec_file->f_path.mnt) : curracl->filename,
51024+ curracl->filename, 0UL,
51025+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51026+ return 1;
51027+ }
51028+
51029+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51030+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51031+ return 0;
51032+}
51033+
51034+int
51035+gr_acl_is_capable_nolog(const int cap)
51036+{
51037+ struct acl_subject_label *curracl;
51038+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51039+
51040+ if (!gr_acl_is_enabled())
51041+ return 1;
51042+
51043+ curracl = current->acl;
51044+
51045+ cap_drop = curracl->cap_lower;
51046+ cap_mask = curracl->cap_mask;
51047+
51048+ while ((curracl = curracl->parent_subject)) {
51049+ /* if the cap isn't specified in the current computed mask but is specified in the
51050+ current level subject, and is lowered in the current level subject, then add
51051+ it to the set of dropped capabilities
51052+ otherwise, add the current level subject's mask to the current computed mask
51053+ */
51054+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51055+ cap_raise(cap_mask, cap);
51056+ if (cap_raised(curracl->cap_lower, cap))
51057+ cap_raise(cap_drop, cap);
51058+ }
51059+ }
51060+
51061+ if (!cap_raised(cap_drop, cap))
51062+ return 1;
51063+
51064+ return 0;
51065+}
51066+
51067diff -urNp linux-3.0.7/grsecurity/gracl_fs.c linux-3.0.7/grsecurity/gracl_fs.c
51068--- linux-3.0.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51069+++ linux-3.0.7/grsecurity/gracl_fs.c 2011-10-17 01:22:26.000000000 -0400
51070@@ -0,0 +1,431 @@
51071+#include <linux/kernel.h>
51072+#include <linux/sched.h>
51073+#include <linux/types.h>
51074+#include <linux/fs.h>
51075+#include <linux/file.h>
51076+#include <linux/stat.h>
51077+#include <linux/grsecurity.h>
51078+#include <linux/grinternal.h>
51079+#include <linux/gracl.h>
51080+
51081+__u32
51082+gr_acl_handle_hidden_file(const struct dentry * dentry,
51083+ const struct vfsmount * mnt)
51084+{
51085+ __u32 mode;
51086+
51087+ if (unlikely(!dentry->d_inode))
51088+ return GR_FIND;
51089+
51090+ mode =
51091+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51092+
51093+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51094+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51095+ return mode;
51096+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51097+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51098+ return 0;
51099+ } else if (unlikely(!(mode & GR_FIND)))
51100+ return 0;
51101+
51102+ return GR_FIND;
51103+}
51104+
51105+__u32
51106+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51107+ const int fmode)
51108+{
51109+ __u32 reqmode = GR_FIND;
51110+ __u32 mode;
51111+
51112+ if (unlikely(!dentry->d_inode))
51113+ return reqmode;
51114+
51115+ if (unlikely(fmode & O_APPEND))
51116+ reqmode |= GR_APPEND;
51117+ else if (unlikely(fmode & FMODE_WRITE))
51118+ reqmode |= GR_WRITE;
51119+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51120+ reqmode |= GR_READ;
51121+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
51122+ reqmode &= ~GR_READ;
51123+ mode =
51124+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51125+ mnt);
51126+
51127+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51128+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51129+ reqmode & GR_READ ? " reading" : "",
51130+ reqmode & GR_WRITE ? " writing" : reqmode &
51131+ GR_APPEND ? " appending" : "");
51132+ return reqmode;
51133+ } else
51134+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51135+ {
51136+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51137+ reqmode & GR_READ ? " reading" : "",
51138+ reqmode & GR_WRITE ? " writing" : reqmode &
51139+ GR_APPEND ? " appending" : "");
51140+ return 0;
51141+ } else if (unlikely((mode & reqmode) != reqmode))
51142+ return 0;
51143+
51144+ return reqmode;
51145+}
51146+
51147+__u32
51148+gr_acl_handle_creat(const struct dentry * dentry,
51149+ const struct dentry * p_dentry,
51150+ const struct vfsmount * p_mnt, const int fmode,
51151+ const int imode)
51152+{
51153+ __u32 reqmode = GR_WRITE | GR_CREATE;
51154+ __u32 mode;
51155+
51156+ if (unlikely(fmode & O_APPEND))
51157+ reqmode |= GR_APPEND;
51158+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51159+ reqmode |= GR_READ;
51160+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51161+ reqmode |= GR_SETID;
51162+
51163+ mode =
51164+ gr_check_create(dentry, p_dentry, p_mnt,
51165+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51166+
51167+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51168+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51169+ reqmode & GR_READ ? " reading" : "",
51170+ reqmode & GR_WRITE ? " writing" : reqmode &
51171+ GR_APPEND ? " appending" : "");
51172+ return reqmode;
51173+ } else
51174+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51175+ {
51176+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51177+ reqmode & GR_READ ? " reading" : "",
51178+ reqmode & GR_WRITE ? " writing" : reqmode &
51179+ GR_APPEND ? " appending" : "");
51180+ return 0;
51181+ } else if (unlikely((mode & reqmode) != reqmode))
51182+ return 0;
51183+
51184+ return reqmode;
51185+}
51186+
51187+__u32
51188+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51189+ const int fmode)
51190+{
51191+ __u32 mode, reqmode = GR_FIND;
51192+
51193+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51194+ reqmode |= GR_EXEC;
51195+ if (fmode & S_IWOTH)
51196+ reqmode |= GR_WRITE;
51197+ if (fmode & S_IROTH)
51198+ reqmode |= GR_READ;
51199+
51200+ mode =
51201+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51202+ mnt);
51203+
51204+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51205+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51206+ reqmode & GR_READ ? " reading" : "",
51207+ reqmode & GR_WRITE ? " writing" : "",
51208+ reqmode & GR_EXEC ? " executing" : "");
51209+ return reqmode;
51210+ } else
51211+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51212+ {
51213+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51214+ reqmode & GR_READ ? " reading" : "",
51215+ reqmode & GR_WRITE ? " writing" : "",
51216+ reqmode & GR_EXEC ? " executing" : "");
51217+ return 0;
51218+ } else if (unlikely((mode & reqmode) != reqmode))
51219+ return 0;
51220+
51221+ return reqmode;
51222+}
51223+
51224+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51225+{
51226+ __u32 mode;
51227+
51228+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51229+
51230+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51231+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51232+ return mode;
51233+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51234+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51235+ return 0;
51236+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51237+ return 0;
51238+
51239+ return (reqmode);
51240+}
51241+
51242+__u32
51243+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51244+{
51245+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51246+}
51247+
51248+__u32
51249+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51250+{
51251+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51252+}
51253+
51254+__u32
51255+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51256+{
51257+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51258+}
51259+
51260+__u32
51261+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51262+{
51263+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51264+}
51265+
51266+__u32
51267+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51268+ mode_t mode)
51269+{
51270+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51271+ return 1;
51272+
51273+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51274+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51275+ GR_FCHMOD_ACL_MSG);
51276+ } else {
51277+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51278+ }
51279+}
51280+
51281+__u32
51282+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51283+ mode_t mode)
51284+{
51285+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51286+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51287+ GR_CHMOD_ACL_MSG);
51288+ } else {
51289+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51290+ }
51291+}
51292+
51293+__u32
51294+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51295+{
51296+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51297+}
51298+
51299+__u32
51300+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51301+{
51302+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51303+}
51304+
51305+__u32
51306+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51307+{
51308+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51309+}
51310+
51311+__u32
51312+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51313+{
51314+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51315+ GR_UNIXCONNECT_ACL_MSG);
51316+}
51317+
51318+/* hardlinks require at minimum create and link permission,
51319+ any additional privilege required is based on the
51320+ privilege of the file being linked to
51321+*/
51322+__u32
51323+gr_acl_handle_link(const struct dentry * new_dentry,
51324+ const struct dentry * parent_dentry,
51325+ const struct vfsmount * parent_mnt,
51326+ const struct dentry * old_dentry,
51327+ const struct vfsmount * old_mnt, const char *to)
51328+{
51329+ __u32 mode;
51330+ __u32 needmode = GR_CREATE | GR_LINK;
51331+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51332+
51333+ mode =
51334+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51335+ old_mnt);
51336+
51337+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51338+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51339+ return mode;
51340+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51341+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51342+ return 0;
51343+ } else if (unlikely((mode & needmode) != needmode))
51344+ return 0;
51345+
51346+ return 1;
51347+}
51348+
51349+__u32
51350+gr_acl_handle_symlink(const struct dentry * new_dentry,
51351+ const struct dentry * parent_dentry,
51352+ const struct vfsmount * parent_mnt, const char *from)
51353+{
51354+ __u32 needmode = GR_WRITE | GR_CREATE;
51355+ __u32 mode;
51356+
51357+ mode =
51358+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51359+ GR_CREATE | GR_AUDIT_CREATE |
51360+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51361+
51362+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51363+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51364+ return mode;
51365+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51366+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51367+ return 0;
51368+ } else if (unlikely((mode & needmode) != needmode))
51369+ return 0;
51370+
51371+ return (GR_WRITE | GR_CREATE);
51372+}
51373+
51374+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51375+{
51376+ __u32 mode;
51377+
51378+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51379+
51380+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51381+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51382+ return mode;
51383+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51384+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51385+ return 0;
51386+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51387+ return 0;
51388+
51389+ return (reqmode);
51390+}
51391+
51392+__u32
51393+gr_acl_handle_mknod(const struct dentry * new_dentry,
51394+ const struct dentry * parent_dentry,
51395+ const struct vfsmount * parent_mnt,
51396+ const int mode)
51397+{
51398+ __u32 reqmode = GR_WRITE | GR_CREATE;
51399+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51400+ reqmode |= GR_SETID;
51401+
51402+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51403+ reqmode, GR_MKNOD_ACL_MSG);
51404+}
51405+
51406+__u32
51407+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51408+ const struct dentry *parent_dentry,
51409+ const struct vfsmount *parent_mnt)
51410+{
51411+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51412+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51413+}
51414+
51415+#define RENAME_CHECK_SUCCESS(old, new) \
51416+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51417+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51418+
51419+int
51420+gr_acl_handle_rename(struct dentry *new_dentry,
51421+ struct dentry *parent_dentry,
51422+ const struct vfsmount *parent_mnt,
51423+ struct dentry *old_dentry,
51424+ struct inode *old_parent_inode,
51425+ struct vfsmount *old_mnt, const char *newname)
51426+{
51427+ __u32 comp1, comp2;
51428+ int error = 0;
51429+
51430+ if (unlikely(!gr_acl_is_enabled()))
51431+ return 0;
51432+
51433+ if (!new_dentry->d_inode) {
51434+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51435+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51436+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51437+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51438+ GR_DELETE | GR_AUDIT_DELETE |
51439+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51440+ GR_SUPPRESS, old_mnt);
51441+ } else {
51442+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51443+ GR_CREATE | GR_DELETE |
51444+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51445+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51446+ GR_SUPPRESS, parent_mnt);
51447+ comp2 =
51448+ gr_search_file(old_dentry,
51449+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51450+ GR_DELETE | GR_AUDIT_DELETE |
51451+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51452+ }
51453+
51454+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51455+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51456+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51457+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51458+ && !(comp2 & GR_SUPPRESS)) {
51459+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51460+ error = -EACCES;
51461+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51462+ error = -EACCES;
51463+
51464+ return error;
51465+}
51466+
51467+void
51468+gr_acl_handle_exit(void)
51469+{
51470+ u16 id;
51471+ char *rolename;
51472+ struct file *exec_file;
51473+
51474+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51475+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51476+ id = current->acl_role_id;
51477+ rolename = current->role->rolename;
51478+ gr_set_acls(1);
51479+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51480+ }
51481+
51482+ write_lock(&grsec_exec_file_lock);
51483+ exec_file = current->exec_file;
51484+ current->exec_file = NULL;
51485+ write_unlock(&grsec_exec_file_lock);
51486+
51487+ if (exec_file)
51488+ fput(exec_file);
51489+}
51490+
51491+int
51492+gr_acl_handle_procpidmem(const struct task_struct *task)
51493+{
51494+ if (unlikely(!gr_acl_is_enabled()))
51495+ return 0;
51496+
51497+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51498+ return -EACCES;
51499+
51500+ return 0;
51501+}
51502diff -urNp linux-3.0.7/grsecurity/gracl_ip.c linux-3.0.7/grsecurity/gracl_ip.c
51503--- linux-3.0.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51504+++ linux-3.0.7/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
51505@@ -0,0 +1,381 @@
51506+#include <linux/kernel.h>
51507+#include <asm/uaccess.h>
51508+#include <asm/errno.h>
51509+#include <net/sock.h>
51510+#include <linux/file.h>
51511+#include <linux/fs.h>
51512+#include <linux/net.h>
51513+#include <linux/in.h>
51514+#include <linux/skbuff.h>
51515+#include <linux/ip.h>
51516+#include <linux/udp.h>
51517+#include <linux/types.h>
51518+#include <linux/sched.h>
51519+#include <linux/netdevice.h>
51520+#include <linux/inetdevice.h>
51521+#include <linux/gracl.h>
51522+#include <linux/grsecurity.h>
51523+#include <linux/grinternal.h>
51524+
51525+#define GR_BIND 0x01
51526+#define GR_CONNECT 0x02
51527+#define GR_INVERT 0x04
51528+#define GR_BINDOVERRIDE 0x08
51529+#define GR_CONNECTOVERRIDE 0x10
51530+#define GR_SOCK_FAMILY 0x20
51531+
51532+static const char * gr_protocols[IPPROTO_MAX] = {
51533+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51534+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51535+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51536+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51537+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51538+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51539+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51540+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51541+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51542+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51543+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51544+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51545+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51546+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51547+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51548+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51549+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51550+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51551+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51552+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51553+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51554+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51555+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51556+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51557+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51558+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51559+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51560+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51561+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51562+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51563+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51564+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51565+ };
51566+
51567+static const char * gr_socktypes[SOCK_MAX] = {
51568+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51569+ "unknown:7", "unknown:8", "unknown:9", "packet"
51570+ };
51571+
51572+static const char * gr_sockfamilies[AF_MAX+1] = {
51573+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51574+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51575+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51576+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
51577+ };
51578+
51579+const char *
51580+gr_proto_to_name(unsigned char proto)
51581+{
51582+ return gr_protocols[proto];
51583+}
51584+
51585+const char *
51586+gr_socktype_to_name(unsigned char type)
51587+{
51588+ return gr_socktypes[type];
51589+}
51590+
51591+const char *
51592+gr_sockfamily_to_name(unsigned char family)
51593+{
51594+ return gr_sockfamilies[family];
51595+}
51596+
51597+int
51598+gr_search_socket(const int domain, const int type, const int protocol)
51599+{
51600+ struct acl_subject_label *curr;
51601+ const struct cred *cred = current_cred();
51602+
51603+ if (unlikely(!gr_acl_is_enabled()))
51604+ goto exit;
51605+
51606+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51607+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51608+ goto exit; // let the kernel handle it
51609+
51610+ curr = current->acl;
51611+
51612+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51613+ /* the family is allowed, if this is PF_INET allow it only if
51614+ the extra sock type/protocol checks pass */
51615+ if (domain == PF_INET)
51616+ goto inet_check;
51617+ goto exit;
51618+ } else {
51619+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51620+ __u32 fakeip = 0;
51621+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51622+ current->role->roletype, cred->uid,
51623+ cred->gid, current->exec_file ?
51624+ gr_to_filename(current->exec_file->f_path.dentry,
51625+ current->exec_file->f_path.mnt) :
51626+ curr->filename, curr->filename,
51627+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51628+ &current->signal->saved_ip);
51629+ goto exit;
51630+ }
51631+ goto exit_fail;
51632+ }
51633+
51634+inet_check:
51635+ /* the rest of this checking is for IPv4 only */
51636+ if (!curr->ips)
51637+ goto exit;
51638+
51639+ if ((curr->ip_type & (1 << type)) &&
51640+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51641+ goto exit;
51642+
51643+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51644+ /* we don't place acls on raw sockets , and sometimes
51645+ dgram/ip sockets are opened for ioctl and not
51646+ bind/connect, so we'll fake a bind learn log */
51647+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51648+ __u32 fakeip = 0;
51649+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51650+ current->role->roletype, cred->uid,
51651+ cred->gid, current->exec_file ?
51652+ gr_to_filename(current->exec_file->f_path.dentry,
51653+ current->exec_file->f_path.mnt) :
51654+ curr->filename, curr->filename,
51655+ &fakeip, 0, type,
51656+ protocol, GR_CONNECT, &current->signal->saved_ip);
51657+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51658+ __u32 fakeip = 0;
51659+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51660+ current->role->roletype, cred->uid,
51661+ cred->gid, current->exec_file ?
51662+ gr_to_filename(current->exec_file->f_path.dentry,
51663+ current->exec_file->f_path.mnt) :
51664+ curr->filename, curr->filename,
51665+ &fakeip, 0, type,
51666+ protocol, GR_BIND, &current->signal->saved_ip);
51667+ }
51668+ /* we'll log when they use connect or bind */
51669+ goto exit;
51670+ }
51671+
51672+exit_fail:
51673+ if (domain == PF_INET)
51674+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51675+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51676+ else
51677+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51678+ gr_socktype_to_name(type), protocol);
51679+
51680+ return 0;
51681+exit:
51682+ return 1;
51683+}
51684+
51685+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51686+{
51687+ if ((ip->mode & mode) &&
51688+ (ip_port >= ip->low) &&
51689+ (ip_port <= ip->high) &&
51690+ ((ntohl(ip_addr) & our_netmask) ==
51691+ (ntohl(our_addr) & our_netmask))
51692+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51693+ && (ip->type & (1 << type))) {
51694+ if (ip->mode & GR_INVERT)
51695+ return 2; // specifically denied
51696+ else
51697+ return 1; // allowed
51698+ }
51699+
51700+ return 0; // not specifically allowed, may continue parsing
51701+}
51702+
51703+static int
51704+gr_search_connectbind(const int full_mode, struct sock *sk,
51705+ struct sockaddr_in *addr, const int type)
51706+{
51707+ char iface[IFNAMSIZ] = {0};
51708+ struct acl_subject_label *curr;
51709+ struct acl_ip_label *ip;
51710+ struct inet_sock *isk;
51711+ struct net_device *dev;
51712+ struct in_device *idev;
51713+ unsigned long i;
51714+ int ret;
51715+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51716+ __u32 ip_addr = 0;
51717+ __u32 our_addr;
51718+ __u32 our_netmask;
51719+ char *p;
51720+ __u16 ip_port = 0;
51721+ const struct cred *cred = current_cred();
51722+
51723+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51724+ return 0;
51725+
51726+ curr = current->acl;
51727+ isk = inet_sk(sk);
51728+
51729+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51730+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51731+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51732+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51733+ struct sockaddr_in saddr;
51734+ int err;
51735+
51736+ saddr.sin_family = AF_INET;
51737+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51738+ saddr.sin_port = isk->inet_sport;
51739+
51740+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51741+ if (err)
51742+ return err;
51743+
51744+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51745+ if (err)
51746+ return err;
51747+ }
51748+
51749+ if (!curr->ips)
51750+ return 0;
51751+
51752+ ip_addr = addr->sin_addr.s_addr;
51753+ ip_port = ntohs(addr->sin_port);
51754+
51755+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51756+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51757+ current->role->roletype, cred->uid,
51758+ cred->gid, current->exec_file ?
51759+ gr_to_filename(current->exec_file->f_path.dentry,
51760+ current->exec_file->f_path.mnt) :
51761+ curr->filename, curr->filename,
51762+ &ip_addr, ip_port, type,
51763+ sk->sk_protocol, mode, &current->signal->saved_ip);
51764+ return 0;
51765+ }
51766+
51767+ for (i = 0; i < curr->ip_num; i++) {
51768+ ip = *(curr->ips + i);
51769+ if (ip->iface != NULL) {
51770+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51771+ p = strchr(iface, ':');
51772+ if (p != NULL)
51773+ *p = '\0';
51774+ dev = dev_get_by_name(sock_net(sk), iface);
51775+ if (dev == NULL)
51776+ continue;
51777+ idev = in_dev_get(dev);
51778+ if (idev == NULL) {
51779+ dev_put(dev);
51780+ continue;
51781+ }
51782+ rcu_read_lock();
51783+ for_ifa(idev) {
51784+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51785+ our_addr = ifa->ifa_address;
51786+ our_netmask = 0xffffffff;
51787+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51788+ if (ret == 1) {
51789+ rcu_read_unlock();
51790+ in_dev_put(idev);
51791+ dev_put(dev);
51792+ return 0;
51793+ } else if (ret == 2) {
51794+ rcu_read_unlock();
51795+ in_dev_put(idev);
51796+ dev_put(dev);
51797+ goto denied;
51798+ }
51799+ }
51800+ } endfor_ifa(idev);
51801+ rcu_read_unlock();
51802+ in_dev_put(idev);
51803+ dev_put(dev);
51804+ } else {
51805+ our_addr = ip->addr;
51806+ our_netmask = ip->netmask;
51807+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51808+ if (ret == 1)
51809+ return 0;
51810+ else if (ret == 2)
51811+ goto denied;
51812+ }
51813+ }
51814+
51815+denied:
51816+ if (mode == GR_BIND)
51817+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51818+ else if (mode == GR_CONNECT)
51819+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51820+
51821+ return -EACCES;
51822+}
51823+
51824+int
51825+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51826+{
51827+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51828+}
51829+
51830+int
51831+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51832+{
51833+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51834+}
51835+
51836+int gr_search_listen(struct socket *sock)
51837+{
51838+ struct sock *sk = sock->sk;
51839+ struct sockaddr_in addr;
51840+
51841+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51842+ addr.sin_port = inet_sk(sk)->inet_sport;
51843+
51844+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51845+}
51846+
51847+int gr_search_accept(struct socket *sock)
51848+{
51849+ struct sock *sk = sock->sk;
51850+ struct sockaddr_in addr;
51851+
51852+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51853+ addr.sin_port = inet_sk(sk)->inet_sport;
51854+
51855+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51856+}
51857+
51858+int
51859+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51860+{
51861+ if (addr)
51862+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51863+ else {
51864+ struct sockaddr_in sin;
51865+ const struct inet_sock *inet = inet_sk(sk);
51866+
51867+ sin.sin_addr.s_addr = inet->inet_daddr;
51868+ sin.sin_port = inet->inet_dport;
51869+
51870+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51871+ }
51872+}
51873+
51874+int
51875+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51876+{
51877+ struct sockaddr_in sin;
51878+
51879+ if (unlikely(skb->len < sizeof (struct udphdr)))
51880+ return 0; // skip this packet
51881+
51882+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51883+ sin.sin_port = udp_hdr(skb)->source;
51884+
51885+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51886+}
51887diff -urNp linux-3.0.7/grsecurity/gracl_learn.c linux-3.0.7/grsecurity/gracl_learn.c
51888--- linux-3.0.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51889+++ linux-3.0.7/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
51890@@ -0,0 +1,207 @@
51891+#include <linux/kernel.h>
51892+#include <linux/mm.h>
51893+#include <linux/sched.h>
51894+#include <linux/poll.h>
51895+#include <linux/string.h>
51896+#include <linux/file.h>
51897+#include <linux/types.h>
51898+#include <linux/vmalloc.h>
51899+#include <linux/grinternal.h>
51900+
51901+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51902+ size_t count, loff_t *ppos);
51903+extern int gr_acl_is_enabled(void);
51904+
51905+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51906+static int gr_learn_attached;
51907+
51908+/* use a 512k buffer */
51909+#define LEARN_BUFFER_SIZE (512 * 1024)
51910+
51911+static DEFINE_SPINLOCK(gr_learn_lock);
51912+static DEFINE_MUTEX(gr_learn_user_mutex);
51913+
51914+/* we need to maintain two buffers, so that the kernel context of grlearn
51915+ uses a semaphore around the userspace copying, and the other kernel contexts
51916+ use a spinlock when copying into the buffer, since they cannot sleep
51917+*/
51918+static char *learn_buffer;
51919+static char *learn_buffer_user;
51920+static int learn_buffer_len;
51921+static int learn_buffer_user_len;
51922+
51923+static ssize_t
51924+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51925+{
51926+ DECLARE_WAITQUEUE(wait, current);
51927+ ssize_t retval = 0;
51928+
51929+ add_wait_queue(&learn_wait, &wait);
51930+ set_current_state(TASK_INTERRUPTIBLE);
51931+ do {
51932+ mutex_lock(&gr_learn_user_mutex);
51933+ spin_lock(&gr_learn_lock);
51934+ if (learn_buffer_len)
51935+ break;
51936+ spin_unlock(&gr_learn_lock);
51937+ mutex_unlock(&gr_learn_user_mutex);
51938+ if (file->f_flags & O_NONBLOCK) {
51939+ retval = -EAGAIN;
51940+ goto out;
51941+ }
51942+ if (signal_pending(current)) {
51943+ retval = -ERESTARTSYS;
51944+ goto out;
51945+ }
51946+
51947+ schedule();
51948+ } while (1);
51949+
51950+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51951+ learn_buffer_user_len = learn_buffer_len;
51952+ retval = learn_buffer_len;
51953+ learn_buffer_len = 0;
51954+
51955+ spin_unlock(&gr_learn_lock);
51956+
51957+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51958+ retval = -EFAULT;
51959+
51960+ mutex_unlock(&gr_learn_user_mutex);
51961+out:
51962+ set_current_state(TASK_RUNNING);
51963+ remove_wait_queue(&learn_wait, &wait);
51964+ return retval;
51965+}
51966+
51967+static unsigned int
51968+poll_learn(struct file * file, poll_table * wait)
51969+{
51970+ poll_wait(file, &learn_wait, wait);
51971+
51972+ if (learn_buffer_len)
51973+ return (POLLIN | POLLRDNORM);
51974+
51975+ return 0;
51976+}
51977+
51978+void
51979+gr_clear_learn_entries(void)
51980+{
51981+ char *tmp;
51982+
51983+ mutex_lock(&gr_learn_user_mutex);
51984+ spin_lock(&gr_learn_lock);
51985+ tmp = learn_buffer;
51986+ learn_buffer = NULL;
51987+ spin_unlock(&gr_learn_lock);
51988+ if (tmp)
51989+ vfree(tmp);
51990+ if (learn_buffer_user != NULL) {
51991+ vfree(learn_buffer_user);
51992+ learn_buffer_user = NULL;
51993+ }
51994+ learn_buffer_len = 0;
51995+ mutex_unlock(&gr_learn_user_mutex);
51996+
51997+ return;
51998+}
51999+
52000+void
52001+gr_add_learn_entry(const char *fmt, ...)
52002+{
52003+ va_list args;
52004+ unsigned int len;
52005+
52006+ if (!gr_learn_attached)
52007+ return;
52008+
52009+ spin_lock(&gr_learn_lock);
52010+
52011+ /* leave a gap at the end so we know when it's "full" but don't have to
52012+ compute the exact length of the string we're trying to append
52013+ */
52014+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52015+ spin_unlock(&gr_learn_lock);
52016+ wake_up_interruptible(&learn_wait);
52017+ return;
52018+ }
52019+ if (learn_buffer == NULL) {
52020+ spin_unlock(&gr_learn_lock);
52021+ return;
52022+ }
52023+
52024+ va_start(args, fmt);
52025+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52026+ va_end(args);
52027+
52028+ learn_buffer_len += len + 1;
52029+
52030+ spin_unlock(&gr_learn_lock);
52031+ wake_up_interruptible(&learn_wait);
52032+
52033+ return;
52034+}
52035+
52036+static int
52037+open_learn(struct inode *inode, struct file *file)
52038+{
52039+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52040+ return -EBUSY;
52041+ if (file->f_mode & FMODE_READ) {
52042+ int retval = 0;
52043+ mutex_lock(&gr_learn_user_mutex);
52044+ if (learn_buffer == NULL)
52045+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52046+ if (learn_buffer_user == NULL)
52047+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52048+ if (learn_buffer == NULL) {
52049+ retval = -ENOMEM;
52050+ goto out_error;
52051+ }
52052+ if (learn_buffer_user == NULL) {
52053+ retval = -ENOMEM;
52054+ goto out_error;
52055+ }
52056+ learn_buffer_len = 0;
52057+ learn_buffer_user_len = 0;
52058+ gr_learn_attached = 1;
52059+out_error:
52060+ mutex_unlock(&gr_learn_user_mutex);
52061+ return retval;
52062+ }
52063+ return 0;
52064+}
52065+
52066+static int
52067+close_learn(struct inode *inode, struct file *file)
52068+{
52069+ if (file->f_mode & FMODE_READ) {
52070+ char *tmp = NULL;
52071+ mutex_lock(&gr_learn_user_mutex);
52072+ spin_lock(&gr_learn_lock);
52073+ tmp = learn_buffer;
52074+ learn_buffer = NULL;
52075+ spin_unlock(&gr_learn_lock);
52076+ if (tmp)
52077+ vfree(tmp);
52078+ if (learn_buffer_user != NULL) {
52079+ vfree(learn_buffer_user);
52080+ learn_buffer_user = NULL;
52081+ }
52082+ learn_buffer_len = 0;
52083+ learn_buffer_user_len = 0;
52084+ gr_learn_attached = 0;
52085+ mutex_unlock(&gr_learn_user_mutex);
52086+ }
52087+
52088+ return 0;
52089+}
52090+
52091+const struct file_operations grsec_fops = {
52092+ .read = read_learn,
52093+ .write = write_grsec_handler,
52094+ .open = open_learn,
52095+ .release = close_learn,
52096+ .poll = poll_learn,
52097+};
52098diff -urNp linux-3.0.7/grsecurity/gracl_res.c linux-3.0.7/grsecurity/gracl_res.c
52099--- linux-3.0.7/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52100+++ linux-3.0.7/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
52101@@ -0,0 +1,68 @@
52102+#include <linux/kernel.h>
52103+#include <linux/sched.h>
52104+#include <linux/gracl.h>
52105+#include <linux/grinternal.h>
52106+
52107+static const char *restab_log[] = {
52108+ [RLIMIT_CPU] = "RLIMIT_CPU",
52109+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52110+ [RLIMIT_DATA] = "RLIMIT_DATA",
52111+ [RLIMIT_STACK] = "RLIMIT_STACK",
52112+ [RLIMIT_CORE] = "RLIMIT_CORE",
52113+ [RLIMIT_RSS] = "RLIMIT_RSS",
52114+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52115+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52116+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52117+ [RLIMIT_AS] = "RLIMIT_AS",
52118+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52119+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52120+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52121+ [RLIMIT_NICE] = "RLIMIT_NICE",
52122+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52123+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52124+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52125+};
52126+
52127+void
52128+gr_log_resource(const struct task_struct *task,
52129+ const int res, const unsigned long wanted, const int gt)
52130+{
52131+ const struct cred *cred;
52132+ unsigned long rlim;
52133+
52134+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52135+ return;
52136+
52137+ // not yet supported resource
52138+ if (unlikely(!restab_log[res]))
52139+ return;
52140+
52141+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52142+ rlim = task_rlimit_max(task, res);
52143+ else
52144+ rlim = task_rlimit(task, res);
52145+
52146+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52147+ return;
52148+
52149+ rcu_read_lock();
52150+ cred = __task_cred(task);
52151+
52152+ if (res == RLIMIT_NPROC &&
52153+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52154+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52155+ goto out_rcu_unlock;
52156+ else if (res == RLIMIT_MEMLOCK &&
52157+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52158+ goto out_rcu_unlock;
52159+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52160+ goto out_rcu_unlock;
52161+ rcu_read_unlock();
52162+
52163+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52164+
52165+ return;
52166+out_rcu_unlock:
52167+ rcu_read_unlock();
52168+ return;
52169+}
52170diff -urNp linux-3.0.7/grsecurity/gracl_segv.c linux-3.0.7/grsecurity/gracl_segv.c
52171--- linux-3.0.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52172+++ linux-3.0.7/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
52173@@ -0,0 +1,299 @@
52174+#include <linux/kernel.h>
52175+#include <linux/mm.h>
52176+#include <asm/uaccess.h>
52177+#include <asm/errno.h>
52178+#include <asm/mman.h>
52179+#include <net/sock.h>
52180+#include <linux/file.h>
52181+#include <linux/fs.h>
52182+#include <linux/net.h>
52183+#include <linux/in.h>
52184+#include <linux/slab.h>
52185+#include <linux/types.h>
52186+#include <linux/sched.h>
52187+#include <linux/timer.h>
52188+#include <linux/gracl.h>
52189+#include <linux/grsecurity.h>
52190+#include <linux/grinternal.h>
52191+
52192+static struct crash_uid *uid_set;
52193+static unsigned short uid_used;
52194+static DEFINE_SPINLOCK(gr_uid_lock);
52195+extern rwlock_t gr_inode_lock;
52196+extern struct acl_subject_label *
52197+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52198+ struct acl_role_label *role);
52199+
52200+#ifdef CONFIG_BTRFS_FS
52201+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52202+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52203+#endif
52204+
52205+static inline dev_t __get_dev(const struct dentry *dentry)
52206+{
52207+#ifdef CONFIG_BTRFS_FS
52208+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52209+ return get_btrfs_dev_from_inode(dentry->d_inode);
52210+ else
52211+#endif
52212+ return dentry->d_inode->i_sb->s_dev;
52213+}
52214+
52215+int
52216+gr_init_uidset(void)
52217+{
52218+ uid_set =
52219+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52220+ uid_used = 0;
52221+
52222+ return uid_set ? 1 : 0;
52223+}
52224+
52225+void
52226+gr_free_uidset(void)
52227+{
52228+ if (uid_set)
52229+ kfree(uid_set);
52230+
52231+ return;
52232+}
52233+
52234+int
52235+gr_find_uid(const uid_t uid)
52236+{
52237+ struct crash_uid *tmp = uid_set;
52238+ uid_t buid;
52239+ int low = 0, high = uid_used - 1, mid;
52240+
52241+ while (high >= low) {
52242+ mid = (low + high) >> 1;
52243+ buid = tmp[mid].uid;
52244+ if (buid == uid)
52245+ return mid;
52246+ if (buid > uid)
52247+ high = mid - 1;
52248+ if (buid < uid)
52249+ low = mid + 1;
52250+ }
52251+
52252+ return -1;
52253+}
52254+
52255+static __inline__ void
52256+gr_insertsort(void)
52257+{
52258+ unsigned short i, j;
52259+ struct crash_uid index;
52260+
52261+ for (i = 1; i < uid_used; i++) {
52262+ index = uid_set[i];
52263+ j = i;
52264+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52265+ uid_set[j] = uid_set[j - 1];
52266+ j--;
52267+ }
52268+ uid_set[j] = index;
52269+ }
52270+
52271+ return;
52272+}
52273+
52274+static __inline__ void
52275+gr_insert_uid(const uid_t uid, const unsigned long expires)
52276+{
52277+ int loc;
52278+
52279+ if (uid_used == GR_UIDTABLE_MAX)
52280+ return;
52281+
52282+ loc = gr_find_uid(uid);
52283+
52284+ if (loc >= 0) {
52285+ uid_set[loc].expires = expires;
52286+ return;
52287+ }
52288+
52289+ uid_set[uid_used].uid = uid;
52290+ uid_set[uid_used].expires = expires;
52291+ uid_used++;
52292+
52293+ gr_insertsort();
52294+
52295+ return;
52296+}
52297+
52298+void
52299+gr_remove_uid(const unsigned short loc)
52300+{
52301+ unsigned short i;
52302+
52303+ for (i = loc + 1; i < uid_used; i++)
52304+ uid_set[i - 1] = uid_set[i];
52305+
52306+ uid_used--;
52307+
52308+ return;
52309+}
52310+
52311+int
52312+gr_check_crash_uid(const uid_t uid)
52313+{
52314+ int loc;
52315+ int ret = 0;
52316+
52317+ if (unlikely(!gr_acl_is_enabled()))
52318+ return 0;
52319+
52320+ spin_lock(&gr_uid_lock);
52321+ loc = gr_find_uid(uid);
52322+
52323+ if (loc < 0)
52324+ goto out_unlock;
52325+
52326+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52327+ gr_remove_uid(loc);
52328+ else
52329+ ret = 1;
52330+
52331+out_unlock:
52332+ spin_unlock(&gr_uid_lock);
52333+ return ret;
52334+}
52335+
52336+static __inline__ int
52337+proc_is_setxid(const struct cred *cred)
52338+{
52339+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52340+ cred->uid != cred->fsuid)
52341+ return 1;
52342+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52343+ cred->gid != cred->fsgid)
52344+ return 1;
52345+
52346+ return 0;
52347+}
52348+
52349+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52350+
52351+void
52352+gr_handle_crash(struct task_struct *task, const int sig)
52353+{
52354+ struct acl_subject_label *curr;
52355+ struct acl_subject_label *curr2;
52356+ struct task_struct *tsk, *tsk2;
52357+ const struct cred *cred;
52358+ const struct cred *cred2;
52359+
52360+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52361+ return;
52362+
52363+ if (unlikely(!gr_acl_is_enabled()))
52364+ return;
52365+
52366+ curr = task->acl;
52367+
52368+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52369+ return;
52370+
52371+ if (time_before_eq(curr->expires, get_seconds())) {
52372+ curr->expires = 0;
52373+ curr->crashes = 0;
52374+ }
52375+
52376+ curr->crashes++;
52377+
52378+ if (!curr->expires)
52379+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52380+
52381+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52382+ time_after(curr->expires, get_seconds())) {
52383+ rcu_read_lock();
52384+ cred = __task_cred(task);
52385+ if (cred->uid && proc_is_setxid(cred)) {
52386+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52387+ spin_lock(&gr_uid_lock);
52388+ gr_insert_uid(cred->uid, curr->expires);
52389+ spin_unlock(&gr_uid_lock);
52390+ curr->expires = 0;
52391+ curr->crashes = 0;
52392+ read_lock(&tasklist_lock);
52393+ do_each_thread(tsk2, tsk) {
52394+ cred2 = __task_cred(tsk);
52395+ if (tsk != task && cred2->uid == cred->uid)
52396+ gr_fake_force_sig(SIGKILL, tsk);
52397+ } while_each_thread(tsk2, tsk);
52398+ read_unlock(&tasklist_lock);
52399+ } else {
52400+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52401+ read_lock(&tasklist_lock);
52402+ do_each_thread(tsk2, tsk) {
52403+ if (likely(tsk != task)) {
52404+ curr2 = tsk->acl;
52405+
52406+ if (curr2->device == curr->device &&
52407+ curr2->inode == curr->inode)
52408+ gr_fake_force_sig(SIGKILL, tsk);
52409+ }
52410+ } while_each_thread(tsk2, tsk);
52411+ read_unlock(&tasklist_lock);
52412+ }
52413+ rcu_read_unlock();
52414+ }
52415+
52416+ return;
52417+}
52418+
52419+int
52420+gr_check_crash_exec(const struct file *filp)
52421+{
52422+ struct acl_subject_label *curr;
52423+
52424+ if (unlikely(!gr_acl_is_enabled()))
52425+ return 0;
52426+
52427+ read_lock(&gr_inode_lock);
52428+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52429+ __get_dev(filp->f_path.dentry),
52430+ current->role);
52431+ read_unlock(&gr_inode_lock);
52432+
52433+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52434+ (!curr->crashes && !curr->expires))
52435+ return 0;
52436+
52437+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52438+ time_after(curr->expires, get_seconds()))
52439+ return 1;
52440+ else if (time_before_eq(curr->expires, get_seconds())) {
52441+ curr->crashes = 0;
52442+ curr->expires = 0;
52443+ }
52444+
52445+ return 0;
52446+}
52447+
52448+void
52449+gr_handle_alertkill(struct task_struct *task)
52450+{
52451+ struct acl_subject_label *curracl;
52452+ __u32 curr_ip;
52453+ struct task_struct *p, *p2;
52454+
52455+ if (unlikely(!gr_acl_is_enabled()))
52456+ return;
52457+
52458+ curracl = task->acl;
52459+ curr_ip = task->signal->curr_ip;
52460+
52461+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52462+ read_lock(&tasklist_lock);
52463+ do_each_thread(p2, p) {
52464+ if (p->signal->curr_ip == curr_ip)
52465+ gr_fake_force_sig(SIGKILL, p);
52466+ } while_each_thread(p2, p);
52467+ read_unlock(&tasklist_lock);
52468+ } else if (curracl->mode & GR_KILLPROC)
52469+ gr_fake_force_sig(SIGKILL, task);
52470+
52471+ return;
52472+}
52473diff -urNp linux-3.0.7/grsecurity/gracl_shm.c linux-3.0.7/grsecurity/gracl_shm.c
52474--- linux-3.0.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52475+++ linux-3.0.7/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
52476@@ -0,0 +1,40 @@
52477+#include <linux/kernel.h>
52478+#include <linux/mm.h>
52479+#include <linux/sched.h>
52480+#include <linux/file.h>
52481+#include <linux/ipc.h>
52482+#include <linux/gracl.h>
52483+#include <linux/grsecurity.h>
52484+#include <linux/grinternal.h>
52485+
52486+int
52487+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52488+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52489+{
52490+ struct task_struct *task;
52491+
52492+ if (!gr_acl_is_enabled())
52493+ return 1;
52494+
52495+ rcu_read_lock();
52496+ read_lock(&tasklist_lock);
52497+
52498+ task = find_task_by_vpid(shm_cprid);
52499+
52500+ if (unlikely(!task))
52501+ task = find_task_by_vpid(shm_lapid);
52502+
52503+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52504+ (task->pid == shm_lapid)) &&
52505+ (task->acl->mode & GR_PROTSHM) &&
52506+ (task->acl != current->acl))) {
52507+ read_unlock(&tasklist_lock);
52508+ rcu_read_unlock();
52509+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52510+ return 0;
52511+ }
52512+ read_unlock(&tasklist_lock);
52513+ rcu_read_unlock();
52514+
52515+ return 1;
52516+}
52517diff -urNp linux-3.0.7/grsecurity/grsec_chdir.c linux-3.0.7/grsecurity/grsec_chdir.c
52518--- linux-3.0.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52519+++ linux-3.0.7/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
52520@@ -0,0 +1,19 @@
52521+#include <linux/kernel.h>
52522+#include <linux/sched.h>
52523+#include <linux/fs.h>
52524+#include <linux/file.h>
52525+#include <linux/grsecurity.h>
52526+#include <linux/grinternal.h>
52527+
52528+void
52529+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52530+{
52531+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52532+ if ((grsec_enable_chdir && grsec_enable_group &&
52533+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52534+ !grsec_enable_group)) {
52535+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52536+ }
52537+#endif
52538+ return;
52539+}
52540diff -urNp linux-3.0.7/grsecurity/grsec_chroot.c linux-3.0.7/grsecurity/grsec_chroot.c
52541--- linux-3.0.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52542+++ linux-3.0.7/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
52543@@ -0,0 +1,351 @@
52544+#include <linux/kernel.h>
52545+#include <linux/module.h>
52546+#include <linux/sched.h>
52547+#include <linux/file.h>
52548+#include <linux/fs.h>
52549+#include <linux/mount.h>
52550+#include <linux/types.h>
52551+#include <linux/pid_namespace.h>
52552+#include <linux/grsecurity.h>
52553+#include <linux/grinternal.h>
52554+
52555+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52556+{
52557+#ifdef CONFIG_GRKERNSEC
52558+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52559+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52560+ task->gr_is_chrooted = 1;
52561+ else
52562+ task->gr_is_chrooted = 0;
52563+
52564+ task->gr_chroot_dentry = path->dentry;
52565+#endif
52566+ return;
52567+}
52568+
52569+void gr_clear_chroot_entries(struct task_struct *task)
52570+{
52571+#ifdef CONFIG_GRKERNSEC
52572+ task->gr_is_chrooted = 0;
52573+ task->gr_chroot_dentry = NULL;
52574+#endif
52575+ return;
52576+}
52577+
52578+int
52579+gr_handle_chroot_unix(const pid_t pid)
52580+{
52581+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52582+ struct task_struct *p;
52583+
52584+ if (unlikely(!grsec_enable_chroot_unix))
52585+ return 1;
52586+
52587+ if (likely(!proc_is_chrooted(current)))
52588+ return 1;
52589+
52590+ rcu_read_lock();
52591+ read_lock(&tasklist_lock);
52592+ p = find_task_by_vpid_unrestricted(pid);
52593+ if (unlikely(p && !have_same_root(current, p))) {
52594+ read_unlock(&tasklist_lock);
52595+ rcu_read_unlock();
52596+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52597+ return 0;
52598+ }
52599+ read_unlock(&tasklist_lock);
52600+ rcu_read_unlock();
52601+#endif
52602+ return 1;
52603+}
52604+
52605+int
52606+gr_handle_chroot_nice(void)
52607+{
52608+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52609+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52610+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52611+ return -EPERM;
52612+ }
52613+#endif
52614+ return 0;
52615+}
52616+
52617+int
52618+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52619+{
52620+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52621+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52622+ && proc_is_chrooted(current)) {
52623+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52624+ return -EACCES;
52625+ }
52626+#endif
52627+ return 0;
52628+}
52629+
52630+int
52631+gr_handle_chroot_rawio(const struct inode *inode)
52632+{
52633+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52634+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52635+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52636+ return 1;
52637+#endif
52638+ return 0;
52639+}
52640+
52641+int
52642+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52643+{
52644+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52645+ struct task_struct *p;
52646+ int ret = 0;
52647+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52648+ return ret;
52649+
52650+ read_lock(&tasklist_lock);
52651+ do_each_pid_task(pid, type, p) {
52652+ if (!have_same_root(current, p)) {
52653+ ret = 1;
52654+ goto out;
52655+ }
52656+ } while_each_pid_task(pid, type, p);
52657+out:
52658+ read_unlock(&tasklist_lock);
52659+ return ret;
52660+#endif
52661+ return 0;
52662+}
52663+
52664+int
52665+gr_pid_is_chrooted(struct task_struct *p)
52666+{
52667+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52668+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52669+ return 0;
52670+
52671+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52672+ !have_same_root(current, p)) {
52673+ return 1;
52674+ }
52675+#endif
52676+ return 0;
52677+}
52678+
52679+EXPORT_SYMBOL(gr_pid_is_chrooted);
52680+
52681+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52682+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52683+{
52684+ struct path path, currentroot;
52685+ int ret = 0;
52686+
52687+ path.dentry = (struct dentry *)u_dentry;
52688+ path.mnt = (struct vfsmount *)u_mnt;
52689+ get_fs_root(current->fs, &currentroot);
52690+ if (path_is_under(&path, &currentroot))
52691+ ret = 1;
52692+ path_put(&currentroot);
52693+
52694+ return ret;
52695+}
52696+#endif
52697+
52698+int
52699+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52700+{
52701+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52702+ if (!grsec_enable_chroot_fchdir)
52703+ return 1;
52704+
52705+ if (!proc_is_chrooted(current))
52706+ return 1;
52707+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52708+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52709+ return 0;
52710+ }
52711+#endif
52712+ return 1;
52713+}
52714+
52715+int
52716+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52717+ const time_t shm_createtime)
52718+{
52719+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52720+ struct task_struct *p;
52721+ time_t starttime;
52722+
52723+ if (unlikely(!grsec_enable_chroot_shmat))
52724+ return 1;
52725+
52726+ if (likely(!proc_is_chrooted(current)))
52727+ return 1;
52728+
52729+ rcu_read_lock();
52730+ read_lock(&tasklist_lock);
52731+
52732+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52733+ starttime = p->start_time.tv_sec;
52734+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52735+ if (have_same_root(current, p)) {
52736+ goto allow;
52737+ } else {
52738+ read_unlock(&tasklist_lock);
52739+ rcu_read_unlock();
52740+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52741+ return 0;
52742+ }
52743+ }
52744+ /* creator exited, pid reuse, fall through to next check */
52745+ }
52746+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52747+ if (unlikely(!have_same_root(current, p))) {
52748+ read_unlock(&tasklist_lock);
52749+ rcu_read_unlock();
52750+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52751+ return 0;
52752+ }
52753+ }
52754+
52755+allow:
52756+ read_unlock(&tasklist_lock);
52757+ rcu_read_unlock();
52758+#endif
52759+ return 1;
52760+}
52761+
52762+void
52763+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52764+{
52765+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52766+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52767+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52768+#endif
52769+ return;
52770+}
52771+
52772+int
52773+gr_handle_chroot_mknod(const struct dentry *dentry,
52774+ const struct vfsmount *mnt, const int mode)
52775+{
52776+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52777+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52778+ proc_is_chrooted(current)) {
52779+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52780+ return -EPERM;
52781+ }
52782+#endif
52783+ return 0;
52784+}
52785+
52786+int
52787+gr_handle_chroot_mount(const struct dentry *dentry,
52788+ const struct vfsmount *mnt, const char *dev_name)
52789+{
52790+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52791+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52792+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
52793+ return -EPERM;
52794+ }
52795+#endif
52796+ return 0;
52797+}
52798+
52799+int
52800+gr_handle_chroot_pivot(void)
52801+{
52802+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52803+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52804+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52805+ return -EPERM;
52806+ }
52807+#endif
52808+ return 0;
52809+}
52810+
52811+int
52812+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52813+{
52814+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52815+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52816+ !gr_is_outside_chroot(dentry, mnt)) {
52817+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52818+ return -EPERM;
52819+ }
52820+#endif
52821+ return 0;
52822+}
52823+
52824+extern const char *captab_log[];
52825+extern int captab_log_entries;
52826+
52827+int
52828+gr_chroot_is_capable(const int cap)
52829+{
52830+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52831+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52832+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52833+ if (cap_raised(chroot_caps, cap)) {
52834+ const struct cred *creds = current_cred();
52835+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
52836+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
52837+ }
52838+ return 0;
52839+ }
52840+ }
52841+#endif
52842+ return 1;
52843+}
52844+
52845+int
52846+gr_chroot_is_capable_nolog(const int cap)
52847+{
52848+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52849+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52850+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52851+ if (cap_raised(chroot_caps, cap)) {
52852+ return 0;
52853+ }
52854+ }
52855+#endif
52856+ return 1;
52857+}
52858+
52859+int
52860+gr_handle_chroot_sysctl(const int op)
52861+{
52862+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52863+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
52864+ proc_is_chrooted(current))
52865+ return -EACCES;
52866+#endif
52867+ return 0;
52868+}
52869+
52870+void
52871+gr_handle_chroot_chdir(struct path *path)
52872+{
52873+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52874+ if (grsec_enable_chroot_chdir)
52875+ set_fs_pwd(current->fs, path);
52876+#endif
52877+ return;
52878+}
52879+
52880+int
52881+gr_handle_chroot_chmod(const struct dentry *dentry,
52882+ const struct vfsmount *mnt, const int mode)
52883+{
52884+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52885+ /* allow chmod +s on directories, but not files */
52886+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52887+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52888+ proc_is_chrooted(current)) {
52889+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52890+ return -EPERM;
52891+ }
52892+#endif
52893+ return 0;
52894+}
52895diff -urNp linux-3.0.7/grsecurity/grsec_disabled.c linux-3.0.7/grsecurity/grsec_disabled.c
52896--- linux-3.0.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52897+++ linux-3.0.7/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
52898@@ -0,0 +1,433 @@
52899+#include <linux/kernel.h>
52900+#include <linux/module.h>
52901+#include <linux/sched.h>
52902+#include <linux/file.h>
52903+#include <linux/fs.h>
52904+#include <linux/kdev_t.h>
52905+#include <linux/net.h>
52906+#include <linux/in.h>
52907+#include <linux/ip.h>
52908+#include <linux/skbuff.h>
52909+#include <linux/sysctl.h>
52910+
52911+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52912+void
52913+pax_set_initial_flags(struct linux_binprm *bprm)
52914+{
52915+ return;
52916+}
52917+#endif
52918+
52919+#ifdef CONFIG_SYSCTL
52920+__u32
52921+gr_handle_sysctl(const struct ctl_table * table, const int op)
52922+{
52923+ return 0;
52924+}
52925+#endif
52926+
52927+#ifdef CONFIG_TASKSTATS
52928+int gr_is_taskstats_denied(int pid)
52929+{
52930+ return 0;
52931+}
52932+#endif
52933+
52934+int
52935+gr_acl_is_enabled(void)
52936+{
52937+ return 0;
52938+}
52939+
52940+int
52941+gr_handle_rawio(const struct inode *inode)
52942+{
52943+ return 0;
52944+}
52945+
52946+void
52947+gr_acl_handle_psacct(struct task_struct *task, const long code)
52948+{
52949+ return;
52950+}
52951+
52952+int
52953+gr_handle_ptrace(struct task_struct *task, const long request)
52954+{
52955+ return 0;
52956+}
52957+
52958+int
52959+gr_handle_proc_ptrace(struct task_struct *task)
52960+{
52961+ return 0;
52962+}
52963+
52964+void
52965+gr_learn_resource(const struct task_struct *task,
52966+ const int res, const unsigned long wanted, const int gt)
52967+{
52968+ return;
52969+}
52970+
52971+int
52972+gr_set_acls(const int type)
52973+{
52974+ return 0;
52975+}
52976+
52977+int
52978+gr_check_hidden_task(const struct task_struct *tsk)
52979+{
52980+ return 0;
52981+}
52982+
52983+int
52984+gr_check_protected_task(const struct task_struct *task)
52985+{
52986+ return 0;
52987+}
52988+
52989+int
52990+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52991+{
52992+ return 0;
52993+}
52994+
52995+void
52996+gr_copy_label(struct task_struct *tsk)
52997+{
52998+ return;
52999+}
53000+
53001+void
53002+gr_set_pax_flags(struct task_struct *task)
53003+{
53004+ return;
53005+}
53006+
53007+int
53008+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53009+ const int unsafe_share)
53010+{
53011+ return 0;
53012+}
53013+
53014+void
53015+gr_handle_delete(const ino_t ino, const dev_t dev)
53016+{
53017+ return;
53018+}
53019+
53020+void
53021+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53022+{
53023+ return;
53024+}
53025+
53026+void
53027+gr_handle_crash(struct task_struct *task, const int sig)
53028+{
53029+ return;
53030+}
53031+
53032+int
53033+gr_check_crash_exec(const struct file *filp)
53034+{
53035+ return 0;
53036+}
53037+
53038+int
53039+gr_check_crash_uid(const uid_t uid)
53040+{
53041+ return 0;
53042+}
53043+
53044+void
53045+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53046+ struct dentry *old_dentry,
53047+ struct dentry *new_dentry,
53048+ struct vfsmount *mnt, const __u8 replace)
53049+{
53050+ return;
53051+}
53052+
53053+int
53054+gr_search_socket(const int family, const int type, const int protocol)
53055+{
53056+ return 1;
53057+}
53058+
53059+int
53060+gr_search_connectbind(const int mode, const struct socket *sock,
53061+ const struct sockaddr_in *addr)
53062+{
53063+ return 0;
53064+}
53065+
53066+void
53067+gr_handle_alertkill(struct task_struct *task)
53068+{
53069+ return;
53070+}
53071+
53072+__u32
53073+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53074+{
53075+ return 1;
53076+}
53077+
53078+__u32
53079+gr_acl_handle_hidden_file(const struct dentry * dentry,
53080+ const struct vfsmount * mnt)
53081+{
53082+ return 1;
53083+}
53084+
53085+__u32
53086+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53087+ const int fmode)
53088+{
53089+ return 1;
53090+}
53091+
53092+__u32
53093+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53094+{
53095+ return 1;
53096+}
53097+
53098+__u32
53099+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53100+{
53101+ return 1;
53102+}
53103+
53104+int
53105+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53106+ unsigned int *vm_flags)
53107+{
53108+ return 1;
53109+}
53110+
53111+__u32
53112+gr_acl_handle_truncate(const struct dentry * dentry,
53113+ const struct vfsmount * mnt)
53114+{
53115+ return 1;
53116+}
53117+
53118+__u32
53119+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53120+{
53121+ return 1;
53122+}
53123+
53124+__u32
53125+gr_acl_handle_access(const struct dentry * dentry,
53126+ const struct vfsmount * mnt, const int fmode)
53127+{
53128+ return 1;
53129+}
53130+
53131+__u32
53132+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53133+ mode_t mode)
53134+{
53135+ return 1;
53136+}
53137+
53138+__u32
53139+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53140+ mode_t mode)
53141+{
53142+ return 1;
53143+}
53144+
53145+__u32
53146+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53147+{
53148+ return 1;
53149+}
53150+
53151+__u32
53152+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53153+{
53154+ return 1;
53155+}
53156+
53157+void
53158+grsecurity_init(void)
53159+{
53160+ return;
53161+}
53162+
53163+__u32
53164+gr_acl_handle_mknod(const struct dentry * new_dentry,
53165+ const struct dentry * parent_dentry,
53166+ const struct vfsmount * parent_mnt,
53167+ const int mode)
53168+{
53169+ return 1;
53170+}
53171+
53172+__u32
53173+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53174+ const struct dentry * parent_dentry,
53175+ const struct vfsmount * parent_mnt)
53176+{
53177+ return 1;
53178+}
53179+
53180+__u32
53181+gr_acl_handle_symlink(const struct dentry * new_dentry,
53182+ const struct dentry * parent_dentry,
53183+ const struct vfsmount * parent_mnt, const char *from)
53184+{
53185+ return 1;
53186+}
53187+
53188+__u32
53189+gr_acl_handle_link(const struct dentry * new_dentry,
53190+ const struct dentry * parent_dentry,
53191+ const struct vfsmount * parent_mnt,
53192+ const struct dentry * old_dentry,
53193+ const struct vfsmount * old_mnt, const char *to)
53194+{
53195+ return 1;
53196+}
53197+
53198+int
53199+gr_acl_handle_rename(const struct dentry *new_dentry,
53200+ const struct dentry *parent_dentry,
53201+ const struct vfsmount *parent_mnt,
53202+ const struct dentry *old_dentry,
53203+ const struct inode *old_parent_inode,
53204+ const struct vfsmount *old_mnt, const char *newname)
53205+{
53206+ return 0;
53207+}
53208+
53209+int
53210+gr_acl_handle_filldir(const struct file *file, const char *name,
53211+ const int namelen, const ino_t ino)
53212+{
53213+ return 1;
53214+}
53215+
53216+int
53217+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53218+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53219+{
53220+ return 1;
53221+}
53222+
53223+int
53224+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53225+{
53226+ return 0;
53227+}
53228+
53229+int
53230+gr_search_accept(const struct socket *sock)
53231+{
53232+ return 0;
53233+}
53234+
53235+int
53236+gr_search_listen(const struct socket *sock)
53237+{
53238+ return 0;
53239+}
53240+
53241+int
53242+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53243+{
53244+ return 0;
53245+}
53246+
53247+__u32
53248+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53249+{
53250+ return 1;
53251+}
53252+
53253+__u32
53254+gr_acl_handle_creat(const struct dentry * dentry,
53255+ const struct dentry * p_dentry,
53256+ const struct vfsmount * p_mnt, const int fmode,
53257+ const int imode)
53258+{
53259+ return 1;
53260+}
53261+
53262+void
53263+gr_acl_handle_exit(void)
53264+{
53265+ return;
53266+}
53267+
53268+int
53269+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53270+{
53271+ return 1;
53272+}
53273+
53274+void
53275+gr_set_role_label(const uid_t uid, const gid_t gid)
53276+{
53277+ return;
53278+}
53279+
53280+int
53281+gr_acl_handle_procpidmem(const struct task_struct *task)
53282+{
53283+ return 0;
53284+}
53285+
53286+int
53287+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53288+{
53289+ return 0;
53290+}
53291+
53292+int
53293+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53294+{
53295+ return 0;
53296+}
53297+
53298+void
53299+gr_set_kernel_label(struct task_struct *task)
53300+{
53301+ return;
53302+}
53303+
53304+int
53305+gr_check_user_change(int real, int effective, int fs)
53306+{
53307+ return 0;
53308+}
53309+
53310+int
53311+gr_check_group_change(int real, int effective, int fs)
53312+{
53313+ return 0;
53314+}
53315+
53316+int gr_acl_enable_at_secure(void)
53317+{
53318+ return 0;
53319+}
53320+
53321+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53322+{
53323+ return dentry->d_inode->i_sb->s_dev;
53324+}
53325+
53326+EXPORT_SYMBOL(gr_learn_resource);
53327+EXPORT_SYMBOL(gr_set_kernel_label);
53328+#ifdef CONFIG_SECURITY
53329+EXPORT_SYMBOL(gr_check_user_change);
53330+EXPORT_SYMBOL(gr_check_group_change);
53331+#endif
53332diff -urNp linux-3.0.7/grsecurity/grsec_exec.c linux-3.0.7/grsecurity/grsec_exec.c
53333--- linux-3.0.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53334+++ linux-3.0.7/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
53335@@ -0,0 +1,145 @@
53336+#include <linux/kernel.h>
53337+#include <linux/sched.h>
53338+#include <linux/file.h>
53339+#include <linux/binfmts.h>
53340+#include <linux/fs.h>
53341+#include <linux/types.h>
53342+#include <linux/grdefs.h>
53343+#include <linux/grsecurity.h>
53344+#include <linux/grinternal.h>
53345+#include <linux/capability.h>
53346+#include <linux/module.h>
53347+
53348+#include <asm/uaccess.h>
53349+
53350+#ifdef CONFIG_GRKERNSEC_EXECLOG
53351+static char gr_exec_arg_buf[132];
53352+static DEFINE_MUTEX(gr_exec_arg_mutex);
53353+#endif
53354+
53355+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53356+
53357+void
53358+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53359+{
53360+#ifdef CONFIG_GRKERNSEC_EXECLOG
53361+ char *grarg = gr_exec_arg_buf;
53362+ unsigned int i, x, execlen = 0;
53363+ char c;
53364+
53365+ if (!((grsec_enable_execlog && grsec_enable_group &&
53366+ in_group_p(grsec_audit_gid))
53367+ || (grsec_enable_execlog && !grsec_enable_group)))
53368+ return;
53369+
53370+ mutex_lock(&gr_exec_arg_mutex);
53371+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53372+
53373+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53374+ const char __user *p;
53375+ unsigned int len;
53376+
53377+ p = get_user_arg_ptr(argv, i);
53378+ if (IS_ERR(p))
53379+ goto log;
53380+
53381+ len = strnlen_user(p, 128 - execlen);
53382+ if (len > 128 - execlen)
53383+ len = 128 - execlen;
53384+ else if (len > 0)
53385+ len--;
53386+ if (copy_from_user(grarg + execlen, p, len))
53387+ goto log;
53388+
53389+ /* rewrite unprintable characters */
53390+ for (x = 0; x < len; x++) {
53391+ c = *(grarg + execlen + x);
53392+ if (c < 32 || c > 126)
53393+ *(grarg + execlen + x) = ' ';
53394+ }
53395+
53396+ execlen += len;
53397+ *(grarg + execlen) = ' ';
53398+ *(grarg + execlen + 1) = '\0';
53399+ execlen++;
53400+ }
53401+
53402+ log:
53403+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53404+ bprm->file->f_path.mnt, grarg);
53405+ mutex_unlock(&gr_exec_arg_mutex);
53406+#endif
53407+ return;
53408+}
53409+
53410+#ifdef CONFIG_GRKERNSEC
53411+extern int gr_acl_is_capable(const int cap);
53412+extern int gr_acl_is_capable_nolog(const int cap);
53413+extern int gr_chroot_is_capable(const int cap);
53414+extern int gr_chroot_is_capable_nolog(const int cap);
53415+#endif
53416+
53417+const char *captab_log[] = {
53418+ "CAP_CHOWN",
53419+ "CAP_DAC_OVERRIDE",
53420+ "CAP_DAC_READ_SEARCH",
53421+ "CAP_FOWNER",
53422+ "CAP_FSETID",
53423+ "CAP_KILL",
53424+ "CAP_SETGID",
53425+ "CAP_SETUID",
53426+ "CAP_SETPCAP",
53427+ "CAP_LINUX_IMMUTABLE",
53428+ "CAP_NET_BIND_SERVICE",
53429+ "CAP_NET_BROADCAST",
53430+ "CAP_NET_ADMIN",
53431+ "CAP_NET_RAW",
53432+ "CAP_IPC_LOCK",
53433+ "CAP_IPC_OWNER",
53434+ "CAP_SYS_MODULE",
53435+ "CAP_SYS_RAWIO",
53436+ "CAP_SYS_CHROOT",
53437+ "CAP_SYS_PTRACE",
53438+ "CAP_SYS_PACCT",
53439+ "CAP_SYS_ADMIN",
53440+ "CAP_SYS_BOOT",
53441+ "CAP_SYS_NICE",
53442+ "CAP_SYS_RESOURCE",
53443+ "CAP_SYS_TIME",
53444+ "CAP_SYS_TTY_CONFIG",
53445+ "CAP_MKNOD",
53446+ "CAP_LEASE",
53447+ "CAP_AUDIT_WRITE",
53448+ "CAP_AUDIT_CONTROL",
53449+ "CAP_SETFCAP",
53450+ "CAP_MAC_OVERRIDE",
53451+ "CAP_MAC_ADMIN",
53452+ "CAP_SYSLOG"
53453+};
53454+
53455+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
53456+
53457+int gr_is_capable(const int cap)
53458+{
53459+#ifdef CONFIG_GRKERNSEC
53460+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
53461+ return 1;
53462+ return 0;
53463+#else
53464+ return 1;
53465+#endif
53466+}
53467+
53468+int gr_is_capable_nolog(const int cap)
53469+{
53470+#ifdef CONFIG_GRKERNSEC
53471+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
53472+ return 1;
53473+ return 0;
53474+#else
53475+ return 1;
53476+#endif
53477+}
53478+
53479+EXPORT_SYMBOL(gr_is_capable);
53480+EXPORT_SYMBOL(gr_is_capable_nolog);
53481diff -urNp linux-3.0.7/grsecurity/grsec_fifo.c linux-3.0.7/grsecurity/grsec_fifo.c
53482--- linux-3.0.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53483+++ linux-3.0.7/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
53484@@ -0,0 +1,24 @@
53485+#include <linux/kernel.h>
53486+#include <linux/sched.h>
53487+#include <linux/fs.h>
53488+#include <linux/file.h>
53489+#include <linux/grinternal.h>
53490+
53491+int
53492+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53493+ const struct dentry *dir, const int flag, const int acc_mode)
53494+{
53495+#ifdef CONFIG_GRKERNSEC_FIFO
53496+ const struct cred *cred = current_cred();
53497+
53498+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53499+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53500+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53501+ (cred->fsuid != dentry->d_inode->i_uid)) {
53502+ if (!inode_permission(dentry->d_inode, acc_mode))
53503+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53504+ return -EACCES;
53505+ }
53506+#endif
53507+ return 0;
53508+}
53509diff -urNp linux-3.0.7/grsecurity/grsec_fork.c linux-3.0.7/grsecurity/grsec_fork.c
53510--- linux-3.0.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53511+++ linux-3.0.7/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
53512@@ -0,0 +1,23 @@
53513+#include <linux/kernel.h>
53514+#include <linux/sched.h>
53515+#include <linux/grsecurity.h>
53516+#include <linux/grinternal.h>
53517+#include <linux/errno.h>
53518+
53519+void
53520+gr_log_forkfail(const int retval)
53521+{
53522+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53523+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53524+ switch (retval) {
53525+ case -EAGAIN:
53526+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53527+ break;
53528+ case -ENOMEM:
53529+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53530+ break;
53531+ }
53532+ }
53533+#endif
53534+ return;
53535+}
53536diff -urNp linux-3.0.7/grsecurity/grsec_init.c linux-3.0.7/grsecurity/grsec_init.c
53537--- linux-3.0.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53538+++ linux-3.0.7/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
53539@@ -0,0 +1,269 @@
53540+#include <linux/kernel.h>
53541+#include <linux/sched.h>
53542+#include <linux/mm.h>
53543+#include <linux/gracl.h>
53544+#include <linux/slab.h>
53545+#include <linux/vmalloc.h>
53546+#include <linux/percpu.h>
53547+#include <linux/module.h>
53548+
53549+int grsec_enable_brute;
53550+int grsec_enable_link;
53551+int grsec_enable_dmesg;
53552+int grsec_enable_harden_ptrace;
53553+int grsec_enable_fifo;
53554+int grsec_enable_execlog;
53555+int grsec_enable_signal;
53556+int grsec_enable_forkfail;
53557+int grsec_enable_audit_ptrace;
53558+int grsec_enable_time;
53559+int grsec_enable_audit_textrel;
53560+int grsec_enable_group;
53561+int grsec_audit_gid;
53562+int grsec_enable_chdir;
53563+int grsec_enable_mount;
53564+int grsec_enable_rofs;
53565+int grsec_enable_chroot_findtask;
53566+int grsec_enable_chroot_mount;
53567+int grsec_enable_chroot_shmat;
53568+int grsec_enable_chroot_fchdir;
53569+int grsec_enable_chroot_double;
53570+int grsec_enable_chroot_pivot;
53571+int grsec_enable_chroot_chdir;
53572+int grsec_enable_chroot_chmod;
53573+int grsec_enable_chroot_mknod;
53574+int grsec_enable_chroot_nice;
53575+int grsec_enable_chroot_execlog;
53576+int grsec_enable_chroot_caps;
53577+int grsec_enable_chroot_sysctl;
53578+int grsec_enable_chroot_unix;
53579+int grsec_enable_tpe;
53580+int grsec_tpe_gid;
53581+int grsec_enable_blackhole;
53582+#ifdef CONFIG_IPV6_MODULE
53583+EXPORT_SYMBOL(grsec_enable_blackhole);
53584+#endif
53585+int grsec_lastack_retries;
53586+int grsec_enable_tpe_all;
53587+int grsec_enable_tpe_invert;
53588+int grsec_enable_socket_all;
53589+int grsec_socket_all_gid;
53590+int grsec_enable_socket_client;
53591+int grsec_socket_client_gid;
53592+int grsec_enable_socket_server;
53593+int grsec_socket_server_gid;
53594+int grsec_resource_logging;
53595+int grsec_disable_privio;
53596+int grsec_enable_log_rwxmaps;
53597+int grsec_lock;
53598+
53599+DEFINE_SPINLOCK(grsec_alert_lock);
53600+unsigned long grsec_alert_wtime = 0;
53601+unsigned long grsec_alert_fyet = 0;
53602+
53603+DEFINE_SPINLOCK(grsec_audit_lock);
53604+
53605+DEFINE_RWLOCK(grsec_exec_file_lock);
53606+
53607+char *gr_shared_page[4];
53608+
53609+char *gr_alert_log_fmt;
53610+char *gr_audit_log_fmt;
53611+char *gr_alert_log_buf;
53612+char *gr_audit_log_buf;
53613+
53614+extern struct gr_arg *gr_usermode;
53615+extern unsigned char *gr_system_salt;
53616+extern unsigned char *gr_system_sum;
53617+
53618+void __init
53619+grsecurity_init(void)
53620+{
53621+ int j;
53622+ /* create the per-cpu shared pages */
53623+
53624+#ifdef CONFIG_X86
53625+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53626+#endif
53627+
53628+ for (j = 0; j < 4; j++) {
53629+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53630+ if (gr_shared_page[j] == NULL) {
53631+ panic("Unable to allocate grsecurity shared page");
53632+ return;
53633+ }
53634+ }
53635+
53636+ /* allocate log buffers */
53637+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53638+ if (!gr_alert_log_fmt) {
53639+ panic("Unable to allocate grsecurity alert log format buffer");
53640+ return;
53641+ }
53642+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53643+ if (!gr_audit_log_fmt) {
53644+ panic("Unable to allocate grsecurity audit log format buffer");
53645+ return;
53646+ }
53647+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53648+ if (!gr_alert_log_buf) {
53649+ panic("Unable to allocate grsecurity alert log buffer");
53650+ return;
53651+ }
53652+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53653+ if (!gr_audit_log_buf) {
53654+ panic("Unable to allocate grsecurity audit log buffer");
53655+ return;
53656+ }
53657+
53658+ /* allocate memory for authentication structure */
53659+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53660+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53661+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53662+
53663+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53664+ panic("Unable to allocate grsecurity authentication structure");
53665+ return;
53666+ }
53667+
53668+
53669+#ifdef CONFIG_GRKERNSEC_IO
53670+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53671+ grsec_disable_privio = 1;
53672+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53673+ grsec_disable_privio = 1;
53674+#else
53675+ grsec_disable_privio = 0;
53676+#endif
53677+#endif
53678+
53679+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53680+ /* for backward compatibility, tpe_invert always defaults to on if
53681+ enabled in the kernel
53682+ */
53683+ grsec_enable_tpe_invert = 1;
53684+#endif
53685+
53686+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53687+#ifndef CONFIG_GRKERNSEC_SYSCTL
53688+ grsec_lock = 1;
53689+#endif
53690+
53691+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53692+ grsec_enable_audit_textrel = 1;
53693+#endif
53694+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53695+ grsec_enable_log_rwxmaps = 1;
53696+#endif
53697+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53698+ grsec_enable_group = 1;
53699+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53700+#endif
53701+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53702+ grsec_enable_chdir = 1;
53703+#endif
53704+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53705+ grsec_enable_harden_ptrace = 1;
53706+#endif
53707+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53708+ grsec_enable_mount = 1;
53709+#endif
53710+#ifdef CONFIG_GRKERNSEC_LINK
53711+ grsec_enable_link = 1;
53712+#endif
53713+#ifdef CONFIG_GRKERNSEC_BRUTE
53714+ grsec_enable_brute = 1;
53715+#endif
53716+#ifdef CONFIG_GRKERNSEC_DMESG
53717+ grsec_enable_dmesg = 1;
53718+#endif
53719+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53720+ grsec_enable_blackhole = 1;
53721+ grsec_lastack_retries = 4;
53722+#endif
53723+#ifdef CONFIG_GRKERNSEC_FIFO
53724+ grsec_enable_fifo = 1;
53725+#endif
53726+#ifdef CONFIG_GRKERNSEC_EXECLOG
53727+ grsec_enable_execlog = 1;
53728+#endif
53729+#ifdef CONFIG_GRKERNSEC_SIGNAL
53730+ grsec_enable_signal = 1;
53731+#endif
53732+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53733+ grsec_enable_forkfail = 1;
53734+#endif
53735+#ifdef CONFIG_GRKERNSEC_TIME
53736+ grsec_enable_time = 1;
53737+#endif
53738+#ifdef CONFIG_GRKERNSEC_RESLOG
53739+ grsec_resource_logging = 1;
53740+#endif
53741+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53742+ grsec_enable_chroot_findtask = 1;
53743+#endif
53744+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53745+ grsec_enable_chroot_unix = 1;
53746+#endif
53747+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53748+ grsec_enable_chroot_mount = 1;
53749+#endif
53750+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53751+ grsec_enable_chroot_fchdir = 1;
53752+#endif
53753+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53754+ grsec_enable_chroot_shmat = 1;
53755+#endif
53756+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53757+ grsec_enable_audit_ptrace = 1;
53758+#endif
53759+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53760+ grsec_enable_chroot_double = 1;
53761+#endif
53762+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53763+ grsec_enable_chroot_pivot = 1;
53764+#endif
53765+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53766+ grsec_enable_chroot_chdir = 1;
53767+#endif
53768+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53769+ grsec_enable_chroot_chmod = 1;
53770+#endif
53771+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53772+ grsec_enable_chroot_mknod = 1;
53773+#endif
53774+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53775+ grsec_enable_chroot_nice = 1;
53776+#endif
53777+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53778+ grsec_enable_chroot_execlog = 1;
53779+#endif
53780+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53781+ grsec_enable_chroot_caps = 1;
53782+#endif
53783+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53784+ grsec_enable_chroot_sysctl = 1;
53785+#endif
53786+#ifdef CONFIG_GRKERNSEC_TPE
53787+ grsec_enable_tpe = 1;
53788+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53789+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53790+ grsec_enable_tpe_all = 1;
53791+#endif
53792+#endif
53793+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53794+ grsec_enable_socket_all = 1;
53795+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53796+#endif
53797+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53798+ grsec_enable_socket_client = 1;
53799+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53800+#endif
53801+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53802+ grsec_enable_socket_server = 1;
53803+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53804+#endif
53805+#endif
53806+
53807+ return;
53808+}
53809diff -urNp linux-3.0.7/grsecurity/grsec_link.c linux-3.0.7/grsecurity/grsec_link.c
53810--- linux-3.0.7/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53811+++ linux-3.0.7/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
53812@@ -0,0 +1,43 @@
53813+#include <linux/kernel.h>
53814+#include <linux/sched.h>
53815+#include <linux/fs.h>
53816+#include <linux/file.h>
53817+#include <linux/grinternal.h>
53818+
53819+int
53820+gr_handle_follow_link(const struct inode *parent,
53821+ const struct inode *inode,
53822+ const struct dentry *dentry, const struct vfsmount *mnt)
53823+{
53824+#ifdef CONFIG_GRKERNSEC_LINK
53825+ const struct cred *cred = current_cred();
53826+
53827+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53828+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53829+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53830+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53831+ return -EACCES;
53832+ }
53833+#endif
53834+ return 0;
53835+}
53836+
53837+int
53838+gr_handle_hardlink(const struct dentry *dentry,
53839+ const struct vfsmount *mnt,
53840+ struct inode *inode, const int mode, const char *to)
53841+{
53842+#ifdef CONFIG_GRKERNSEC_LINK
53843+ const struct cred *cred = current_cred();
53844+
53845+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53846+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53847+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53848+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53849+ !capable(CAP_FOWNER) && cred->uid) {
53850+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53851+ return -EPERM;
53852+ }
53853+#endif
53854+ return 0;
53855+}
53856diff -urNp linux-3.0.7/grsecurity/grsec_log.c linux-3.0.7/grsecurity/grsec_log.c
53857--- linux-3.0.7/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53858+++ linux-3.0.7/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
53859@@ -0,0 +1,315 @@
53860+#include <linux/kernel.h>
53861+#include <linux/sched.h>
53862+#include <linux/file.h>
53863+#include <linux/tty.h>
53864+#include <linux/fs.h>
53865+#include <linux/grinternal.h>
53866+
53867+#ifdef CONFIG_TREE_PREEMPT_RCU
53868+#define DISABLE_PREEMPT() preempt_disable()
53869+#define ENABLE_PREEMPT() preempt_enable()
53870+#else
53871+#define DISABLE_PREEMPT()
53872+#define ENABLE_PREEMPT()
53873+#endif
53874+
53875+#define BEGIN_LOCKS(x) \
53876+ DISABLE_PREEMPT(); \
53877+ rcu_read_lock(); \
53878+ read_lock(&tasklist_lock); \
53879+ read_lock(&grsec_exec_file_lock); \
53880+ if (x != GR_DO_AUDIT) \
53881+ spin_lock(&grsec_alert_lock); \
53882+ else \
53883+ spin_lock(&grsec_audit_lock)
53884+
53885+#define END_LOCKS(x) \
53886+ if (x != GR_DO_AUDIT) \
53887+ spin_unlock(&grsec_alert_lock); \
53888+ else \
53889+ spin_unlock(&grsec_audit_lock); \
53890+ read_unlock(&grsec_exec_file_lock); \
53891+ read_unlock(&tasklist_lock); \
53892+ rcu_read_unlock(); \
53893+ ENABLE_PREEMPT(); \
53894+ if (x == GR_DONT_AUDIT) \
53895+ gr_handle_alertkill(current)
53896+
53897+enum {
53898+ FLOODING,
53899+ NO_FLOODING
53900+};
53901+
53902+extern char *gr_alert_log_fmt;
53903+extern char *gr_audit_log_fmt;
53904+extern char *gr_alert_log_buf;
53905+extern char *gr_audit_log_buf;
53906+
53907+static int gr_log_start(int audit)
53908+{
53909+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53910+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53911+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53912+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
53913+ unsigned long curr_secs = get_seconds();
53914+
53915+ if (audit == GR_DO_AUDIT)
53916+ goto set_fmt;
53917+
53918+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
53919+ grsec_alert_wtime = curr_secs;
53920+ grsec_alert_fyet = 0;
53921+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
53922+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53923+ grsec_alert_fyet++;
53924+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53925+ grsec_alert_wtime = curr_secs;
53926+ grsec_alert_fyet++;
53927+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53928+ return FLOODING;
53929+ }
53930+ else return FLOODING;
53931+
53932+set_fmt:
53933+#endif
53934+ memset(buf, 0, PAGE_SIZE);
53935+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
53936+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53937+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53938+ } else if (current->signal->curr_ip) {
53939+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53940+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53941+ } else if (gr_acl_is_enabled()) {
53942+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53943+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53944+ } else {
53945+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
53946+ strcpy(buf, fmt);
53947+ }
53948+
53949+ return NO_FLOODING;
53950+}
53951+
53952+static void gr_log_middle(int audit, const char *msg, va_list ap)
53953+ __attribute__ ((format (printf, 2, 0)));
53954+
53955+static void gr_log_middle(int audit, const char *msg, va_list ap)
53956+{
53957+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53958+ unsigned int len = strlen(buf);
53959+
53960+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53961+
53962+ return;
53963+}
53964+
53965+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53966+ __attribute__ ((format (printf, 2, 3)));
53967+
53968+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53969+{
53970+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53971+ unsigned int len = strlen(buf);
53972+ va_list ap;
53973+
53974+ va_start(ap, msg);
53975+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53976+ va_end(ap);
53977+
53978+ return;
53979+}
53980+
53981+static void gr_log_end(int audit)
53982+{
53983+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53984+ unsigned int len = strlen(buf);
53985+
53986+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53987+ printk("%s\n", buf);
53988+
53989+ return;
53990+}
53991+
53992+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53993+{
53994+ int logtype;
53995+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53996+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53997+ void *voidptr = NULL;
53998+ int num1 = 0, num2 = 0;
53999+ unsigned long ulong1 = 0, ulong2 = 0;
54000+ struct dentry *dentry = NULL;
54001+ struct vfsmount *mnt = NULL;
54002+ struct file *file = NULL;
54003+ struct task_struct *task = NULL;
54004+ const struct cred *cred, *pcred;
54005+ va_list ap;
54006+
54007+ BEGIN_LOCKS(audit);
54008+ logtype = gr_log_start(audit);
54009+ if (logtype == FLOODING) {
54010+ END_LOCKS(audit);
54011+ return;
54012+ }
54013+ va_start(ap, argtypes);
54014+ switch (argtypes) {
54015+ case GR_TTYSNIFF:
54016+ task = va_arg(ap, struct task_struct *);
54017+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54018+ break;
54019+ case GR_SYSCTL_HIDDEN:
54020+ str1 = va_arg(ap, char *);
54021+ gr_log_middle_varargs(audit, msg, result, str1);
54022+ break;
54023+ case GR_RBAC:
54024+ dentry = va_arg(ap, struct dentry *);
54025+ mnt = va_arg(ap, struct vfsmount *);
54026+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54027+ break;
54028+ case GR_RBAC_STR:
54029+ dentry = va_arg(ap, struct dentry *);
54030+ mnt = va_arg(ap, struct vfsmount *);
54031+ str1 = va_arg(ap, char *);
54032+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54033+ break;
54034+ case GR_STR_RBAC:
54035+ str1 = va_arg(ap, char *);
54036+ dentry = va_arg(ap, struct dentry *);
54037+ mnt = va_arg(ap, struct vfsmount *);
54038+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54039+ break;
54040+ case GR_RBAC_MODE2:
54041+ dentry = va_arg(ap, struct dentry *);
54042+ mnt = va_arg(ap, struct vfsmount *);
54043+ str1 = va_arg(ap, char *);
54044+ str2 = va_arg(ap, char *);
54045+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54046+ break;
54047+ case GR_RBAC_MODE3:
54048+ dentry = va_arg(ap, struct dentry *);
54049+ mnt = va_arg(ap, struct vfsmount *);
54050+ str1 = va_arg(ap, char *);
54051+ str2 = va_arg(ap, char *);
54052+ str3 = va_arg(ap, char *);
54053+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54054+ break;
54055+ case GR_FILENAME:
54056+ dentry = va_arg(ap, struct dentry *);
54057+ mnt = va_arg(ap, struct vfsmount *);
54058+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54059+ break;
54060+ case GR_STR_FILENAME:
54061+ str1 = va_arg(ap, char *);
54062+ dentry = va_arg(ap, struct dentry *);
54063+ mnt = va_arg(ap, struct vfsmount *);
54064+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54065+ break;
54066+ case GR_FILENAME_STR:
54067+ dentry = va_arg(ap, struct dentry *);
54068+ mnt = va_arg(ap, struct vfsmount *);
54069+ str1 = va_arg(ap, char *);
54070+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54071+ break;
54072+ case GR_FILENAME_TWO_INT:
54073+ dentry = va_arg(ap, struct dentry *);
54074+ mnt = va_arg(ap, struct vfsmount *);
54075+ num1 = va_arg(ap, int);
54076+ num2 = va_arg(ap, int);
54077+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54078+ break;
54079+ case GR_FILENAME_TWO_INT_STR:
54080+ dentry = va_arg(ap, struct dentry *);
54081+ mnt = va_arg(ap, struct vfsmount *);
54082+ num1 = va_arg(ap, int);
54083+ num2 = va_arg(ap, int);
54084+ str1 = va_arg(ap, char *);
54085+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54086+ break;
54087+ case GR_TEXTREL:
54088+ file = va_arg(ap, struct file *);
54089+ ulong1 = va_arg(ap, unsigned long);
54090+ ulong2 = va_arg(ap, unsigned long);
54091+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54092+ break;
54093+ case GR_PTRACE:
54094+ task = va_arg(ap, struct task_struct *);
54095+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54096+ break;
54097+ case GR_RESOURCE:
54098+ task = va_arg(ap, struct task_struct *);
54099+ cred = __task_cred(task);
54100+ pcred = __task_cred(task->real_parent);
54101+ ulong1 = va_arg(ap, unsigned long);
54102+ str1 = va_arg(ap, char *);
54103+ ulong2 = va_arg(ap, unsigned long);
54104+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54105+ break;
54106+ case GR_CAP:
54107+ task = va_arg(ap, struct task_struct *);
54108+ cred = __task_cred(task);
54109+ pcred = __task_cred(task->real_parent);
54110+ str1 = va_arg(ap, char *);
54111+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54112+ break;
54113+ case GR_SIG:
54114+ str1 = va_arg(ap, char *);
54115+ voidptr = va_arg(ap, void *);
54116+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54117+ break;
54118+ case GR_SIG2:
54119+ task = va_arg(ap, struct task_struct *);
54120+ cred = __task_cred(task);
54121+ pcred = __task_cred(task->real_parent);
54122+ num1 = va_arg(ap, int);
54123+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54124+ break;
54125+ case GR_CRASH1:
54126+ task = va_arg(ap, struct task_struct *);
54127+ cred = __task_cred(task);
54128+ pcred = __task_cred(task->real_parent);
54129+ ulong1 = va_arg(ap, unsigned long);
54130+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54131+ break;
54132+ case GR_CRASH2:
54133+ task = va_arg(ap, struct task_struct *);
54134+ cred = __task_cred(task);
54135+ pcred = __task_cred(task->real_parent);
54136+ ulong1 = va_arg(ap, unsigned long);
54137+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54138+ break;
54139+ case GR_RWXMAP:
54140+ file = va_arg(ap, struct file *);
54141+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54142+ break;
54143+ case GR_PSACCT:
54144+ {
54145+ unsigned int wday, cday;
54146+ __u8 whr, chr;
54147+ __u8 wmin, cmin;
54148+ __u8 wsec, csec;
54149+ char cur_tty[64] = { 0 };
54150+ char parent_tty[64] = { 0 };
54151+
54152+ task = va_arg(ap, struct task_struct *);
54153+ wday = va_arg(ap, unsigned int);
54154+ cday = va_arg(ap, unsigned int);
54155+ whr = va_arg(ap, int);
54156+ chr = va_arg(ap, int);
54157+ wmin = va_arg(ap, int);
54158+ cmin = va_arg(ap, int);
54159+ wsec = va_arg(ap, int);
54160+ csec = va_arg(ap, int);
54161+ ulong1 = va_arg(ap, unsigned long);
54162+ cred = __task_cred(task);
54163+ pcred = __task_cred(task->real_parent);
54164+
54165+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54166+ }
54167+ break;
54168+ default:
54169+ gr_log_middle(audit, msg, ap);
54170+ }
54171+ va_end(ap);
54172+ gr_log_end(audit);
54173+ END_LOCKS(audit);
54174+}
54175diff -urNp linux-3.0.7/grsecurity/grsec_mem.c linux-3.0.7/grsecurity/grsec_mem.c
54176--- linux-3.0.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54177+++ linux-3.0.7/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
54178@@ -0,0 +1,33 @@
54179+#include <linux/kernel.h>
54180+#include <linux/sched.h>
54181+#include <linux/mm.h>
54182+#include <linux/mman.h>
54183+#include <linux/grinternal.h>
54184+
54185+void
54186+gr_handle_ioperm(void)
54187+{
54188+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54189+ return;
54190+}
54191+
54192+void
54193+gr_handle_iopl(void)
54194+{
54195+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54196+ return;
54197+}
54198+
54199+void
54200+gr_handle_mem_readwrite(u64 from, u64 to)
54201+{
54202+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54203+ return;
54204+}
54205+
54206+void
54207+gr_handle_vm86(void)
54208+{
54209+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54210+ return;
54211+}
54212diff -urNp linux-3.0.7/grsecurity/grsec_mount.c linux-3.0.7/grsecurity/grsec_mount.c
54213--- linux-3.0.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54214+++ linux-3.0.7/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
54215@@ -0,0 +1,62 @@
54216+#include <linux/kernel.h>
54217+#include <linux/sched.h>
54218+#include <linux/mount.h>
54219+#include <linux/grsecurity.h>
54220+#include <linux/grinternal.h>
54221+
54222+void
54223+gr_log_remount(const char *devname, const int retval)
54224+{
54225+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54226+ if (grsec_enable_mount && (retval >= 0))
54227+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54228+#endif
54229+ return;
54230+}
54231+
54232+void
54233+gr_log_unmount(const char *devname, const int retval)
54234+{
54235+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54236+ if (grsec_enable_mount && (retval >= 0))
54237+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54238+#endif
54239+ return;
54240+}
54241+
54242+void
54243+gr_log_mount(const char *from, const char *to, const int retval)
54244+{
54245+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54246+ if (grsec_enable_mount && (retval >= 0))
54247+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54248+#endif
54249+ return;
54250+}
54251+
54252+int
54253+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54254+{
54255+#ifdef CONFIG_GRKERNSEC_ROFS
54256+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54257+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54258+ return -EPERM;
54259+ } else
54260+ return 0;
54261+#endif
54262+ return 0;
54263+}
54264+
54265+int
54266+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54267+{
54268+#ifdef CONFIG_GRKERNSEC_ROFS
54269+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54270+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54271+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54272+ return -EPERM;
54273+ } else
54274+ return 0;
54275+#endif
54276+ return 0;
54277+}
54278diff -urNp linux-3.0.7/grsecurity/grsec_pax.c linux-3.0.7/grsecurity/grsec_pax.c
54279--- linux-3.0.7/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54280+++ linux-3.0.7/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
54281@@ -0,0 +1,36 @@
54282+#include <linux/kernel.h>
54283+#include <linux/sched.h>
54284+#include <linux/mm.h>
54285+#include <linux/file.h>
54286+#include <linux/grinternal.h>
54287+#include <linux/grsecurity.h>
54288+
54289+void
54290+gr_log_textrel(struct vm_area_struct * vma)
54291+{
54292+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54293+ if (grsec_enable_audit_textrel)
54294+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54295+#endif
54296+ return;
54297+}
54298+
54299+void
54300+gr_log_rwxmmap(struct file *file)
54301+{
54302+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54303+ if (grsec_enable_log_rwxmaps)
54304+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54305+#endif
54306+ return;
54307+}
54308+
54309+void
54310+gr_log_rwxmprotect(struct file *file)
54311+{
54312+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54313+ if (grsec_enable_log_rwxmaps)
54314+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54315+#endif
54316+ return;
54317+}
54318diff -urNp linux-3.0.7/grsecurity/grsec_ptrace.c linux-3.0.7/grsecurity/grsec_ptrace.c
54319--- linux-3.0.7/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54320+++ linux-3.0.7/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
54321@@ -0,0 +1,14 @@
54322+#include <linux/kernel.h>
54323+#include <linux/sched.h>
54324+#include <linux/grinternal.h>
54325+#include <linux/grsecurity.h>
54326+
54327+void
54328+gr_audit_ptrace(struct task_struct *task)
54329+{
54330+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54331+ if (grsec_enable_audit_ptrace)
54332+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54333+#endif
54334+ return;
54335+}
54336diff -urNp linux-3.0.7/grsecurity/grsec_sig.c linux-3.0.7/grsecurity/grsec_sig.c
54337--- linux-3.0.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54338+++ linux-3.0.7/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
54339@@ -0,0 +1,206 @@
54340+#include <linux/kernel.h>
54341+#include <linux/sched.h>
54342+#include <linux/delay.h>
54343+#include <linux/grsecurity.h>
54344+#include <linux/grinternal.h>
54345+#include <linux/hardirq.h>
54346+
54347+char *signames[] = {
54348+ [SIGSEGV] = "Segmentation fault",
54349+ [SIGILL] = "Illegal instruction",
54350+ [SIGABRT] = "Abort",
54351+ [SIGBUS] = "Invalid alignment/Bus error"
54352+};
54353+
54354+void
54355+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54356+{
54357+#ifdef CONFIG_GRKERNSEC_SIGNAL
54358+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54359+ (sig == SIGABRT) || (sig == SIGBUS))) {
54360+ if (t->pid == current->pid) {
54361+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54362+ } else {
54363+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54364+ }
54365+ }
54366+#endif
54367+ return;
54368+}
54369+
54370+int
54371+gr_handle_signal(const struct task_struct *p, const int sig)
54372+{
54373+#ifdef CONFIG_GRKERNSEC
54374+ if (current->pid > 1 && gr_check_protected_task(p)) {
54375+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54376+ return -EPERM;
54377+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54378+ return -EPERM;
54379+ }
54380+#endif
54381+ return 0;
54382+}
54383+
54384+#ifdef CONFIG_GRKERNSEC
54385+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54386+
54387+int gr_fake_force_sig(int sig, struct task_struct *t)
54388+{
54389+ unsigned long int flags;
54390+ int ret, blocked, ignored;
54391+ struct k_sigaction *action;
54392+
54393+ spin_lock_irqsave(&t->sighand->siglock, flags);
54394+ action = &t->sighand->action[sig-1];
54395+ ignored = action->sa.sa_handler == SIG_IGN;
54396+ blocked = sigismember(&t->blocked, sig);
54397+ if (blocked || ignored) {
54398+ action->sa.sa_handler = SIG_DFL;
54399+ if (blocked) {
54400+ sigdelset(&t->blocked, sig);
54401+ recalc_sigpending_and_wake(t);
54402+ }
54403+ }
54404+ if (action->sa.sa_handler == SIG_DFL)
54405+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54406+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54407+
54408+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54409+
54410+ return ret;
54411+}
54412+#endif
54413+
54414+#ifdef CONFIG_GRKERNSEC_BRUTE
54415+#define GR_USER_BAN_TIME (15 * 60)
54416+
54417+static int __get_dumpable(unsigned long mm_flags)
54418+{
54419+ int ret;
54420+
54421+ ret = mm_flags & MMF_DUMPABLE_MASK;
54422+ return (ret >= 2) ? 2 : ret;
54423+}
54424+#endif
54425+
54426+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54427+{
54428+#ifdef CONFIG_GRKERNSEC_BRUTE
54429+ uid_t uid = 0;
54430+
54431+ if (!grsec_enable_brute)
54432+ return;
54433+
54434+ rcu_read_lock();
54435+ read_lock(&tasklist_lock);
54436+ read_lock(&grsec_exec_file_lock);
54437+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54438+ p->real_parent->brute = 1;
54439+ else {
54440+ const struct cred *cred = __task_cred(p), *cred2;
54441+ struct task_struct *tsk, *tsk2;
54442+
54443+ if (!__get_dumpable(mm_flags) && cred->uid) {
54444+ struct user_struct *user;
54445+
54446+ uid = cred->uid;
54447+
54448+ /* this is put upon execution past expiration */
54449+ user = find_user(uid);
54450+ if (user == NULL)
54451+ goto unlock;
54452+ user->banned = 1;
54453+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54454+ if (user->ban_expires == ~0UL)
54455+ user->ban_expires--;
54456+
54457+ do_each_thread(tsk2, tsk) {
54458+ cred2 = __task_cred(tsk);
54459+ if (tsk != p && cred2->uid == uid)
54460+ gr_fake_force_sig(SIGKILL, tsk);
54461+ } while_each_thread(tsk2, tsk);
54462+ }
54463+ }
54464+unlock:
54465+ read_unlock(&grsec_exec_file_lock);
54466+ read_unlock(&tasklist_lock);
54467+ rcu_read_unlock();
54468+
54469+ if (uid)
54470+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54471+
54472+#endif
54473+ return;
54474+}
54475+
54476+void gr_handle_brute_check(void)
54477+{
54478+#ifdef CONFIG_GRKERNSEC_BRUTE
54479+ if (current->brute)
54480+ msleep(30 * 1000);
54481+#endif
54482+ return;
54483+}
54484+
54485+void gr_handle_kernel_exploit(void)
54486+{
54487+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54488+ const struct cred *cred;
54489+ struct task_struct *tsk, *tsk2;
54490+ struct user_struct *user;
54491+ uid_t uid;
54492+
54493+ if (in_irq() || in_serving_softirq() || in_nmi())
54494+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54495+
54496+ uid = current_uid();
54497+
54498+ if (uid == 0)
54499+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54500+ else {
54501+ /* kill all the processes of this user, hold a reference
54502+ to their creds struct, and prevent them from creating
54503+ another process until system reset
54504+ */
54505+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54506+ /* we intentionally leak this ref */
54507+ user = get_uid(current->cred->user);
54508+ if (user) {
54509+ user->banned = 1;
54510+ user->ban_expires = ~0UL;
54511+ }
54512+
54513+ read_lock(&tasklist_lock);
54514+ do_each_thread(tsk2, tsk) {
54515+ cred = __task_cred(tsk);
54516+ if (cred->uid == uid)
54517+ gr_fake_force_sig(SIGKILL, tsk);
54518+ } while_each_thread(tsk2, tsk);
54519+ read_unlock(&tasklist_lock);
54520+ }
54521+#endif
54522+}
54523+
54524+int __gr_process_user_ban(struct user_struct *user)
54525+{
54526+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54527+ if (unlikely(user->banned)) {
54528+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54529+ user->banned = 0;
54530+ user->ban_expires = 0;
54531+ free_uid(user);
54532+ } else
54533+ return -EPERM;
54534+ }
54535+#endif
54536+ return 0;
54537+}
54538+
54539+int gr_process_user_ban(void)
54540+{
54541+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54542+ return __gr_process_user_ban(current->cred->user);
54543+#endif
54544+ return 0;
54545+}
54546diff -urNp linux-3.0.7/grsecurity/grsec_sock.c linux-3.0.7/grsecurity/grsec_sock.c
54547--- linux-3.0.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54548+++ linux-3.0.7/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
54549@@ -0,0 +1,244 @@
54550+#include <linux/kernel.h>
54551+#include <linux/module.h>
54552+#include <linux/sched.h>
54553+#include <linux/file.h>
54554+#include <linux/net.h>
54555+#include <linux/in.h>
54556+#include <linux/ip.h>
54557+#include <net/sock.h>
54558+#include <net/inet_sock.h>
54559+#include <linux/grsecurity.h>
54560+#include <linux/grinternal.h>
54561+#include <linux/gracl.h>
54562+
54563+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54564+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54565+
54566+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54567+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54568+
54569+#ifdef CONFIG_UNIX_MODULE
54570+EXPORT_SYMBOL(gr_acl_handle_unix);
54571+EXPORT_SYMBOL(gr_acl_handle_mknod);
54572+EXPORT_SYMBOL(gr_handle_chroot_unix);
54573+EXPORT_SYMBOL(gr_handle_create);
54574+#endif
54575+
54576+#ifdef CONFIG_GRKERNSEC
54577+#define gr_conn_table_size 32749
54578+struct conn_table_entry {
54579+ struct conn_table_entry *next;
54580+ struct signal_struct *sig;
54581+};
54582+
54583+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54584+DEFINE_SPINLOCK(gr_conn_table_lock);
54585+
54586+extern const char * gr_socktype_to_name(unsigned char type);
54587+extern const char * gr_proto_to_name(unsigned char proto);
54588+extern const char * gr_sockfamily_to_name(unsigned char family);
54589+
54590+static __inline__ int
54591+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54592+{
54593+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54594+}
54595+
54596+static __inline__ int
54597+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54598+ __u16 sport, __u16 dport)
54599+{
54600+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54601+ sig->gr_sport == sport && sig->gr_dport == dport))
54602+ return 1;
54603+ else
54604+ return 0;
54605+}
54606+
54607+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54608+{
54609+ struct conn_table_entry **match;
54610+ unsigned int index;
54611+
54612+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54613+ sig->gr_sport, sig->gr_dport,
54614+ gr_conn_table_size);
54615+
54616+ newent->sig = sig;
54617+
54618+ match = &gr_conn_table[index];
54619+ newent->next = *match;
54620+ *match = newent;
54621+
54622+ return;
54623+}
54624+
54625+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54626+{
54627+ struct conn_table_entry *match, *last = NULL;
54628+ unsigned int index;
54629+
54630+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54631+ sig->gr_sport, sig->gr_dport,
54632+ gr_conn_table_size);
54633+
54634+ match = gr_conn_table[index];
54635+ while (match && !conn_match(match->sig,
54636+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54637+ sig->gr_dport)) {
54638+ last = match;
54639+ match = match->next;
54640+ }
54641+
54642+ if (match) {
54643+ if (last)
54644+ last->next = match->next;
54645+ else
54646+ gr_conn_table[index] = NULL;
54647+ kfree(match);
54648+ }
54649+
54650+ return;
54651+}
54652+
54653+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54654+ __u16 sport, __u16 dport)
54655+{
54656+ struct conn_table_entry *match;
54657+ unsigned int index;
54658+
54659+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54660+
54661+ match = gr_conn_table[index];
54662+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54663+ match = match->next;
54664+
54665+ if (match)
54666+ return match->sig;
54667+ else
54668+ return NULL;
54669+}
54670+
54671+#endif
54672+
54673+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54674+{
54675+#ifdef CONFIG_GRKERNSEC
54676+ struct signal_struct *sig = task->signal;
54677+ struct conn_table_entry *newent;
54678+
54679+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54680+ if (newent == NULL)
54681+ return;
54682+ /* no bh lock needed since we are called with bh disabled */
54683+ spin_lock(&gr_conn_table_lock);
54684+ gr_del_task_from_ip_table_nolock(sig);
54685+ sig->gr_saddr = inet->inet_rcv_saddr;
54686+ sig->gr_daddr = inet->inet_daddr;
54687+ sig->gr_sport = inet->inet_sport;
54688+ sig->gr_dport = inet->inet_dport;
54689+ gr_add_to_task_ip_table_nolock(sig, newent);
54690+ spin_unlock(&gr_conn_table_lock);
54691+#endif
54692+ return;
54693+}
54694+
54695+void gr_del_task_from_ip_table(struct task_struct *task)
54696+{
54697+#ifdef CONFIG_GRKERNSEC
54698+ spin_lock_bh(&gr_conn_table_lock);
54699+ gr_del_task_from_ip_table_nolock(task->signal);
54700+ spin_unlock_bh(&gr_conn_table_lock);
54701+#endif
54702+ return;
54703+}
54704+
54705+void
54706+gr_attach_curr_ip(const struct sock *sk)
54707+{
54708+#ifdef CONFIG_GRKERNSEC
54709+ struct signal_struct *p, *set;
54710+ const struct inet_sock *inet = inet_sk(sk);
54711+
54712+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54713+ return;
54714+
54715+ set = current->signal;
54716+
54717+ spin_lock_bh(&gr_conn_table_lock);
54718+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
54719+ inet->inet_dport, inet->inet_sport);
54720+ if (unlikely(p != NULL)) {
54721+ set->curr_ip = p->curr_ip;
54722+ set->used_accept = 1;
54723+ gr_del_task_from_ip_table_nolock(p);
54724+ spin_unlock_bh(&gr_conn_table_lock);
54725+ return;
54726+ }
54727+ spin_unlock_bh(&gr_conn_table_lock);
54728+
54729+ set->curr_ip = inet->inet_daddr;
54730+ set->used_accept = 1;
54731+#endif
54732+ return;
54733+}
54734+
54735+int
54736+gr_handle_sock_all(const int family, const int type, const int protocol)
54737+{
54738+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54739+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54740+ (family != AF_UNIX)) {
54741+ if (family == AF_INET)
54742+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54743+ else
54744+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54745+ return -EACCES;
54746+ }
54747+#endif
54748+ return 0;
54749+}
54750+
54751+int
54752+gr_handle_sock_server(const struct sockaddr *sck)
54753+{
54754+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54755+ if (grsec_enable_socket_server &&
54756+ in_group_p(grsec_socket_server_gid) &&
54757+ sck && (sck->sa_family != AF_UNIX) &&
54758+ (sck->sa_family != AF_LOCAL)) {
54759+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54760+ return -EACCES;
54761+ }
54762+#endif
54763+ return 0;
54764+}
54765+
54766+int
54767+gr_handle_sock_server_other(const struct sock *sck)
54768+{
54769+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54770+ if (grsec_enable_socket_server &&
54771+ in_group_p(grsec_socket_server_gid) &&
54772+ sck && (sck->sk_family != AF_UNIX) &&
54773+ (sck->sk_family != AF_LOCAL)) {
54774+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54775+ return -EACCES;
54776+ }
54777+#endif
54778+ return 0;
54779+}
54780+
54781+int
54782+gr_handle_sock_client(const struct sockaddr *sck)
54783+{
54784+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54785+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54786+ sck && (sck->sa_family != AF_UNIX) &&
54787+ (sck->sa_family != AF_LOCAL)) {
54788+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54789+ return -EACCES;
54790+ }
54791+#endif
54792+ return 0;
54793+}
54794diff -urNp linux-3.0.7/grsecurity/grsec_sysctl.c linux-3.0.7/grsecurity/grsec_sysctl.c
54795--- linux-3.0.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54796+++ linux-3.0.7/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
54797@@ -0,0 +1,433 @@
54798+#include <linux/kernel.h>
54799+#include <linux/sched.h>
54800+#include <linux/sysctl.h>
54801+#include <linux/grsecurity.h>
54802+#include <linux/grinternal.h>
54803+
54804+int
54805+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54806+{
54807+#ifdef CONFIG_GRKERNSEC_SYSCTL
54808+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54809+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54810+ return -EACCES;
54811+ }
54812+#endif
54813+ return 0;
54814+}
54815+
54816+#ifdef CONFIG_GRKERNSEC_ROFS
54817+static int __maybe_unused one = 1;
54818+#endif
54819+
54820+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54821+struct ctl_table grsecurity_table[] = {
54822+#ifdef CONFIG_GRKERNSEC_SYSCTL
54823+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54824+#ifdef CONFIG_GRKERNSEC_IO
54825+ {
54826+ .procname = "disable_priv_io",
54827+ .data = &grsec_disable_privio,
54828+ .maxlen = sizeof(int),
54829+ .mode = 0600,
54830+ .proc_handler = &proc_dointvec,
54831+ },
54832+#endif
54833+#endif
54834+#ifdef CONFIG_GRKERNSEC_LINK
54835+ {
54836+ .procname = "linking_restrictions",
54837+ .data = &grsec_enable_link,
54838+ .maxlen = sizeof(int),
54839+ .mode = 0600,
54840+ .proc_handler = &proc_dointvec,
54841+ },
54842+#endif
54843+#ifdef CONFIG_GRKERNSEC_BRUTE
54844+ {
54845+ .procname = "deter_bruteforce",
54846+ .data = &grsec_enable_brute,
54847+ .maxlen = sizeof(int),
54848+ .mode = 0600,
54849+ .proc_handler = &proc_dointvec,
54850+ },
54851+#endif
54852+#ifdef CONFIG_GRKERNSEC_FIFO
54853+ {
54854+ .procname = "fifo_restrictions",
54855+ .data = &grsec_enable_fifo,
54856+ .maxlen = sizeof(int),
54857+ .mode = 0600,
54858+ .proc_handler = &proc_dointvec,
54859+ },
54860+#endif
54861+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54862+ {
54863+ .procname = "ip_blackhole",
54864+ .data = &grsec_enable_blackhole,
54865+ .maxlen = sizeof(int),
54866+ .mode = 0600,
54867+ .proc_handler = &proc_dointvec,
54868+ },
54869+ {
54870+ .procname = "lastack_retries",
54871+ .data = &grsec_lastack_retries,
54872+ .maxlen = sizeof(int),
54873+ .mode = 0600,
54874+ .proc_handler = &proc_dointvec,
54875+ },
54876+#endif
54877+#ifdef CONFIG_GRKERNSEC_EXECLOG
54878+ {
54879+ .procname = "exec_logging",
54880+ .data = &grsec_enable_execlog,
54881+ .maxlen = sizeof(int),
54882+ .mode = 0600,
54883+ .proc_handler = &proc_dointvec,
54884+ },
54885+#endif
54886+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54887+ {
54888+ .procname = "rwxmap_logging",
54889+ .data = &grsec_enable_log_rwxmaps,
54890+ .maxlen = sizeof(int),
54891+ .mode = 0600,
54892+ .proc_handler = &proc_dointvec,
54893+ },
54894+#endif
54895+#ifdef CONFIG_GRKERNSEC_SIGNAL
54896+ {
54897+ .procname = "signal_logging",
54898+ .data = &grsec_enable_signal,
54899+ .maxlen = sizeof(int),
54900+ .mode = 0600,
54901+ .proc_handler = &proc_dointvec,
54902+ },
54903+#endif
54904+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54905+ {
54906+ .procname = "forkfail_logging",
54907+ .data = &grsec_enable_forkfail,
54908+ .maxlen = sizeof(int),
54909+ .mode = 0600,
54910+ .proc_handler = &proc_dointvec,
54911+ },
54912+#endif
54913+#ifdef CONFIG_GRKERNSEC_TIME
54914+ {
54915+ .procname = "timechange_logging",
54916+ .data = &grsec_enable_time,
54917+ .maxlen = sizeof(int),
54918+ .mode = 0600,
54919+ .proc_handler = &proc_dointvec,
54920+ },
54921+#endif
54922+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54923+ {
54924+ .procname = "chroot_deny_shmat",
54925+ .data = &grsec_enable_chroot_shmat,
54926+ .maxlen = sizeof(int),
54927+ .mode = 0600,
54928+ .proc_handler = &proc_dointvec,
54929+ },
54930+#endif
54931+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54932+ {
54933+ .procname = "chroot_deny_unix",
54934+ .data = &grsec_enable_chroot_unix,
54935+ .maxlen = sizeof(int),
54936+ .mode = 0600,
54937+ .proc_handler = &proc_dointvec,
54938+ },
54939+#endif
54940+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54941+ {
54942+ .procname = "chroot_deny_mount",
54943+ .data = &grsec_enable_chroot_mount,
54944+ .maxlen = sizeof(int),
54945+ .mode = 0600,
54946+ .proc_handler = &proc_dointvec,
54947+ },
54948+#endif
54949+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54950+ {
54951+ .procname = "chroot_deny_fchdir",
54952+ .data = &grsec_enable_chroot_fchdir,
54953+ .maxlen = sizeof(int),
54954+ .mode = 0600,
54955+ .proc_handler = &proc_dointvec,
54956+ },
54957+#endif
54958+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54959+ {
54960+ .procname = "chroot_deny_chroot",
54961+ .data = &grsec_enable_chroot_double,
54962+ .maxlen = sizeof(int),
54963+ .mode = 0600,
54964+ .proc_handler = &proc_dointvec,
54965+ },
54966+#endif
54967+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54968+ {
54969+ .procname = "chroot_deny_pivot",
54970+ .data = &grsec_enable_chroot_pivot,
54971+ .maxlen = sizeof(int),
54972+ .mode = 0600,
54973+ .proc_handler = &proc_dointvec,
54974+ },
54975+#endif
54976+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54977+ {
54978+ .procname = "chroot_enforce_chdir",
54979+ .data = &grsec_enable_chroot_chdir,
54980+ .maxlen = sizeof(int),
54981+ .mode = 0600,
54982+ .proc_handler = &proc_dointvec,
54983+ },
54984+#endif
54985+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54986+ {
54987+ .procname = "chroot_deny_chmod",
54988+ .data = &grsec_enable_chroot_chmod,
54989+ .maxlen = sizeof(int),
54990+ .mode = 0600,
54991+ .proc_handler = &proc_dointvec,
54992+ },
54993+#endif
54994+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54995+ {
54996+ .procname = "chroot_deny_mknod",
54997+ .data = &grsec_enable_chroot_mknod,
54998+ .maxlen = sizeof(int),
54999+ .mode = 0600,
55000+ .proc_handler = &proc_dointvec,
55001+ },
55002+#endif
55003+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55004+ {
55005+ .procname = "chroot_restrict_nice",
55006+ .data = &grsec_enable_chroot_nice,
55007+ .maxlen = sizeof(int),
55008+ .mode = 0600,
55009+ .proc_handler = &proc_dointvec,
55010+ },
55011+#endif
55012+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55013+ {
55014+ .procname = "chroot_execlog",
55015+ .data = &grsec_enable_chroot_execlog,
55016+ .maxlen = sizeof(int),
55017+ .mode = 0600,
55018+ .proc_handler = &proc_dointvec,
55019+ },
55020+#endif
55021+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55022+ {
55023+ .procname = "chroot_caps",
55024+ .data = &grsec_enable_chroot_caps,
55025+ .maxlen = sizeof(int),
55026+ .mode = 0600,
55027+ .proc_handler = &proc_dointvec,
55028+ },
55029+#endif
55030+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55031+ {
55032+ .procname = "chroot_deny_sysctl",
55033+ .data = &grsec_enable_chroot_sysctl,
55034+ .maxlen = sizeof(int),
55035+ .mode = 0600,
55036+ .proc_handler = &proc_dointvec,
55037+ },
55038+#endif
55039+#ifdef CONFIG_GRKERNSEC_TPE
55040+ {
55041+ .procname = "tpe",
55042+ .data = &grsec_enable_tpe,
55043+ .maxlen = sizeof(int),
55044+ .mode = 0600,
55045+ .proc_handler = &proc_dointvec,
55046+ },
55047+ {
55048+ .procname = "tpe_gid",
55049+ .data = &grsec_tpe_gid,
55050+ .maxlen = sizeof(int),
55051+ .mode = 0600,
55052+ .proc_handler = &proc_dointvec,
55053+ },
55054+#endif
55055+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55056+ {
55057+ .procname = "tpe_invert",
55058+ .data = &grsec_enable_tpe_invert,
55059+ .maxlen = sizeof(int),
55060+ .mode = 0600,
55061+ .proc_handler = &proc_dointvec,
55062+ },
55063+#endif
55064+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55065+ {
55066+ .procname = "tpe_restrict_all",
55067+ .data = &grsec_enable_tpe_all,
55068+ .maxlen = sizeof(int),
55069+ .mode = 0600,
55070+ .proc_handler = &proc_dointvec,
55071+ },
55072+#endif
55073+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55074+ {
55075+ .procname = "socket_all",
55076+ .data = &grsec_enable_socket_all,
55077+ .maxlen = sizeof(int),
55078+ .mode = 0600,
55079+ .proc_handler = &proc_dointvec,
55080+ },
55081+ {
55082+ .procname = "socket_all_gid",
55083+ .data = &grsec_socket_all_gid,
55084+ .maxlen = sizeof(int),
55085+ .mode = 0600,
55086+ .proc_handler = &proc_dointvec,
55087+ },
55088+#endif
55089+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55090+ {
55091+ .procname = "socket_client",
55092+ .data = &grsec_enable_socket_client,
55093+ .maxlen = sizeof(int),
55094+ .mode = 0600,
55095+ .proc_handler = &proc_dointvec,
55096+ },
55097+ {
55098+ .procname = "socket_client_gid",
55099+ .data = &grsec_socket_client_gid,
55100+ .maxlen = sizeof(int),
55101+ .mode = 0600,
55102+ .proc_handler = &proc_dointvec,
55103+ },
55104+#endif
55105+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55106+ {
55107+ .procname = "socket_server",
55108+ .data = &grsec_enable_socket_server,
55109+ .maxlen = sizeof(int),
55110+ .mode = 0600,
55111+ .proc_handler = &proc_dointvec,
55112+ },
55113+ {
55114+ .procname = "socket_server_gid",
55115+ .data = &grsec_socket_server_gid,
55116+ .maxlen = sizeof(int),
55117+ .mode = 0600,
55118+ .proc_handler = &proc_dointvec,
55119+ },
55120+#endif
55121+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55122+ {
55123+ .procname = "audit_group",
55124+ .data = &grsec_enable_group,
55125+ .maxlen = sizeof(int),
55126+ .mode = 0600,
55127+ .proc_handler = &proc_dointvec,
55128+ },
55129+ {
55130+ .procname = "audit_gid",
55131+ .data = &grsec_audit_gid,
55132+ .maxlen = sizeof(int),
55133+ .mode = 0600,
55134+ .proc_handler = &proc_dointvec,
55135+ },
55136+#endif
55137+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55138+ {
55139+ .procname = "audit_chdir",
55140+ .data = &grsec_enable_chdir,
55141+ .maxlen = sizeof(int),
55142+ .mode = 0600,
55143+ .proc_handler = &proc_dointvec,
55144+ },
55145+#endif
55146+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55147+ {
55148+ .procname = "audit_mount",
55149+ .data = &grsec_enable_mount,
55150+ .maxlen = sizeof(int),
55151+ .mode = 0600,
55152+ .proc_handler = &proc_dointvec,
55153+ },
55154+#endif
55155+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55156+ {
55157+ .procname = "audit_textrel",
55158+ .data = &grsec_enable_audit_textrel,
55159+ .maxlen = sizeof(int),
55160+ .mode = 0600,
55161+ .proc_handler = &proc_dointvec,
55162+ },
55163+#endif
55164+#ifdef CONFIG_GRKERNSEC_DMESG
55165+ {
55166+ .procname = "dmesg",
55167+ .data = &grsec_enable_dmesg,
55168+ .maxlen = sizeof(int),
55169+ .mode = 0600,
55170+ .proc_handler = &proc_dointvec,
55171+ },
55172+#endif
55173+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55174+ {
55175+ .procname = "chroot_findtask",
55176+ .data = &grsec_enable_chroot_findtask,
55177+ .maxlen = sizeof(int),
55178+ .mode = 0600,
55179+ .proc_handler = &proc_dointvec,
55180+ },
55181+#endif
55182+#ifdef CONFIG_GRKERNSEC_RESLOG
55183+ {
55184+ .procname = "resource_logging",
55185+ .data = &grsec_resource_logging,
55186+ .maxlen = sizeof(int),
55187+ .mode = 0600,
55188+ .proc_handler = &proc_dointvec,
55189+ },
55190+#endif
55191+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55192+ {
55193+ .procname = "audit_ptrace",
55194+ .data = &grsec_enable_audit_ptrace,
55195+ .maxlen = sizeof(int),
55196+ .mode = 0600,
55197+ .proc_handler = &proc_dointvec,
55198+ },
55199+#endif
55200+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55201+ {
55202+ .procname = "harden_ptrace",
55203+ .data = &grsec_enable_harden_ptrace,
55204+ .maxlen = sizeof(int),
55205+ .mode = 0600,
55206+ .proc_handler = &proc_dointvec,
55207+ },
55208+#endif
55209+ {
55210+ .procname = "grsec_lock",
55211+ .data = &grsec_lock,
55212+ .maxlen = sizeof(int),
55213+ .mode = 0600,
55214+ .proc_handler = &proc_dointvec,
55215+ },
55216+#endif
55217+#ifdef CONFIG_GRKERNSEC_ROFS
55218+ {
55219+ .procname = "romount_protect",
55220+ .data = &grsec_enable_rofs,
55221+ .maxlen = sizeof(int),
55222+ .mode = 0600,
55223+ .proc_handler = &proc_dointvec_minmax,
55224+ .extra1 = &one,
55225+ .extra2 = &one,
55226+ },
55227+#endif
55228+ { }
55229+};
55230+#endif
55231diff -urNp linux-3.0.7/grsecurity/grsec_time.c linux-3.0.7/grsecurity/grsec_time.c
55232--- linux-3.0.7/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55233+++ linux-3.0.7/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
55234@@ -0,0 +1,16 @@
55235+#include <linux/kernel.h>
55236+#include <linux/sched.h>
55237+#include <linux/grinternal.h>
55238+#include <linux/module.h>
55239+
55240+void
55241+gr_log_timechange(void)
55242+{
55243+#ifdef CONFIG_GRKERNSEC_TIME
55244+ if (grsec_enable_time)
55245+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55246+#endif
55247+ return;
55248+}
55249+
55250+EXPORT_SYMBOL(gr_log_timechange);
55251diff -urNp linux-3.0.7/grsecurity/grsec_tpe.c linux-3.0.7/grsecurity/grsec_tpe.c
55252--- linux-3.0.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55253+++ linux-3.0.7/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
55254@@ -0,0 +1,39 @@
55255+#include <linux/kernel.h>
55256+#include <linux/sched.h>
55257+#include <linux/file.h>
55258+#include <linux/fs.h>
55259+#include <linux/grinternal.h>
55260+
55261+extern int gr_acl_tpe_check(void);
55262+
55263+int
55264+gr_tpe_allow(const struct file *file)
55265+{
55266+#ifdef CONFIG_GRKERNSEC
55267+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55268+ const struct cred *cred = current_cred();
55269+
55270+ if (cred->uid && ((grsec_enable_tpe &&
55271+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55272+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55273+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55274+#else
55275+ in_group_p(grsec_tpe_gid)
55276+#endif
55277+ ) || gr_acl_tpe_check()) &&
55278+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55279+ (inode->i_mode & S_IWOTH))))) {
55280+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55281+ return 0;
55282+ }
55283+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55284+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55285+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55286+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55287+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55288+ return 0;
55289+ }
55290+#endif
55291+#endif
55292+ return 1;
55293+}
55294diff -urNp linux-3.0.7/grsecurity/grsum.c linux-3.0.7/grsecurity/grsum.c
55295--- linux-3.0.7/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55296+++ linux-3.0.7/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
55297@@ -0,0 +1,61 @@
55298+#include <linux/err.h>
55299+#include <linux/kernel.h>
55300+#include <linux/sched.h>
55301+#include <linux/mm.h>
55302+#include <linux/scatterlist.h>
55303+#include <linux/crypto.h>
55304+#include <linux/gracl.h>
55305+
55306+
55307+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55308+#error "crypto and sha256 must be built into the kernel"
55309+#endif
55310+
55311+int
55312+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55313+{
55314+ char *p;
55315+ struct crypto_hash *tfm;
55316+ struct hash_desc desc;
55317+ struct scatterlist sg;
55318+ unsigned char temp_sum[GR_SHA_LEN];
55319+ volatile int retval = 0;
55320+ volatile int dummy = 0;
55321+ unsigned int i;
55322+
55323+ sg_init_table(&sg, 1);
55324+
55325+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55326+ if (IS_ERR(tfm)) {
55327+ /* should never happen, since sha256 should be built in */
55328+ return 1;
55329+ }
55330+
55331+ desc.tfm = tfm;
55332+ desc.flags = 0;
55333+
55334+ crypto_hash_init(&desc);
55335+
55336+ p = salt;
55337+ sg_set_buf(&sg, p, GR_SALT_LEN);
55338+ crypto_hash_update(&desc, &sg, sg.length);
55339+
55340+ p = entry->pw;
55341+ sg_set_buf(&sg, p, strlen(p));
55342+
55343+ crypto_hash_update(&desc, &sg, sg.length);
55344+
55345+ crypto_hash_final(&desc, temp_sum);
55346+
55347+ memset(entry->pw, 0, GR_PW_LEN);
55348+
55349+ for (i = 0; i < GR_SHA_LEN; i++)
55350+ if (sum[i] != temp_sum[i])
55351+ retval = 1;
55352+ else
55353+ dummy = 1; // waste a cycle
55354+
55355+ crypto_free_hash(tfm);
55356+
55357+ return retval;
55358+}
55359diff -urNp linux-3.0.7/grsecurity/Kconfig linux-3.0.7/grsecurity/Kconfig
55360--- linux-3.0.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55361+++ linux-3.0.7/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
55362@@ -0,0 +1,1038 @@
55363+#
55364+# grecurity configuration
55365+#
55366+
55367+menu "Grsecurity"
55368+
55369+config GRKERNSEC
55370+ bool "Grsecurity"
55371+ select CRYPTO
55372+ select CRYPTO_SHA256
55373+ help
55374+ If you say Y here, you will be able to configure many features
55375+ that will enhance the security of your system. It is highly
55376+ recommended that you say Y here and read through the help
55377+ for each option so that you fully understand the features and
55378+ can evaluate their usefulness for your machine.
55379+
55380+choice
55381+ prompt "Security Level"
55382+ depends on GRKERNSEC
55383+ default GRKERNSEC_CUSTOM
55384+
55385+config GRKERNSEC_LOW
55386+ bool "Low"
55387+ select GRKERNSEC_LINK
55388+ select GRKERNSEC_FIFO
55389+ select GRKERNSEC_RANDNET
55390+ select GRKERNSEC_DMESG
55391+ select GRKERNSEC_CHROOT
55392+ select GRKERNSEC_CHROOT_CHDIR
55393+
55394+ help
55395+ If you choose this option, several of the grsecurity options will
55396+ be enabled that will give you greater protection against a number
55397+ of attacks, while assuring that none of your software will have any
55398+ conflicts with the additional security measures. If you run a lot
55399+ of unusual software, or you are having problems with the higher
55400+ security levels, you should say Y here. With this option, the
55401+ following features are enabled:
55402+
55403+ - Linking restrictions
55404+ - FIFO restrictions
55405+ - Restricted dmesg
55406+ - Enforced chdir("/") on chroot
55407+ - Runtime module disabling
55408+
55409+config GRKERNSEC_MEDIUM
55410+ bool "Medium"
55411+ select PAX
55412+ select PAX_EI_PAX
55413+ select PAX_PT_PAX_FLAGS
55414+ select PAX_HAVE_ACL_FLAGS
55415+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55416+ select GRKERNSEC_CHROOT
55417+ select GRKERNSEC_CHROOT_SYSCTL
55418+ select GRKERNSEC_LINK
55419+ select GRKERNSEC_FIFO
55420+ select GRKERNSEC_DMESG
55421+ select GRKERNSEC_RANDNET
55422+ select GRKERNSEC_FORKFAIL
55423+ select GRKERNSEC_TIME
55424+ select GRKERNSEC_SIGNAL
55425+ select GRKERNSEC_CHROOT
55426+ select GRKERNSEC_CHROOT_UNIX
55427+ select GRKERNSEC_CHROOT_MOUNT
55428+ select GRKERNSEC_CHROOT_PIVOT
55429+ select GRKERNSEC_CHROOT_DOUBLE
55430+ select GRKERNSEC_CHROOT_CHDIR
55431+ select GRKERNSEC_CHROOT_MKNOD
55432+ select GRKERNSEC_PROC
55433+ select GRKERNSEC_PROC_USERGROUP
55434+ select PAX_RANDUSTACK
55435+ select PAX_ASLR
55436+ select PAX_RANDMMAP
55437+ select PAX_REFCOUNT if (X86 || SPARC64)
55438+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55439+
55440+ help
55441+ If you say Y here, several features in addition to those included
55442+ in the low additional security level will be enabled. These
55443+ features provide even more security to your system, though in rare
55444+ cases they may be incompatible with very old or poorly written
55445+ software. If you enable this option, make sure that your auth
55446+ service (identd) is running as gid 1001. With this option,
55447+ the following features (in addition to those provided in the
55448+ low additional security level) will be enabled:
55449+
55450+ - Failed fork logging
55451+ - Time change logging
55452+ - Signal logging
55453+ - Deny mounts in chroot
55454+ - Deny double chrooting
55455+ - Deny sysctl writes in chroot
55456+ - Deny mknod in chroot
55457+ - Deny access to abstract AF_UNIX sockets out of chroot
55458+ - Deny pivot_root in chroot
55459+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55460+ - /proc restrictions with special GID set to 10 (usually wheel)
55461+ - Address Space Layout Randomization (ASLR)
55462+ - Prevent exploitation of most refcount overflows
55463+ - Bounds checking of copying between the kernel and userland
55464+
55465+config GRKERNSEC_HIGH
55466+ bool "High"
55467+ select GRKERNSEC_LINK
55468+ select GRKERNSEC_FIFO
55469+ select GRKERNSEC_DMESG
55470+ select GRKERNSEC_FORKFAIL
55471+ select GRKERNSEC_TIME
55472+ select GRKERNSEC_SIGNAL
55473+ select GRKERNSEC_CHROOT
55474+ select GRKERNSEC_CHROOT_SHMAT
55475+ select GRKERNSEC_CHROOT_UNIX
55476+ select GRKERNSEC_CHROOT_MOUNT
55477+ select GRKERNSEC_CHROOT_FCHDIR
55478+ select GRKERNSEC_CHROOT_PIVOT
55479+ select GRKERNSEC_CHROOT_DOUBLE
55480+ select GRKERNSEC_CHROOT_CHDIR
55481+ select GRKERNSEC_CHROOT_MKNOD
55482+ select GRKERNSEC_CHROOT_CAPS
55483+ select GRKERNSEC_CHROOT_SYSCTL
55484+ select GRKERNSEC_CHROOT_FINDTASK
55485+ select GRKERNSEC_SYSFS_RESTRICT
55486+ select GRKERNSEC_PROC
55487+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55488+ select GRKERNSEC_HIDESYM
55489+ select GRKERNSEC_BRUTE
55490+ select GRKERNSEC_PROC_USERGROUP
55491+ select GRKERNSEC_KMEM
55492+ select GRKERNSEC_RESLOG
55493+ select GRKERNSEC_RANDNET
55494+ select GRKERNSEC_PROC_ADD
55495+ select GRKERNSEC_CHROOT_CHMOD
55496+ select GRKERNSEC_CHROOT_NICE
55497+ select GRKERNSEC_AUDIT_MOUNT
55498+ select GRKERNSEC_MODHARDEN if (MODULES)
55499+ select GRKERNSEC_HARDEN_PTRACE
55500+ select GRKERNSEC_VM86 if (X86_32)
55501+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55502+ select PAX
55503+ select PAX_RANDUSTACK
55504+ select PAX_ASLR
55505+ select PAX_RANDMMAP
55506+ select PAX_NOEXEC
55507+ select PAX_MPROTECT
55508+ select PAX_EI_PAX
55509+ select PAX_PT_PAX_FLAGS
55510+ select PAX_HAVE_ACL_FLAGS
55511+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55512+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55513+ select PAX_RANDKSTACK if (X86_TSC && X86)
55514+ select PAX_SEGMEXEC if (X86_32)
55515+ select PAX_PAGEEXEC
55516+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55517+ select PAX_EMUTRAMP if (PARISC)
55518+ select PAX_EMUSIGRT if (PARISC)
55519+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55520+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55521+ select PAX_REFCOUNT if (X86 || SPARC64)
55522+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
55523+ help
55524+ If you say Y here, many of the features of grsecurity will be
55525+ enabled, which will protect you against many kinds of attacks
55526+ against your system. The heightened security comes at a cost
55527+ of an increased chance of incompatibilities with rare software
55528+ on your machine. Since this security level enables PaX, you should
55529+ view <http://pax.grsecurity.net> and read about the PaX
55530+ project. While you are there, download chpax and run it on
55531+ binaries that cause problems with PaX. Also remember that
55532+ since the /proc restrictions are enabled, you must run your
55533+ identd as gid 1001. This security level enables the following
55534+ features in addition to those listed in the low and medium
55535+ security levels:
55536+
55537+ - Additional /proc restrictions
55538+ - Chmod restrictions in chroot
55539+ - No signals, ptrace, or viewing of processes outside of chroot
55540+ - Capability restrictions in chroot
55541+ - Deny fchdir out of chroot
55542+ - Priority restrictions in chroot
55543+ - Segmentation-based implementation of PaX
55544+ - Mprotect restrictions
55545+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55546+ - Kernel stack randomization
55547+ - Mount/unmount/remount logging
55548+ - Kernel symbol hiding
55549+ - Prevention of memory exhaustion-based exploits
55550+ - Hardening of module auto-loading
55551+ - Ptrace restrictions
55552+ - Restricted vm86 mode
55553+ - Restricted sysfs/debugfs
55554+ - Active kernel exploit response
55555+
55556+config GRKERNSEC_CUSTOM
55557+ bool "Custom"
55558+ help
55559+ If you say Y here, you will be able to configure every grsecurity
55560+ option, which allows you to enable many more features that aren't
55561+ covered in the basic security levels. These additional features
55562+ include TPE, socket restrictions, and the sysctl system for
55563+ grsecurity. It is advised that you read through the help for
55564+ each option to determine its usefulness in your situation.
55565+
55566+endchoice
55567+
55568+menu "Address Space Protection"
55569+depends on GRKERNSEC
55570+
55571+config GRKERNSEC_KMEM
55572+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55573+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55574+ help
55575+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55576+ be written to via mmap or otherwise to modify the running kernel.
55577+ /dev/port will also not be allowed to be opened. If you have module
55578+ support disabled, enabling this will close up four ways that are
55579+ currently used to insert malicious code into the running kernel.
55580+ Even with all these features enabled, we still highly recommend that
55581+ you use the RBAC system, as it is still possible for an attacker to
55582+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55583+ If you are not using XFree86, you may be able to stop this additional
55584+ case by enabling the 'Disable privileged I/O' option. Though nothing
55585+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55586+ but only to video memory, which is the only writing we allow in this
55587+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55588+ not be allowed to mprotect it with PROT_WRITE later.
55589+ It is highly recommended that you say Y here if you meet all the
55590+ conditions above.
55591+
55592+config GRKERNSEC_VM86
55593+ bool "Restrict VM86 mode"
55594+ depends on X86_32
55595+
55596+ help
55597+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55598+ make use of a special execution mode on 32bit x86 processors called
55599+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55600+ video cards and will still work with this option enabled. The purpose
55601+ of the option is to prevent exploitation of emulation errors in
55602+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55603+ Nearly all users should be able to enable this option.
55604+
55605+config GRKERNSEC_IO
55606+ bool "Disable privileged I/O"
55607+ depends on X86
55608+ select RTC_CLASS
55609+ select RTC_INTF_DEV
55610+ select RTC_DRV_CMOS
55611+
55612+ help
55613+ If you say Y here, all ioperm and iopl calls will return an error.
55614+ Ioperm and iopl can be used to modify the running kernel.
55615+ Unfortunately, some programs need this access to operate properly,
55616+ the most notable of which are XFree86 and hwclock. hwclock can be
55617+ remedied by having RTC support in the kernel, so real-time
55618+ clock support is enabled if this option is enabled, to ensure
55619+ that hwclock operates correctly. XFree86 still will not
55620+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55621+ IF YOU USE XFree86. If you use XFree86 and you still want to
55622+ protect your kernel against modification, use the RBAC system.
55623+
55624+config GRKERNSEC_PROC_MEMMAP
55625+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55626+ default y if (PAX_NOEXEC || PAX_ASLR)
55627+ depends on PAX_NOEXEC || PAX_ASLR
55628+ help
55629+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55630+ give no information about the addresses of its mappings if
55631+ PaX features that rely on random addresses are enabled on the task.
55632+ If you use PaX it is greatly recommended that you say Y here as it
55633+ closes up a hole that makes the full ASLR useless for suid
55634+ binaries.
55635+
55636+config GRKERNSEC_BRUTE
55637+ bool "Deter exploit bruteforcing"
55638+ help
55639+ If you say Y here, attempts to bruteforce exploits against forking
55640+ daemons such as apache or sshd, as well as against suid/sgid binaries
55641+ will be deterred. When a child of a forking daemon is killed by PaX
55642+ or crashes due to an illegal instruction or other suspicious signal,
55643+ the parent process will be delayed 30 seconds upon every subsequent
55644+ fork until the administrator is able to assess the situation and
55645+ restart the daemon.
55646+ In the suid/sgid case, the attempt is logged, the user has all their
55647+ processes terminated, and they are prevented from executing any further
55648+ processes for 15 minutes.
55649+ It is recommended that you also enable signal logging in the auditing
55650+ section so that logs are generated when a process triggers a suspicious
55651+ signal.
55652+ If the sysctl option is enabled, a sysctl option with name
55653+ "deter_bruteforce" is created.
55654+
55655+
55656+config GRKERNSEC_MODHARDEN
55657+ bool "Harden module auto-loading"
55658+ depends on MODULES
55659+ help
55660+ If you say Y here, module auto-loading in response to use of some
55661+ feature implemented by an unloaded module will be restricted to
55662+ root users. Enabling this option helps defend against attacks
55663+ by unprivileged users who abuse the auto-loading behavior to
55664+ cause a vulnerable module to load that is then exploited.
55665+
55666+ If this option prevents a legitimate use of auto-loading for a
55667+ non-root user, the administrator can execute modprobe manually
55668+ with the exact name of the module mentioned in the alert log.
55669+ Alternatively, the administrator can add the module to the list
55670+ of modules loaded at boot by modifying init scripts.
55671+
55672+ Modification of init scripts will most likely be needed on
55673+ Ubuntu servers with encrypted home directory support enabled,
55674+ as the first non-root user logging in will cause the ecb(aes),
55675+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55676+
55677+config GRKERNSEC_HIDESYM
55678+ bool "Hide kernel symbols"
55679+ help
55680+ If you say Y here, getting information on loaded modules, and
55681+ displaying all kernel symbols through a syscall will be restricted
55682+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55683+ /proc/kallsyms will be restricted to the root user. The RBAC
55684+ system can hide that entry even from root.
55685+
55686+ This option also prevents leaking of kernel addresses through
55687+ several /proc entries.
55688+
55689+ Note that this option is only effective provided the following
55690+ conditions are met:
55691+ 1) The kernel using grsecurity is not precompiled by some distribution
55692+ 2) You have also enabled GRKERNSEC_DMESG
55693+ 3) You are using the RBAC system and hiding other files such as your
55694+ kernel image and System.map. Alternatively, enabling this option
55695+ causes the permissions on /boot, /lib/modules, and the kernel
55696+ source directory to change at compile time to prevent
55697+ reading by non-root users.
55698+ If the above conditions are met, this option will aid in providing a
55699+ useful protection against local kernel exploitation of overflows
55700+ and arbitrary read/write vulnerabilities.
55701+
55702+config GRKERNSEC_KERN_LOCKOUT
55703+ bool "Active kernel exploit response"
55704+ depends on X86 || ARM || PPC || SPARC
55705+ help
55706+ If you say Y here, when a PaX alert is triggered due to suspicious
55707+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55708+ or an OOPs occurs due to bad memory accesses, instead of just
55709+ terminating the offending process (and potentially allowing
55710+ a subsequent exploit from the same user), we will take one of two
55711+ actions:
55712+ If the user was root, we will panic the system
55713+ If the user was non-root, we will log the attempt, terminate
55714+ all processes owned by the user, then prevent them from creating
55715+ any new processes until the system is restarted
55716+ This deters repeated kernel exploitation/bruteforcing attempts
55717+ and is useful for later forensics.
55718+
55719+endmenu
55720+menu "Role Based Access Control Options"
55721+depends on GRKERNSEC
55722+
55723+config GRKERNSEC_RBAC_DEBUG
55724+ bool
55725+
55726+config GRKERNSEC_NO_RBAC
55727+ bool "Disable RBAC system"
55728+ help
55729+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55730+ preventing the RBAC system from being enabled. You should only say Y
55731+ here if you have no intention of using the RBAC system, so as to prevent
55732+ an attacker with root access from misusing the RBAC system to hide files
55733+ and processes when loadable module support and /dev/[k]mem have been
55734+ locked down.
55735+
55736+config GRKERNSEC_ACL_HIDEKERN
55737+ bool "Hide kernel processes"
55738+ help
55739+ If you say Y here, all kernel threads will be hidden to all
55740+ processes but those whose subject has the "view hidden processes"
55741+ flag.
55742+
55743+config GRKERNSEC_ACL_MAXTRIES
55744+ int "Maximum tries before password lockout"
55745+ default 3
55746+ help
55747+ This option enforces the maximum number of times a user can attempt
55748+ to authorize themselves with the grsecurity RBAC system before being
55749+ denied the ability to attempt authorization again for a specified time.
55750+ The lower the number, the harder it will be to brute-force a password.
55751+
55752+config GRKERNSEC_ACL_TIMEOUT
55753+ int "Time to wait after max password tries, in seconds"
55754+ default 30
55755+ help
55756+ This option specifies the time the user must wait after attempting to
55757+ authorize to the RBAC system with the maximum number of invalid
55758+ passwords. The higher the number, the harder it will be to brute-force
55759+ a password.
55760+
55761+endmenu
55762+menu "Filesystem Protections"
55763+depends on GRKERNSEC
55764+
55765+config GRKERNSEC_PROC
55766+ bool "Proc restrictions"
55767+ help
55768+ If you say Y here, the permissions of the /proc filesystem
55769+ will be altered to enhance system security and privacy. You MUST
55770+ choose either a user only restriction or a user and group restriction.
55771+ Depending upon the option you choose, you can either restrict users to
55772+ see only the processes they themselves run, or choose a group that can
55773+ view all processes and files normally restricted to root if you choose
55774+ the "restrict to user only" option. NOTE: If you're running identd as
55775+ a non-root user, you will have to run it as the group you specify here.
55776+
55777+config GRKERNSEC_PROC_USER
55778+ bool "Restrict /proc to user only"
55779+ depends on GRKERNSEC_PROC
55780+ help
55781+ If you say Y here, non-root users will only be able to view their own
55782+ processes, and restricts them from viewing network-related information,
55783+ and viewing kernel symbol and module information.
55784+
55785+config GRKERNSEC_PROC_USERGROUP
55786+ bool "Allow special group"
55787+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55788+ help
55789+ If you say Y here, you will be able to select a group that will be
55790+ able to view all processes and network-related information. If you've
55791+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55792+ remain hidden. This option is useful if you want to run identd as
55793+ a non-root user.
55794+
55795+config GRKERNSEC_PROC_GID
55796+ int "GID for special group"
55797+ depends on GRKERNSEC_PROC_USERGROUP
55798+ default 1001
55799+
55800+config GRKERNSEC_PROC_ADD
55801+ bool "Additional restrictions"
55802+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55803+ help
55804+ If you say Y here, additional restrictions will be placed on
55805+ /proc that keep normal users from viewing device information and
55806+ slabinfo information that could be useful for exploits.
55807+
55808+config GRKERNSEC_LINK
55809+ bool "Linking restrictions"
55810+ help
55811+ If you say Y here, /tmp race exploits will be prevented, since users
55812+ will no longer be able to follow symlinks owned by other users in
55813+ world-writable +t directories (e.g. /tmp), unless the owner of the
55814+ symlink is the owner of the directory. users will also not be
55815+ able to hardlink to files they do not own. If the sysctl option is
55816+ enabled, a sysctl option with name "linking_restrictions" is created.
55817+
55818+config GRKERNSEC_FIFO
55819+ bool "FIFO restrictions"
55820+ help
55821+ If you say Y here, users will not be able to write to FIFOs they don't
55822+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55823+ the FIFO is the same owner of the directory it's held in. If the sysctl
55824+ option is enabled, a sysctl option with name "fifo_restrictions" is
55825+ created.
55826+
55827+config GRKERNSEC_SYSFS_RESTRICT
55828+ bool "Sysfs/debugfs restriction"
55829+ depends on SYSFS
55830+ help
55831+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55832+ any filesystem normally mounted under it (e.g. debugfs) will only
55833+ be accessible by root. These filesystems generally provide access
55834+ to hardware and debug information that isn't appropriate for unprivileged
55835+ users of the system. Sysfs and debugfs have also become a large source
55836+ of new vulnerabilities, ranging from infoleaks to local compromise.
55837+ There has been very little oversight with an eye toward security involved
55838+ in adding new exporters of information to these filesystems, so their
55839+ use is discouraged.
55840+ This option is equivalent to a chmod 0700 of the mount paths.
55841+
55842+config GRKERNSEC_ROFS
55843+ bool "Runtime read-only mount protection"
55844+ help
55845+ If you say Y here, a sysctl option with name "romount_protect" will
55846+ be created. By setting this option to 1 at runtime, filesystems
55847+ will be protected in the following ways:
55848+ * No new writable mounts will be allowed
55849+ * Existing read-only mounts won't be able to be remounted read/write
55850+ * Write operations will be denied on all block devices
55851+ This option acts independently of grsec_lock: once it is set to 1,
55852+ it cannot be turned off. Therefore, please be mindful of the resulting
55853+ behavior if this option is enabled in an init script on a read-only
55854+ filesystem. This feature is mainly intended for secure embedded systems.
55855+
55856+config GRKERNSEC_CHROOT
55857+ bool "Chroot jail restrictions"
55858+ help
55859+ If you say Y here, you will be able to choose several options that will
55860+ make breaking out of a chrooted jail much more difficult. If you
55861+ encounter no software incompatibilities with the following options, it
55862+ is recommended that you enable each one.
55863+
55864+config GRKERNSEC_CHROOT_MOUNT
55865+ bool "Deny mounts"
55866+ depends on GRKERNSEC_CHROOT
55867+ help
55868+ If you say Y here, processes inside a chroot will not be able to
55869+ mount or remount filesystems. If the sysctl option is enabled, a
55870+ sysctl option with name "chroot_deny_mount" is created.
55871+
55872+config GRKERNSEC_CHROOT_DOUBLE
55873+ bool "Deny double-chroots"
55874+ depends on GRKERNSEC_CHROOT
55875+ help
55876+ If you say Y here, processes inside a chroot will not be able to chroot
55877+ again outside the chroot. This is a widely used method of breaking
55878+ out of a chroot jail and should not be allowed. If the sysctl
55879+ option is enabled, a sysctl option with name
55880+ "chroot_deny_chroot" is created.
55881+
55882+config GRKERNSEC_CHROOT_PIVOT
55883+ bool "Deny pivot_root in chroot"
55884+ depends on GRKERNSEC_CHROOT
55885+ help
55886+ If you say Y here, processes inside a chroot will not be able to use
55887+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55888+ works similar to chroot in that it changes the root filesystem. This
55889+ function could be misused in a chrooted process to attempt to break out
55890+ of the chroot, and therefore should not be allowed. If the sysctl
55891+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55892+ created.
55893+
55894+config GRKERNSEC_CHROOT_CHDIR
55895+ bool "Enforce chdir(\"/\") on all chroots"
55896+ depends on GRKERNSEC_CHROOT
55897+ help
55898+ If you say Y here, the current working directory of all newly-chrooted
55899+ applications will be set to the the root directory of the chroot.
55900+ The man page on chroot(2) states:
55901+ Note that this call does not change the current working
55902+ directory, so that `.' can be outside the tree rooted at
55903+ `/'. In particular, the super-user can escape from a
55904+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55905+
55906+ It is recommended that you say Y here, since it's not known to break
55907+ any software. If the sysctl option is enabled, a sysctl option with
55908+ name "chroot_enforce_chdir" is created.
55909+
55910+config GRKERNSEC_CHROOT_CHMOD
55911+ bool "Deny (f)chmod +s"
55912+ depends on GRKERNSEC_CHROOT
55913+ help
55914+ If you say Y here, processes inside a chroot will not be able to chmod
55915+ or fchmod files to make them have suid or sgid bits. This protects
55916+ against another published method of breaking a chroot. If the sysctl
55917+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55918+ created.
55919+
55920+config GRKERNSEC_CHROOT_FCHDIR
55921+ bool "Deny fchdir out of chroot"
55922+ depends on GRKERNSEC_CHROOT
55923+ help
55924+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55925+ to a file descriptor of the chrooting process that points to a directory
55926+ outside the filesystem will be stopped. If the sysctl option
55927+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55928+
55929+config GRKERNSEC_CHROOT_MKNOD
55930+ bool "Deny mknod"
55931+ depends on GRKERNSEC_CHROOT
55932+ help
55933+ If you say Y here, processes inside a chroot will not be allowed to
55934+ mknod. The problem with using mknod inside a chroot is that it
55935+ would allow an attacker to create a device entry that is the same
55936+ as one on the physical root of your system, which could range from
55937+ anything from the console device to a device for your harddrive (which
55938+ they could then use to wipe the drive or steal data). It is recommended
55939+ that you say Y here, unless you run into software incompatibilities.
55940+ If the sysctl option is enabled, a sysctl option with name
55941+ "chroot_deny_mknod" is created.
55942+
55943+config GRKERNSEC_CHROOT_SHMAT
55944+ bool "Deny shmat() out of chroot"
55945+ depends on GRKERNSEC_CHROOT
55946+ help
55947+ If you say Y here, processes inside a chroot will not be able to attach
55948+ to shared memory segments that were created outside of the chroot jail.
55949+ It is recommended that you say Y here. If the sysctl option is enabled,
55950+ a sysctl option with name "chroot_deny_shmat" is created.
55951+
55952+config GRKERNSEC_CHROOT_UNIX
55953+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55954+ depends on GRKERNSEC_CHROOT
55955+ help
55956+ If you say Y here, processes inside a chroot will not be able to
55957+ connect to abstract (meaning not belonging to a filesystem) Unix
55958+ domain sockets that were bound outside of a chroot. It is recommended
55959+ that you say Y here. If the sysctl option is enabled, a sysctl option
55960+ with name "chroot_deny_unix" is created.
55961+
55962+config GRKERNSEC_CHROOT_FINDTASK
55963+ bool "Protect outside processes"
55964+ depends on GRKERNSEC_CHROOT
55965+ help
55966+ If you say Y here, processes inside a chroot will not be able to
55967+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55968+ getsid, or view any process outside of the chroot. If the sysctl
55969+ option is enabled, a sysctl option with name "chroot_findtask" is
55970+ created.
55971+
55972+config GRKERNSEC_CHROOT_NICE
55973+ bool "Restrict priority changes"
55974+ depends on GRKERNSEC_CHROOT
55975+ help
55976+ If you say Y here, processes inside a chroot will not be able to raise
55977+ the priority of processes in the chroot, or alter the priority of
55978+ processes outside the chroot. This provides more security than simply
55979+ removing CAP_SYS_NICE from the process' capability set. If the
55980+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55981+ is created.
55982+
55983+config GRKERNSEC_CHROOT_SYSCTL
55984+ bool "Deny sysctl writes"
55985+ depends on GRKERNSEC_CHROOT
55986+ help
55987+ If you say Y here, an attacker in a chroot will not be able to
55988+ write to sysctl entries, either by sysctl(2) or through a /proc
55989+ interface. It is strongly recommended that you say Y here. If the
55990+ sysctl option is enabled, a sysctl option with name
55991+ "chroot_deny_sysctl" is created.
55992+
55993+config GRKERNSEC_CHROOT_CAPS
55994+ bool "Capability restrictions"
55995+ depends on GRKERNSEC_CHROOT
55996+ help
55997+ If you say Y here, the capabilities on all processes within a
55998+ chroot jail will be lowered to stop module insertion, raw i/o,
55999+ system and net admin tasks, rebooting the system, modifying immutable
56000+ files, modifying IPC owned by another, and changing the system time.
56001+ This is left an option because it can break some apps. Disable this
56002+ if your chrooted apps are having problems performing those kinds of
56003+ tasks. If the sysctl option is enabled, a sysctl option with
56004+ name "chroot_caps" is created.
56005+
56006+endmenu
56007+menu "Kernel Auditing"
56008+depends on GRKERNSEC
56009+
56010+config GRKERNSEC_AUDIT_GROUP
56011+ bool "Single group for auditing"
56012+ help
56013+ If you say Y here, the exec, chdir, and (un)mount logging features
56014+ will only operate on a group you specify. This option is recommended
56015+ if you only want to watch certain users instead of having a large
56016+ amount of logs from the entire system. If the sysctl option is enabled,
56017+ a sysctl option with name "audit_group" is created.
56018+
56019+config GRKERNSEC_AUDIT_GID
56020+ int "GID for auditing"
56021+ depends on GRKERNSEC_AUDIT_GROUP
56022+ default 1007
56023+
56024+config GRKERNSEC_EXECLOG
56025+ bool "Exec logging"
56026+ help
56027+ If you say Y here, all execve() calls will be logged (since the
56028+ other exec*() calls are frontends to execve(), all execution
56029+ will be logged). Useful for shell-servers that like to keep track
56030+ of their users. If the sysctl option is enabled, a sysctl option with
56031+ name "exec_logging" is created.
56032+ WARNING: This option when enabled will produce a LOT of logs, especially
56033+ on an active system.
56034+
56035+config GRKERNSEC_RESLOG
56036+ bool "Resource logging"
56037+ help
56038+ If you say Y here, all attempts to overstep resource limits will
56039+ be logged with the resource name, the requested size, and the current
56040+ limit. It is highly recommended that you say Y here. If the sysctl
56041+ option is enabled, a sysctl option with name "resource_logging" is
56042+ created. If the RBAC system is enabled, the sysctl value is ignored.
56043+
56044+config GRKERNSEC_CHROOT_EXECLOG
56045+ bool "Log execs within chroot"
56046+ help
56047+ If you say Y here, all executions inside a chroot jail will be logged
56048+ to syslog. This can cause a large amount of logs if certain
56049+ applications (eg. djb's daemontools) are installed on the system, and
56050+ is therefore left as an option. If the sysctl option is enabled, a
56051+ sysctl option with name "chroot_execlog" is created.
56052+
56053+config GRKERNSEC_AUDIT_PTRACE
56054+ bool "Ptrace logging"
56055+ help
56056+ If you say Y here, all attempts to attach to a process via ptrace
56057+ will be logged. If the sysctl option is enabled, a sysctl option
56058+ with name "audit_ptrace" is created.
56059+
56060+config GRKERNSEC_AUDIT_CHDIR
56061+ bool "Chdir logging"
56062+ help
56063+ If you say Y here, all chdir() calls will be logged. If the sysctl
56064+ option is enabled, a sysctl option with name "audit_chdir" is created.
56065+
56066+config GRKERNSEC_AUDIT_MOUNT
56067+ bool "(Un)Mount logging"
56068+ help
56069+ If you say Y here, all mounts and unmounts will be logged. If the
56070+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56071+ created.
56072+
56073+config GRKERNSEC_SIGNAL
56074+ bool "Signal logging"
56075+ help
56076+ If you say Y here, certain important signals will be logged, such as
56077+	  SIGSEGV, which will as a result inform you of when an error in a program
56078+	  occurred, which in some cases could mean a possible exploit attempt.
56079+ If the sysctl option is enabled, a sysctl option with name
56080+ "signal_logging" is created.
56081+
56082+config GRKERNSEC_FORKFAIL
56083+ bool "Fork failure logging"
56084+ help
56085+ If you say Y here, all failed fork() attempts will be logged.
56086+ This could suggest a fork bomb, or someone attempting to overstep
56087+ their process limit. If the sysctl option is enabled, a sysctl option
56088+ with name "forkfail_logging" is created.
56089+
56090+config GRKERNSEC_TIME
56091+ bool "Time change logging"
56092+ help
56093+ If you say Y here, any changes of the system clock will be logged.
56094+ If the sysctl option is enabled, a sysctl option with name
56095+ "timechange_logging" is created.
56096+
56097+config GRKERNSEC_PROC_IPADDR
56098+ bool "/proc/<pid>/ipaddr support"
56099+ help
56100+ If you say Y here, a new entry will be added to each /proc/<pid>
56101+ directory that contains the IP address of the person using the task.
56102+ The IP is carried across local TCP and AF_UNIX stream sockets.
56103+ This information can be useful for IDS/IPSes to perform remote response
56104+ to a local attack. The entry is readable by only the owner of the
56105+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56106+ the RBAC system), and thus does not create privacy concerns.
56107+
56108+config GRKERNSEC_RWXMAP_LOG
56109+ bool 'Denied RWX mmap/mprotect logging'
56110+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56111+ help
56112+ If you say Y here, calls to mmap() and mprotect() with explicit
56113+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56114+ denied by the PAX_MPROTECT feature. If the sysctl option is
56115+ enabled, a sysctl option with name "rwxmap_logging" is created.
56116+
56117+config GRKERNSEC_AUDIT_TEXTREL
56118+ bool 'ELF text relocations logging (READ HELP)'
56119+ depends on PAX_MPROTECT
56120+ help
56121+ If you say Y here, text relocations will be logged with the filename
56122+ of the offending library or binary. The purpose of the feature is
56123+ to help Linux distribution developers get rid of libraries and
56124+ binaries that need text relocations which hinder the future progress
56125+ of PaX. Only Linux distribution developers should say Y here, and
56126+ never on a production machine, as this option creates an information
56127+ leak that could aid an attacker in defeating the randomization of
56128+ a single memory region. If the sysctl option is enabled, a sysctl
56129+ option with name "audit_textrel" is created.
56130+
56131+endmenu
56132+
56133+menu "Executable Protections"
56134+depends on GRKERNSEC
56135+
56136+config GRKERNSEC_DMESG
56137+ bool "Dmesg(8) restriction"
56138+ help
56139+ If you say Y here, non-root users will not be able to use dmesg(8)
56140+ to view up to the last 4kb of messages in the kernel's log buffer.
56141+ The kernel's log buffer often contains kernel addresses and other
56142+ identifying information useful to an attacker in fingerprinting a
56143+ system for a targeted exploit.
56144+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56145+ created.
56146+
56147+config GRKERNSEC_HARDEN_PTRACE
56148+ bool "Deter ptrace-based process snooping"
56149+ help
56150+ If you say Y here, TTY sniffers and other malicious monitoring
56151+ programs implemented through ptrace will be defeated. If you
56152+ have been using the RBAC system, this option has already been
56153+ enabled for several years for all users, with the ability to make
56154+ fine-grained exceptions.
56155+
56156+ This option only affects the ability of non-root users to ptrace
56157+	  processes that are not a descendant of the ptracing process.
56158+ This means that strace ./binary and gdb ./binary will still work,
56159+ but attaching to arbitrary processes will not. If the sysctl
56160+ option is enabled, a sysctl option with name "harden_ptrace" is
56161+ created.
56162+
56163+config GRKERNSEC_TPE
56164+ bool "Trusted Path Execution (TPE)"
56165+ help
56166+ If you say Y here, you will be able to choose a gid to add to the
56167+ supplementary groups of users you want to mark as "untrusted."
56168+ These users will not be able to execute any files that are not in
56169+ root-owned directories writable only by root. If the sysctl option
56170+ is enabled, a sysctl option with name "tpe" is created.
56171+
56172+config GRKERNSEC_TPE_ALL
56173+ bool "Partially restrict all non-root users"
56174+ depends on GRKERNSEC_TPE
56175+ help
56176+ If you say Y here, all non-root users will be covered under
56177+ a weaker TPE restriction. This is separate from, and in addition to,
56178+ the main TPE options that you have selected elsewhere. Thus, if a
56179+ "trusted" GID is chosen, this restriction applies to even that GID.
56180+ Under this restriction, all non-root users will only be allowed to
56181+ execute files in directories they own that are not group or
56182+ world-writable, or in directories owned by root and writable only by
56183+ root. If the sysctl option is enabled, a sysctl option with name
56184+ "tpe_restrict_all" is created.
56185+
56186+config GRKERNSEC_TPE_INVERT
56187+ bool "Invert GID option"
56188+ depends on GRKERNSEC_TPE
56189+ help
56190+ If you say Y here, the group you specify in the TPE configuration will
56191+ decide what group TPE restrictions will be *disabled* for. This
56192+ option is useful if you want TPE restrictions to be applied to most
56193+ users on the system. If the sysctl option is enabled, a sysctl option
56194+ with name "tpe_invert" is created. Unlike other sysctl options, this
56195+ entry will default to on for backward-compatibility.
56196+
56197+config GRKERNSEC_TPE_GID
56198+ int "GID for untrusted users"
56199+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56200+ default 1005
56201+ help
56202+ Setting this GID determines what group TPE restrictions will be
56203+ *enabled* for. If the sysctl option is enabled, a sysctl option
56204+ with name "tpe_gid" is created.
56205+
56206+config GRKERNSEC_TPE_GID
56207+ int "GID for trusted users"
56208+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56209+ default 1005
56210+ help
56211+ Setting this GID determines what group TPE restrictions will be
56212+ *disabled* for. If the sysctl option is enabled, a sysctl option
56213+ with name "tpe_gid" is created.
56214+
56215+endmenu
56216+menu "Network Protections"
56217+depends on GRKERNSEC
56218+
56219+config GRKERNSEC_RANDNET
56220+ bool "Larger entropy pools"
56221+ help
56222+ If you say Y here, the entropy pools used for many features of Linux
56223+ and grsecurity will be doubled in size. Since several grsecurity
56224+ features use additional randomness, it is recommended that you say Y
56225+ here. Saying Y here has a similar effect as modifying
56226+ /proc/sys/kernel/random/poolsize.
56227+
56228+config GRKERNSEC_BLACKHOLE
56229+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56230+ depends on NET
56231+ help
56232+ If you say Y here, neither TCP resets nor ICMP
56233+ destination-unreachable packets will be sent in response to packets
56234+ sent to ports for which no associated listening process exists.
56235+ This feature supports both IPV4 and IPV6 and exempts the
56236+ loopback interface from blackholing. Enabling this feature
56237+ makes a host more resilient to DoS attacks and reduces network
56238+ visibility against scanners.
56239+
56240+ The blackhole feature as-implemented is equivalent to the FreeBSD
56241+ blackhole feature, as it prevents RST responses to all packets, not
56242+ just SYNs. Under most application behavior this causes no
56243+ problems, but applications (like haproxy) may not close certain
56244+ connections in a way that cleanly terminates them on the remote
56245+ end, leaving the remote host in LAST_ACK state. Because of this
56246+ side-effect and to prevent intentional LAST_ACK DoSes, this
56247+ feature also adds automatic mitigation against such attacks.
56248+ The mitigation drastically reduces the amount of time a socket
56249+ can spend in LAST_ACK state. If you're using haproxy and not
56250+ all servers it connects to have this option enabled, consider
56251+ disabling this feature on the haproxy host.
56252+
56253+ If the sysctl option is enabled, two sysctl options with names
56254+ "ip_blackhole" and "lastack_retries" will be created.
56255+ While "ip_blackhole" takes the standard zero/non-zero on/off
56256+ toggle, "lastack_retries" uses the same kinds of values as
56257+ "tcp_retries1" and "tcp_retries2". The default value of 4
56258+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56259+ state.
56260+
56261+config GRKERNSEC_SOCKET
56262+ bool "Socket restrictions"
56263+ depends on NET
56264+ help
56265+ If you say Y here, you will be able to choose from several options.
56266+ If you assign a GID on your system and add it to the supplementary
56267+ groups of users you want to restrict socket access to, this patch
56268+ will perform up to three things, based on the option(s) you choose.
56269+
56270+config GRKERNSEC_SOCKET_ALL
56271+ bool "Deny any sockets to group"
56272+ depends on GRKERNSEC_SOCKET
56273+ help
56274+	  If you say Y here, you will be able to choose a GID whose users will
56275+ be unable to connect to other hosts from your machine or run server
56276+ applications from your machine. If the sysctl option is enabled, a
56277+ sysctl option with name "socket_all" is created.
56278+
56279+config GRKERNSEC_SOCKET_ALL_GID
56280+ int "GID to deny all sockets for"
56281+ depends on GRKERNSEC_SOCKET_ALL
56282+ default 1004
56283+ help
56284+ Here you can choose the GID to disable socket access for. Remember to
56285+ add the users you want socket access disabled for to the GID
56286+ specified here. If the sysctl option is enabled, a sysctl option
56287+ with name "socket_all_gid" is created.
56288+
56289+config GRKERNSEC_SOCKET_CLIENT
56290+ bool "Deny client sockets to group"
56291+ depends on GRKERNSEC_SOCKET
56292+ help
56293+	  If you say Y here, you will be able to choose a GID whose users will
56294+ be unable to connect to other hosts from your machine, but will be
56295+ able to run servers. If this option is enabled, all users in the group
56296+ you specify will have to use passive mode when initiating ftp transfers
56297+ from the shell on your machine. If the sysctl option is enabled, a
56298+ sysctl option with name "socket_client" is created.
56299+
56300+config GRKERNSEC_SOCKET_CLIENT_GID
56301+ int "GID to deny client sockets for"
56302+ depends on GRKERNSEC_SOCKET_CLIENT
56303+ default 1003
56304+ help
56305+ Here you can choose the GID to disable client socket access for.
56306+ Remember to add the users you want client socket access disabled for to
56307+ the GID specified here. If the sysctl option is enabled, a sysctl
56308+ option with name "socket_client_gid" is created.
56309+
56310+config GRKERNSEC_SOCKET_SERVER
56311+ bool "Deny server sockets to group"
56312+ depends on GRKERNSEC_SOCKET
56313+ help
56314+	  If you say Y here, you will be able to choose a GID whose users will
56315+ be unable to run server applications from your machine. If the sysctl
56316+ option is enabled, a sysctl option with name "socket_server" is created.
56317+
56318+config GRKERNSEC_SOCKET_SERVER_GID
56319+ int "GID to deny server sockets for"
56320+ depends on GRKERNSEC_SOCKET_SERVER
56321+ default 1002
56322+ help
56323+ Here you can choose the GID to disable server socket access for.
56324+ Remember to add the users you want server socket access disabled for to
56325+ the GID specified here. If the sysctl option is enabled, a sysctl
56326+ option with name "socket_server_gid" is created.
56327+
56328+endmenu
56329+menu "Sysctl support"
56330+depends on GRKERNSEC && SYSCTL
56331+
56332+config GRKERNSEC_SYSCTL
56333+ bool "Sysctl support"
56334+ help
56335+ If you say Y here, you will be able to change the options that
56336+ grsecurity runs with at bootup, without having to recompile your
56337+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56338+ to enable (1) or disable (0) various features. All the sysctl entries
56339+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56340+ All features enabled in the kernel configuration are disabled at boot
56341+ if you do not say Y to the "Turn on features by default" option.
56342+ All options should be set at startup, and the grsec_lock entry should
56343+ be set to a non-zero value after all the options are set.
56344+ *THIS IS EXTREMELY IMPORTANT*
56345+
56346+config GRKERNSEC_SYSCTL_DISTRO
56347+ bool "Extra sysctl support for distro makers (READ HELP)"
56348+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56349+ help
56350+ If you say Y here, additional sysctl options will be created
56351+ for features that affect processes running as root. Therefore,
56352+ it is critical when using this option that the grsec_lock entry be
56353+ enabled after boot. Only distros with prebuilt kernel packages
56354+ with this option enabled that can ensure grsec_lock is enabled
56355+ after boot should use this option.
56356+ *Failure to set grsec_lock after boot makes all grsec features
56357+ this option covers useless*
56358+
56359+ Currently this option creates the following sysctl entries:
56360+ "Disable Privileged I/O": "disable_priv_io"
56361+
56362+config GRKERNSEC_SYSCTL_ON
56363+ bool "Turn on features by default"
56364+ depends on GRKERNSEC_SYSCTL
56365+ help
56366+ If you say Y here, instead of having all features enabled in the
56367+ kernel configuration disabled at boot time, the features will be
56368+ enabled at boot time. It is recommended you say Y here unless
56369+ there is some reason you would want all sysctl-tunable features to
56370+ be disabled by default. As mentioned elsewhere, it is important
56371+ to enable the grsec_lock entry once you have finished modifying
56372+ the sysctl entries.
56373+
56374+endmenu
56375+menu "Logging Options"
56376+depends on GRKERNSEC
56377+
56378+config GRKERNSEC_FLOODTIME
56379+ int "Seconds in between log messages (minimum)"
56380+ default 10
56381+ help
56382+ This option allows you to enforce the number of seconds between
56383+ grsecurity log messages. The default should be suitable for most
56384+ people, however, if you choose to change it, choose a value small enough
56385+ to allow informative logs to be produced, but large enough to
56386+ prevent flooding.
56387+
56388+config GRKERNSEC_FLOODBURST
56389+ int "Number of messages in a burst (maximum)"
56390+ default 6
56391+ help
56392+ This option allows you to choose the maximum number of messages allowed
56393+ within the flood time interval you chose in a separate option. The
56394+ default should be suitable for most people, however if you find that
56395+ many of your logs are being interpreted as flooding, you may want to
56396+ raise this value.
56397+
56398+endmenu
56399+
56400+endmenu
56401diff -urNp linux-3.0.7/grsecurity/Makefile linux-3.0.7/grsecurity/Makefile
56402--- linux-3.0.7/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56403+++ linux-3.0.7/grsecurity/Makefile 2011-10-17 06:45:43.000000000 -0400
56404@@ -0,0 +1,36 @@
56405+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56406+# during 2001-2009 it has been completely redesigned by Brad Spengler
56407+# into an RBAC system
56408+#
56409+# All code in this directory and various hooks inserted throughout the kernel
56410+# are copyright Brad Spengler - Open Source Security, Inc., and released
56411+# under the GPL v2 or higher
56412+
56413+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56414+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56415+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56416+
56417+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56418+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56419+ gracl_learn.o grsec_log.o
56420+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56421+
56422+ifdef CONFIG_NET
56423+obj-y += grsec_sock.o
56424+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56425+endif
56426+
56427+ifndef CONFIG_GRKERNSEC
56428+obj-y += grsec_disabled.o
56429+endif
56430+
56431+ifdef CONFIG_GRKERNSEC_HIDESYM
56432+extra-y := grsec_hidesym.o
56433+$(obj)/grsec_hidesym.o:
56434+ @-chmod -f 500 /boot
56435+ @-chmod -f 500 /lib/modules
56436+ @-chmod -f 500 /lib64/modules
56437+ @-chmod -f 500 /lib32/modules
56438+ @-chmod -f 700 .
56439+ @echo ' grsec: protected kernel image paths'
56440+endif
56441diff -urNp linux-3.0.7/include/acpi/acpi_bus.h linux-3.0.7/include/acpi/acpi_bus.h
56442--- linux-3.0.7/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
56443+++ linux-3.0.7/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
56444@@ -107,7 +107,7 @@ struct acpi_device_ops {
56445 acpi_op_bind bind;
56446 acpi_op_unbind unbind;
56447 acpi_op_notify notify;
56448-};
56449+} __no_const;
56450
56451 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56452
56453diff -urNp linux-3.0.7/include/asm-generic/atomic-long.h linux-3.0.7/include/asm-generic/atomic-long.h
56454--- linux-3.0.7/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
56455+++ linux-3.0.7/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
56456@@ -22,6 +22,12 @@
56457
56458 typedef atomic64_t atomic_long_t;
56459
56460+#ifdef CONFIG_PAX_REFCOUNT
56461+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56462+#else
56463+typedef atomic64_t atomic_long_unchecked_t;
56464+#endif
56465+
56466 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56467
56468 static inline long atomic_long_read(atomic_long_t *l)
56469@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56470 return (long)atomic64_read(v);
56471 }
56472
56473+#ifdef CONFIG_PAX_REFCOUNT
56474+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56475+{
56476+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56477+
56478+ return (long)atomic64_read_unchecked(v);
56479+}
56480+#endif
56481+
56482 static inline void atomic_long_set(atomic_long_t *l, long i)
56483 {
56484 atomic64_t *v = (atomic64_t *)l;
56485@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56486 atomic64_set(v, i);
56487 }
56488
56489+#ifdef CONFIG_PAX_REFCOUNT
56490+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56491+{
56492+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56493+
56494+ atomic64_set_unchecked(v, i);
56495+}
56496+#endif
56497+
56498 static inline void atomic_long_inc(atomic_long_t *l)
56499 {
56500 atomic64_t *v = (atomic64_t *)l;
56501@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56502 atomic64_inc(v);
56503 }
56504
56505+#ifdef CONFIG_PAX_REFCOUNT
56506+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56507+{
56508+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56509+
56510+ atomic64_inc_unchecked(v);
56511+}
56512+#endif
56513+
56514 static inline void atomic_long_dec(atomic_long_t *l)
56515 {
56516 atomic64_t *v = (atomic64_t *)l;
56517@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56518 atomic64_dec(v);
56519 }
56520
56521+#ifdef CONFIG_PAX_REFCOUNT
56522+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56523+{
56524+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56525+
56526+ atomic64_dec_unchecked(v);
56527+}
56528+#endif
56529+
56530 static inline void atomic_long_add(long i, atomic_long_t *l)
56531 {
56532 atomic64_t *v = (atomic64_t *)l;
56533@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56534 atomic64_add(i, v);
56535 }
56536
56537+#ifdef CONFIG_PAX_REFCOUNT
56538+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56539+{
56540+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56541+
56542+ atomic64_add_unchecked(i, v);
56543+}
56544+#endif
56545+
56546 static inline void atomic_long_sub(long i, atomic_long_t *l)
56547 {
56548 atomic64_t *v = (atomic64_t *)l;
56549@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
56550 atomic64_sub(i, v);
56551 }
56552
56553+#ifdef CONFIG_PAX_REFCOUNT
56554+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56555+{
56556+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56557+
56558+ atomic64_sub_unchecked(i, v);
56559+}
56560+#endif
56561+
56562 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56563 {
56564 atomic64_t *v = (atomic64_t *)l;
56565@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
56566 return (long)atomic64_inc_return(v);
56567 }
56568
56569+#ifdef CONFIG_PAX_REFCOUNT
56570+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56571+{
56572+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56573+
56574+ return (long)atomic64_inc_return_unchecked(v);
56575+}
56576+#endif
56577+
56578 static inline long atomic_long_dec_return(atomic_long_t *l)
56579 {
56580 atomic64_t *v = (atomic64_t *)l;
56581@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
56582
56583 typedef atomic_t atomic_long_t;
56584
56585+#ifdef CONFIG_PAX_REFCOUNT
56586+typedef atomic_unchecked_t atomic_long_unchecked_t;
56587+#else
56588+typedef atomic_t atomic_long_unchecked_t;
56589+#endif
56590+
56591 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56592 static inline long atomic_long_read(atomic_long_t *l)
56593 {
56594@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
56595 return (long)atomic_read(v);
56596 }
56597
56598+#ifdef CONFIG_PAX_REFCOUNT
56599+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56600+{
56601+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56602+
56603+ return (long)atomic_read_unchecked(v);
56604+}
56605+#endif
56606+
56607 static inline void atomic_long_set(atomic_long_t *l, long i)
56608 {
56609 atomic_t *v = (atomic_t *)l;
56610@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
56611 atomic_set(v, i);
56612 }
56613
56614+#ifdef CONFIG_PAX_REFCOUNT
56615+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56616+{
56617+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56618+
56619+ atomic_set_unchecked(v, i);
56620+}
56621+#endif
56622+
56623 static inline void atomic_long_inc(atomic_long_t *l)
56624 {
56625 atomic_t *v = (atomic_t *)l;
56626@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
56627 atomic_inc(v);
56628 }
56629
56630+#ifdef CONFIG_PAX_REFCOUNT
56631+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56632+{
56633+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56634+
56635+ atomic_inc_unchecked(v);
56636+}
56637+#endif
56638+
56639 static inline void atomic_long_dec(atomic_long_t *l)
56640 {
56641 atomic_t *v = (atomic_t *)l;
56642@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
56643 atomic_dec(v);
56644 }
56645
56646+#ifdef CONFIG_PAX_REFCOUNT
56647+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56648+{
56649+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56650+
56651+ atomic_dec_unchecked(v);
56652+}
56653+#endif
56654+
56655 static inline void atomic_long_add(long i, atomic_long_t *l)
56656 {
56657 atomic_t *v = (atomic_t *)l;
56658@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
56659 atomic_add(i, v);
56660 }
56661
56662+#ifdef CONFIG_PAX_REFCOUNT
56663+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56664+{
56665+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56666+
56667+ atomic_add_unchecked(i, v);
56668+}
56669+#endif
56670+
56671 static inline void atomic_long_sub(long i, atomic_long_t *l)
56672 {
56673 atomic_t *v = (atomic_t *)l;
56674@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
56675 atomic_sub(i, v);
56676 }
56677
56678+#ifdef CONFIG_PAX_REFCOUNT
56679+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56680+{
56681+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56682+
56683+ atomic_sub_unchecked(i, v);
56684+}
56685+#endif
56686+
56687 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56688 {
56689 atomic_t *v = (atomic_t *)l;
56690@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
56691 return (long)atomic_inc_return(v);
56692 }
56693
56694+#ifdef CONFIG_PAX_REFCOUNT
56695+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56696+{
56697+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56698+
56699+ return (long)atomic_inc_return_unchecked(v);
56700+}
56701+#endif
56702+
56703 static inline long atomic_long_dec_return(atomic_long_t *l)
56704 {
56705 atomic_t *v = (atomic_t *)l;
56706@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
56707
56708 #endif /* BITS_PER_LONG == 64 */
56709
56710+#ifdef CONFIG_PAX_REFCOUNT
56711+static inline void pax_refcount_needs_these_functions(void)
56712+{
56713+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56714+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56715+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56716+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56717+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56718+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56719+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56720+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56721+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56722+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56723+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56724+
56725+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56726+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56727+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56728+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
56729+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56730+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56731+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56732+}
56733+#else
56734+#define atomic_read_unchecked(v) atomic_read(v)
56735+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56736+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56737+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56738+#define atomic_inc_unchecked(v) atomic_inc(v)
56739+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56740+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56741+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56742+#define atomic_dec_unchecked(v) atomic_dec(v)
56743+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56744+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56745+
56746+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56747+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56748+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56749+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
56750+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56751+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56752+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56753+#endif
56754+
56755 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56756diff -urNp linux-3.0.7/include/asm-generic/cache.h linux-3.0.7/include/asm-generic/cache.h
56757--- linux-3.0.7/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
56758+++ linux-3.0.7/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
56759@@ -6,7 +6,7 @@
56760 * cache lines need to provide their own cache.h.
56761 */
56762
56763-#define L1_CACHE_SHIFT 5
56764-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56765+#define L1_CACHE_SHIFT 5UL
56766+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56767
56768 #endif /* __ASM_GENERIC_CACHE_H */
56769diff -urNp linux-3.0.7/include/asm-generic/int-l64.h linux-3.0.7/include/asm-generic/int-l64.h
56770--- linux-3.0.7/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
56771+++ linux-3.0.7/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
56772@@ -46,6 +46,8 @@ typedef unsigned int u32;
56773 typedef signed long s64;
56774 typedef unsigned long u64;
56775
56776+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56777+
56778 #define S8_C(x) x
56779 #define U8_C(x) x ## U
56780 #define S16_C(x) x
56781diff -urNp linux-3.0.7/include/asm-generic/int-ll64.h linux-3.0.7/include/asm-generic/int-ll64.h
56782--- linux-3.0.7/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
56783+++ linux-3.0.7/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
56784@@ -51,6 +51,8 @@ typedef unsigned int u32;
56785 typedef signed long long s64;
56786 typedef unsigned long long u64;
56787
56788+typedef unsigned long long intoverflow_t;
56789+
56790 #define S8_C(x) x
56791 #define U8_C(x) x ## U
56792 #define S16_C(x) x
56793diff -urNp linux-3.0.7/include/asm-generic/kmap_types.h linux-3.0.7/include/asm-generic/kmap_types.h
56794--- linux-3.0.7/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
56795+++ linux-3.0.7/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
56796@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
56797 KMAP_D(17) KM_NMI,
56798 KMAP_D(18) KM_NMI_PTE,
56799 KMAP_D(19) KM_KDB,
56800+KMAP_D(20) KM_CLEARPAGE,
56801 /*
56802 * Remember to update debug_kmap_atomic() when adding new kmap types!
56803 */
56804-KMAP_D(20) KM_TYPE_NR
56805+KMAP_D(21) KM_TYPE_NR
56806 };
56807
56808 #undef KMAP_D
56809diff -urNp linux-3.0.7/include/asm-generic/pgtable.h linux-3.0.7/include/asm-generic/pgtable.h
56810--- linux-3.0.7/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
56811+++ linux-3.0.7/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
56812@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
56813 #endif /* __HAVE_ARCH_PMD_WRITE */
56814 #endif
56815
56816+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56817+static inline unsigned long pax_open_kernel(void) { return 0; }
56818+#endif
56819+
56820+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56821+static inline unsigned long pax_close_kernel(void) { return 0; }
56822+#endif
56823+
56824 #endif /* !__ASSEMBLY__ */
56825
56826 #endif /* _ASM_GENERIC_PGTABLE_H */
56827diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopmd.h linux-3.0.7/include/asm-generic/pgtable-nopmd.h
56828--- linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
56829+++ linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
56830@@ -1,14 +1,19 @@
56831 #ifndef _PGTABLE_NOPMD_H
56832 #define _PGTABLE_NOPMD_H
56833
56834-#ifndef __ASSEMBLY__
56835-
56836 #include <asm-generic/pgtable-nopud.h>
56837
56838-struct mm_struct;
56839-
56840 #define __PAGETABLE_PMD_FOLDED
56841
56842+#define PMD_SHIFT PUD_SHIFT
56843+#define PTRS_PER_PMD 1
56844+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56845+#define PMD_MASK (~(PMD_SIZE-1))
56846+
56847+#ifndef __ASSEMBLY__
56848+
56849+struct mm_struct;
56850+
56851 /*
56852 * Having the pmd type consist of a pud gets the size right, and allows
56853 * us to conceptually access the pud entry that this pmd is folded into
56854@@ -16,11 +21,6 @@ struct mm_struct;
56855 */
56856 typedef struct { pud_t pud; } pmd_t;
56857
56858-#define PMD_SHIFT PUD_SHIFT
56859-#define PTRS_PER_PMD 1
56860-#define PMD_SIZE (1UL << PMD_SHIFT)
56861-#define PMD_MASK (~(PMD_SIZE-1))
56862-
56863 /*
56864 * The "pud_xxx()" functions here are trivial for a folded two-level
56865 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56866diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopud.h linux-3.0.7/include/asm-generic/pgtable-nopud.h
56867--- linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
56868+++ linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
56869@@ -1,10 +1,15 @@
56870 #ifndef _PGTABLE_NOPUD_H
56871 #define _PGTABLE_NOPUD_H
56872
56873-#ifndef __ASSEMBLY__
56874-
56875 #define __PAGETABLE_PUD_FOLDED
56876
56877+#define PUD_SHIFT PGDIR_SHIFT
56878+#define PTRS_PER_PUD 1
56879+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56880+#define PUD_MASK (~(PUD_SIZE-1))
56881+
56882+#ifndef __ASSEMBLY__
56883+
56884 /*
56885 * Having the pud type consist of a pgd gets the size right, and allows
56886 * us to conceptually access the pgd entry that this pud is folded into
56887@@ -12,11 +17,6 @@
56888 */
56889 typedef struct { pgd_t pgd; } pud_t;
56890
56891-#define PUD_SHIFT PGDIR_SHIFT
56892-#define PTRS_PER_PUD 1
56893-#define PUD_SIZE (1UL << PUD_SHIFT)
56894-#define PUD_MASK (~(PUD_SIZE-1))
56895-
56896 /*
56897 * The "pgd_xxx()" functions here are trivial for a folded two-level
56898 * setup: the pud is never bad, and a pud always exists (as it's folded
56899diff -urNp linux-3.0.7/include/asm-generic/vmlinux.lds.h linux-3.0.7/include/asm-generic/vmlinux.lds.h
56900--- linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
56901+++ linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
56902@@ -217,6 +217,7 @@
56903 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56904 VMLINUX_SYMBOL(__start_rodata) = .; \
56905 *(.rodata) *(.rodata.*) \
56906+ *(.data..read_only) \
56907 *(__vermagic) /* Kernel version magic */ \
56908 . = ALIGN(8); \
56909 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
56910@@ -723,17 +724,18 @@
56911 * section in the linker script will go there too. @phdr should have
56912 * a leading colon.
56913 *
56914- * Note that this macros defines __per_cpu_load as an absolute symbol.
56915+ * Note that this macros defines per_cpu_load as an absolute symbol.
56916 * If there is no need to put the percpu section at a predetermined
56917 * address, use PERCPU_SECTION.
56918 */
56919 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
56920- VMLINUX_SYMBOL(__per_cpu_load) = .; \
56921- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56922+ per_cpu_load = .; \
56923+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56924 - LOAD_OFFSET) { \
56925+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56926 PERCPU_INPUT(cacheline) \
56927 } phdr \
56928- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
56929+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
56930
56931 /**
56932 * PERCPU_SECTION - define output section for percpu area, simple version
56933diff -urNp linux-3.0.7/include/drm/drm_crtc_helper.h linux-3.0.7/include/drm/drm_crtc_helper.h
56934--- linux-3.0.7/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
56935+++ linux-3.0.7/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
56936@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
56937
56938 /* disable crtc when not in use - more explicit than dpms off */
56939 void (*disable)(struct drm_crtc *crtc);
56940-};
56941+} __no_const;
56942
56943 struct drm_encoder_helper_funcs {
56944 void (*dpms)(struct drm_encoder *encoder, int mode);
56945@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
56946 struct drm_connector *connector);
56947 /* disable encoder when not in use - more explicit than dpms off */
56948 void (*disable)(struct drm_encoder *encoder);
56949-};
56950+} __no_const;
56951
56952 struct drm_connector_helper_funcs {
56953 int (*get_modes)(struct drm_connector *connector);
56954diff -urNp linux-3.0.7/include/drm/drmP.h linux-3.0.7/include/drm/drmP.h
56955--- linux-3.0.7/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
56956+++ linux-3.0.7/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
56957@@ -73,6 +73,7 @@
56958 #include <linux/workqueue.h>
56959 #include <linux/poll.h>
56960 #include <asm/pgalloc.h>
56961+#include <asm/local.h>
56962 #include "drm.h"
56963
56964 #include <linux/idr.h>
56965@@ -1033,7 +1034,7 @@ struct drm_device {
56966
56967 /** \name Usage Counters */
56968 /*@{ */
56969- int open_count; /**< Outstanding files open */
56970+ local_t open_count; /**< Outstanding files open */
56971 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
56972 atomic_t vma_count; /**< Outstanding vma areas open */
56973 int buf_use; /**< Buffers in use -- cannot alloc */
56974@@ -1044,7 +1045,7 @@ struct drm_device {
56975 /*@{ */
56976 unsigned long counters;
56977 enum drm_stat_type types[15];
56978- atomic_t counts[15];
56979+ atomic_unchecked_t counts[15];
56980 /*@} */
56981
56982 struct list_head filelist;
56983diff -urNp linux-3.0.7/include/drm/ttm/ttm_memory.h linux-3.0.7/include/drm/ttm/ttm_memory.h
56984--- linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
56985+++ linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
56986@@ -47,7 +47,7 @@
56987
56988 struct ttm_mem_shrink {
56989 int (*do_shrink) (struct ttm_mem_shrink *);
56990-};
56991+} __no_const;
56992
56993 /**
56994 * struct ttm_mem_global - Global memory accounting structure.
56995diff -urNp linux-3.0.7/include/linux/a.out.h linux-3.0.7/include/linux/a.out.h
56996--- linux-3.0.7/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
56997+++ linux-3.0.7/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
56998@@ -39,6 +39,14 @@ enum machine_type {
56999 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57000 };
57001
57002+/* Constants for the N_FLAGS field */
57003+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57004+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57005+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57006+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57007+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57008+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57009+
57010 #if !defined (N_MAGIC)
57011 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57012 #endif
57013diff -urNp linux-3.0.7/include/linux/atmdev.h linux-3.0.7/include/linux/atmdev.h
57014--- linux-3.0.7/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
57015+++ linux-3.0.7/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
57016@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57017 #endif
57018
57019 struct k_atm_aal_stats {
57020-#define __HANDLE_ITEM(i) atomic_t i
57021+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57022 __AAL_STAT_ITEMS
57023 #undef __HANDLE_ITEM
57024 };
57025diff -urNp linux-3.0.7/include/linux/binfmts.h linux-3.0.7/include/linux/binfmts.h
57026--- linux-3.0.7/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
57027+++ linux-3.0.7/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
57028@@ -88,6 +88,7 @@ struct linux_binfmt {
57029 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57030 int (*load_shlib)(struct file *);
57031 int (*core_dump)(struct coredump_params *cprm);
57032+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57033 unsigned long min_coredump; /* minimal dump size */
57034 };
57035
57036diff -urNp linux-3.0.7/include/linux/blkdev.h linux-3.0.7/include/linux/blkdev.h
57037--- linux-3.0.7/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
57038+++ linux-3.0.7/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
57039@@ -1308,7 +1308,7 @@ struct block_device_operations {
57040 /* this callback is with swap_lock and sometimes page table lock held */
57041 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57042 struct module *owner;
57043-};
57044+} __do_const;
57045
57046 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57047 unsigned long);
57048diff -urNp linux-3.0.7/include/linux/blktrace_api.h linux-3.0.7/include/linux/blktrace_api.h
57049--- linux-3.0.7/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
57050+++ linux-3.0.7/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
57051@@ -161,7 +161,7 @@ struct blk_trace {
57052 struct dentry *dir;
57053 struct dentry *dropped_file;
57054 struct dentry *msg_file;
57055- atomic_t dropped;
57056+ atomic_unchecked_t dropped;
57057 };
57058
57059 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57060diff -urNp linux-3.0.7/include/linux/byteorder/little_endian.h linux-3.0.7/include/linux/byteorder/little_endian.h
57061--- linux-3.0.7/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
57062+++ linux-3.0.7/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
57063@@ -42,51 +42,51 @@
57064
57065 static inline __le64 __cpu_to_le64p(const __u64 *p)
57066 {
57067- return (__force __le64)*p;
57068+ return (__force const __le64)*p;
57069 }
57070 static inline __u64 __le64_to_cpup(const __le64 *p)
57071 {
57072- return (__force __u64)*p;
57073+ return (__force const __u64)*p;
57074 }
57075 static inline __le32 __cpu_to_le32p(const __u32 *p)
57076 {
57077- return (__force __le32)*p;
57078+ return (__force const __le32)*p;
57079 }
57080 static inline __u32 __le32_to_cpup(const __le32 *p)
57081 {
57082- return (__force __u32)*p;
57083+ return (__force const __u32)*p;
57084 }
57085 static inline __le16 __cpu_to_le16p(const __u16 *p)
57086 {
57087- return (__force __le16)*p;
57088+ return (__force const __le16)*p;
57089 }
57090 static inline __u16 __le16_to_cpup(const __le16 *p)
57091 {
57092- return (__force __u16)*p;
57093+ return (__force const __u16)*p;
57094 }
57095 static inline __be64 __cpu_to_be64p(const __u64 *p)
57096 {
57097- return (__force __be64)__swab64p(p);
57098+ return (__force const __be64)__swab64p(p);
57099 }
57100 static inline __u64 __be64_to_cpup(const __be64 *p)
57101 {
57102- return __swab64p((__u64 *)p);
57103+ return __swab64p((const __u64 *)p);
57104 }
57105 static inline __be32 __cpu_to_be32p(const __u32 *p)
57106 {
57107- return (__force __be32)__swab32p(p);
57108+ return (__force const __be32)__swab32p(p);
57109 }
57110 static inline __u32 __be32_to_cpup(const __be32 *p)
57111 {
57112- return __swab32p((__u32 *)p);
57113+ return __swab32p((const __u32 *)p);
57114 }
57115 static inline __be16 __cpu_to_be16p(const __u16 *p)
57116 {
57117- return (__force __be16)__swab16p(p);
57118+ return (__force const __be16)__swab16p(p);
57119 }
57120 static inline __u16 __be16_to_cpup(const __be16 *p)
57121 {
57122- return __swab16p((__u16 *)p);
57123+ return __swab16p((const __u16 *)p);
57124 }
57125 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57126 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57127diff -urNp linux-3.0.7/include/linux/cache.h linux-3.0.7/include/linux/cache.h
57128--- linux-3.0.7/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
57129+++ linux-3.0.7/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
57130@@ -16,6 +16,10 @@
57131 #define __read_mostly
57132 #endif
57133
57134+#ifndef __read_only
57135+#define __read_only __read_mostly
57136+#endif
57137+
57138 #ifndef ____cacheline_aligned
57139 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57140 #endif
57141diff -urNp linux-3.0.7/include/linux/capability.h linux-3.0.7/include/linux/capability.h
57142--- linux-3.0.7/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
57143+++ linux-3.0.7/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
57144@@ -547,6 +547,9 @@ extern bool capable(int cap);
57145 extern bool ns_capable(struct user_namespace *ns, int cap);
57146 extern bool task_ns_capable(struct task_struct *t, int cap);
57147 extern bool nsown_capable(int cap);
57148+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57149+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57150+extern bool capable_nolog(int cap);
57151
57152 /* audit system wants to get cap info from files as well */
57153 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57154diff -urNp linux-3.0.7/include/linux/cleancache.h linux-3.0.7/include/linux/cleancache.h
57155--- linux-3.0.7/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
57156+++ linux-3.0.7/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
57157@@ -31,7 +31,7 @@ struct cleancache_ops {
57158 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57159 void (*flush_inode)(int, struct cleancache_filekey);
57160 void (*flush_fs)(int);
57161-};
57162+} __no_const;
57163
57164 extern struct cleancache_ops
57165 cleancache_register_ops(struct cleancache_ops *ops);
57166diff -urNp linux-3.0.7/include/linux/compiler-gcc4.h linux-3.0.7/include/linux/compiler-gcc4.h
57167--- linux-3.0.7/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
57168+++ linux-3.0.7/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
57169@@ -31,6 +31,12 @@
57170
57171
57172 #if __GNUC_MINOR__ >= 5
57173+
57174+#ifdef CONSTIFY_PLUGIN
57175+#define __no_const __attribute__((no_const))
57176+#define __do_const __attribute__((do_const))
57177+#endif
57178+
57179 /*
57180 * Mark a position in code as unreachable. This can be used to
57181 * suppress control flow warnings after asm blocks that transfer
57182@@ -46,6 +52,11 @@
57183 #define __noclone __attribute__((__noclone__))
57184
57185 #endif
57186+
57187+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57188+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57189+#define __bos0(ptr) __bos((ptr), 0)
57190+#define __bos1(ptr) __bos((ptr), 1)
57191 #endif
57192
57193 #if __GNUC_MINOR__ > 0
57194diff -urNp linux-3.0.7/include/linux/compiler.h linux-3.0.7/include/linux/compiler.h
57195--- linux-3.0.7/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
57196+++ linux-3.0.7/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
57197@@ -5,31 +5,62 @@
57198
57199 #ifdef __CHECKER__
57200 # define __user __attribute__((noderef, address_space(1)))
57201+# define __force_user __force __user
57202 # define __kernel __attribute__((address_space(0)))
57203+# define __force_kernel __force __kernel
57204 # define __safe __attribute__((safe))
57205 # define __force __attribute__((force))
57206 # define __nocast __attribute__((nocast))
57207 # define __iomem __attribute__((noderef, address_space(2)))
57208+# define __force_iomem __force __iomem
57209 # define __acquires(x) __attribute__((context(x,0,1)))
57210 # define __releases(x) __attribute__((context(x,1,0)))
57211 # define __acquire(x) __context__(x,1)
57212 # define __release(x) __context__(x,-1)
57213 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57214 # define __percpu __attribute__((noderef, address_space(3)))
57215+# define __force_percpu __force __percpu
57216 #ifdef CONFIG_SPARSE_RCU_POINTER
57217 # define __rcu __attribute__((noderef, address_space(4)))
57218+# define __force_rcu __force __rcu
57219 #else
57220 # define __rcu
57221+# define __force_rcu
57222 #endif
57223 extern void __chk_user_ptr(const volatile void __user *);
57224 extern void __chk_io_ptr(const volatile void __iomem *);
57225+#elif defined(CHECKER_PLUGIN)
57226+//# define __user
57227+//# define __force_user
57228+//# define __kernel
57229+//# define __force_kernel
57230+# define __safe
57231+# define __force
57232+# define __nocast
57233+# define __iomem
57234+# define __force_iomem
57235+# define __chk_user_ptr(x) (void)0
57236+# define __chk_io_ptr(x) (void)0
57237+# define __builtin_warning(x, y...) (1)
57238+# define __acquires(x)
57239+# define __releases(x)
57240+# define __acquire(x) (void)0
57241+# define __release(x) (void)0
57242+# define __cond_lock(x,c) (c)
57243+# define __percpu
57244+# define __force_percpu
57245+# define __rcu
57246+# define __force_rcu
57247 #else
57248 # define __user
57249+# define __force_user
57250 # define __kernel
57251+# define __force_kernel
57252 # define __safe
57253 # define __force
57254 # define __nocast
57255 # define __iomem
57256+# define __force_iomem
57257 # define __chk_user_ptr(x) (void)0
57258 # define __chk_io_ptr(x) (void)0
57259 # define __builtin_warning(x, y...) (1)
57260@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57261 # define __release(x) (void)0
57262 # define __cond_lock(x,c) (c)
57263 # define __percpu
57264+# define __force_percpu
57265 # define __rcu
57266+# define __force_rcu
57267 #endif
57268
57269 #ifdef __KERNEL__
57270@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57271 # define __attribute_const__ /* unimplemented */
57272 #endif
57273
57274+#ifndef __no_const
57275+# define __no_const
57276+#endif
57277+
57278+#ifndef __do_const
57279+# define __do_const
57280+#endif
57281+
57282 /*
57283 * Tell gcc if a function is cold. The compiler will assume any path
57284 * directly leading to the call is unlikely.
57285@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57286 #define __cold
57287 #endif
57288
57289+#ifndef __alloc_size
57290+#define __alloc_size(...)
57291+#endif
57292+
57293+#ifndef __bos
57294+#define __bos(ptr, arg)
57295+#endif
57296+
57297+#ifndef __bos0
57298+#define __bos0(ptr)
57299+#endif
57300+
57301+#ifndef __bos1
57302+#define __bos1(ptr)
57303+#endif
57304+
57305 /* Simple shorthand for a section definition */
57306 #ifndef __section
57307 # define __section(S) __attribute__ ((__section__(#S)))
57308@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57309 * use is to mediate communication between process-level code and irq/NMI
57310 * handlers, all running on the same CPU.
57311 */
57312-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57313+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57314+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57315
57316 #endif /* __LINUX_COMPILER_H */
57317diff -urNp linux-3.0.7/include/linux/cpuset.h linux-3.0.7/include/linux/cpuset.h
57318--- linux-3.0.7/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
57319+++ linux-3.0.7/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
57320@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57321 * nodemask.
57322 */
57323 smp_mb();
57324- --ACCESS_ONCE(current->mems_allowed_change_disable);
57325+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57326 }
57327
57328 static inline void set_mems_allowed(nodemask_t nodemask)
57329diff -urNp linux-3.0.7/include/linux/crypto.h linux-3.0.7/include/linux/crypto.h
57330--- linux-3.0.7/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
57331+++ linux-3.0.7/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
57332@@ -361,7 +361,7 @@ struct cipher_tfm {
57333 const u8 *key, unsigned int keylen);
57334 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57335 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57336-};
57337+} __no_const;
57338
57339 struct hash_tfm {
57340 int (*init)(struct hash_desc *desc);
57341@@ -382,13 +382,13 @@ struct compress_tfm {
57342 int (*cot_decompress)(struct crypto_tfm *tfm,
57343 const u8 *src, unsigned int slen,
57344 u8 *dst, unsigned int *dlen);
57345-};
57346+} __no_const;
57347
57348 struct rng_tfm {
57349 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57350 unsigned int dlen);
57351 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57352-};
57353+} __no_const;
57354
57355 #define crt_ablkcipher crt_u.ablkcipher
57356 #define crt_aead crt_u.aead
57357diff -urNp linux-3.0.7/include/linux/decompress/mm.h linux-3.0.7/include/linux/decompress/mm.h
57358--- linux-3.0.7/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
57359+++ linux-3.0.7/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
57360@@ -77,7 +77,7 @@ static void free(void *where)
57361 * warnings when not needed (indeed large_malloc / large_free are not
57362 * needed by inflate */
57363
57364-#define malloc(a) kmalloc(a, GFP_KERNEL)
57365+#define malloc(a) kmalloc((a), GFP_KERNEL)
57366 #define free(a) kfree(a)
57367
57368 #define large_malloc(a) vmalloc(a)
57369diff -urNp linux-3.0.7/include/linux/dma-mapping.h linux-3.0.7/include/linux/dma-mapping.h
57370--- linux-3.0.7/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
57371+++ linux-3.0.7/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
57372@@ -50,7 +50,7 @@ struct dma_map_ops {
57373 int (*dma_supported)(struct device *dev, u64 mask);
57374 int (*set_dma_mask)(struct device *dev, u64 mask);
57375 int is_phys;
57376-};
57377+} __do_const;
57378
57379 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57380
57381diff -urNp linux-3.0.7/include/linux/efi.h linux-3.0.7/include/linux/efi.h
57382--- linux-3.0.7/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
57383+++ linux-3.0.7/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
57384@@ -410,7 +410,7 @@ struct efivar_operations {
57385 efi_get_variable_t *get_variable;
57386 efi_get_next_variable_t *get_next_variable;
57387 efi_set_variable_t *set_variable;
57388-};
57389+} __no_const;
57390
57391 struct efivars {
57392 /*
57393diff -urNp linux-3.0.7/include/linux/elf.h linux-3.0.7/include/linux/elf.h
57394--- linux-3.0.7/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
57395+++ linux-3.0.7/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
57396@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57397 #define PT_GNU_EH_FRAME 0x6474e550
57398
57399 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57400+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57401+
57402+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57403+
57404+/* Constants for the e_flags field */
57405+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57406+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57407+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57408+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57409+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57410+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57411
57412 /*
57413 * Extended Numbering
57414@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57415 #define DT_DEBUG 21
57416 #define DT_TEXTREL 22
57417 #define DT_JMPREL 23
57418+#define DT_FLAGS 30
57419+ #define DF_TEXTREL 0x00000004
57420 #define DT_ENCODING 32
57421 #define OLD_DT_LOOS 0x60000000
57422 #define DT_LOOS 0x6000000d
57423@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57424 #define PF_W 0x2
57425 #define PF_X 0x1
57426
57427+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57428+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57429+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57430+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57431+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57432+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57433+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57434+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57435+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57436+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57437+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57438+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57439+
57440 typedef struct elf32_phdr{
57441 Elf32_Word p_type;
57442 Elf32_Off p_offset;
57443@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57444 #define EI_OSABI 7
57445 #define EI_PAD 8
57446
57447+#define EI_PAX 14
57448+
57449 #define ELFMAG0 0x7f /* EI_MAG */
57450 #define ELFMAG1 'E'
57451 #define ELFMAG2 'L'
57452@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57453 #define elf_note elf32_note
57454 #define elf_addr_t Elf32_Off
57455 #define Elf_Half Elf32_Half
57456+#define elf_dyn Elf32_Dyn
57457
57458 #else
57459
57460@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57461 #define elf_note elf64_note
57462 #define elf_addr_t Elf64_Off
57463 #define Elf_Half Elf64_Half
57464+#define elf_dyn Elf64_Dyn
57465
57466 #endif
57467
57468diff -urNp linux-3.0.7/include/linux/firewire.h linux-3.0.7/include/linux/firewire.h
57469--- linux-3.0.7/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
57470+++ linux-3.0.7/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
57471@@ -428,7 +428,7 @@ struct fw_iso_context {
57472 union {
57473 fw_iso_callback_t sc;
57474 fw_iso_mc_callback_t mc;
57475- } callback;
57476+ } __no_const callback;
57477 void *callback_data;
57478 };
57479
57480diff -urNp linux-3.0.7/include/linux/fscache-cache.h linux-3.0.7/include/linux/fscache-cache.h
57481--- linux-3.0.7/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
57482+++ linux-3.0.7/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
57483@@ -102,7 +102,7 @@ struct fscache_operation {
57484 fscache_operation_release_t release;
57485 };
57486
57487-extern atomic_t fscache_op_debug_id;
57488+extern atomic_unchecked_t fscache_op_debug_id;
57489 extern void fscache_op_work_func(struct work_struct *work);
57490
57491 extern void fscache_enqueue_operation(struct fscache_operation *);
57492@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57493 {
57494 INIT_WORK(&op->work, fscache_op_work_func);
57495 atomic_set(&op->usage, 1);
57496- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57497+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57498 op->processor = processor;
57499 op->release = release;
57500 INIT_LIST_HEAD(&op->pend_link);
57501diff -urNp linux-3.0.7/include/linux/fs.h linux-3.0.7/include/linux/fs.h
57502--- linux-3.0.7/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
57503+++ linux-3.0.7/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
57504@@ -109,6 +109,11 @@ struct inodes_stat_t {
57505 /* File was opened by fanotify and shouldn't generate fanotify events */
57506 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
57507
57508+/* Hack for grsec so as not to require read permission simply to execute
57509+ * a binary
57510+ */
57511+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57512+
57513 /*
57514 * The below are the various read and write types that we support. Some of
57515 * them include behavioral modifiers that send information down to the
57516@@ -1571,7 +1576,8 @@ struct file_operations {
57517 int (*setlease)(struct file *, long, struct file_lock **);
57518 long (*fallocate)(struct file *file, int mode, loff_t offset,
57519 loff_t len);
57520-};
57521+} __do_const;
57522+typedef struct file_operations __no_const file_operations_no_const;
57523
57524 #define IPERM_FLAG_RCU 0x0001
57525
57526diff -urNp linux-3.0.7/include/linux/fsnotify.h linux-3.0.7/include/linux/fsnotify.h
57527--- linux-3.0.7/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
57528+++ linux-3.0.7/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
57529@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57530 */
57531 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57532 {
57533- return kstrdup(name, GFP_KERNEL);
57534+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57535 }
57536
57537 /*
57538diff -urNp linux-3.0.7/include/linux/fs_struct.h linux-3.0.7/include/linux/fs_struct.h
57539--- linux-3.0.7/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
57540+++ linux-3.0.7/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
57541@@ -6,7 +6,7 @@
57542 #include <linux/seqlock.h>
57543
57544 struct fs_struct {
57545- int users;
57546+ atomic_t users;
57547 spinlock_t lock;
57548 seqcount_t seq;
57549 int umask;
57550diff -urNp linux-3.0.7/include/linux/ftrace_event.h linux-3.0.7/include/linux/ftrace_event.h
57551--- linux-3.0.7/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
57552+++ linux-3.0.7/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
57553@@ -96,7 +96,7 @@ struct trace_event_functions {
57554 trace_print_func raw;
57555 trace_print_func hex;
57556 trace_print_func binary;
57557-};
57558+} __no_const;
57559
57560 struct trace_event {
57561 struct hlist_node node;
57562@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
57563 extern int trace_add_event_call(struct ftrace_event_call *call);
57564 extern void trace_remove_event_call(struct ftrace_event_call *call);
57565
57566-#define is_signed_type(type) (((type)(-1)) < 0)
57567+#define is_signed_type(type) (((type)(-1)) < (type)1)
57568
57569 int trace_set_clr_event(const char *system, const char *event, int set);
57570
57571diff -urNp linux-3.0.7/include/linux/genhd.h linux-3.0.7/include/linux/genhd.h
57572--- linux-3.0.7/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
57573+++ linux-3.0.7/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
57574@@ -184,7 +184,7 @@ struct gendisk {
57575 struct kobject *slave_dir;
57576
57577 struct timer_rand_state *random;
57578- atomic_t sync_io; /* RAID */
57579+ atomic_unchecked_t sync_io; /* RAID */
57580 struct disk_events *ev;
57581 #ifdef CONFIG_BLK_DEV_INTEGRITY
57582 struct blk_integrity *integrity;
57583diff -urNp linux-3.0.7/include/linux/gracl.h linux-3.0.7/include/linux/gracl.h
57584--- linux-3.0.7/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57585+++ linux-3.0.7/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
57586@@ -0,0 +1,317 @@
57587+#ifndef GR_ACL_H
57588+#define GR_ACL_H
57589+
57590+#include <linux/grdefs.h>
57591+#include <linux/resource.h>
57592+#include <linux/capability.h>
57593+#include <linux/dcache.h>
57594+#include <asm/resource.h>
57595+
57596+/* Major status information */
57597+
57598+#define GR_VERSION "grsecurity 2.2.2"
57599+#define GRSECURITY_VERSION 0x2202
57600+
57601+enum {
57602+ GR_SHUTDOWN = 0,
57603+ GR_ENABLE = 1,
57604+ GR_SPROLE = 2,
57605+ GR_RELOAD = 3,
57606+ GR_SEGVMOD = 4,
57607+ GR_STATUS = 5,
57608+ GR_UNSPROLE = 6,
57609+ GR_PASSSET = 7,
57610+ GR_SPROLEPAM = 8,
57611+};
57612+
57613+/* Password setup definitions
57614+ * kernel/grhash.c */
57615+enum {
57616+ GR_PW_LEN = 128,
57617+ GR_SALT_LEN = 16,
57618+ GR_SHA_LEN = 32,
57619+};
57620+
57621+enum {
57622+ GR_SPROLE_LEN = 64,
57623+};
57624+
57625+enum {
57626+ GR_NO_GLOB = 0,
57627+ GR_REG_GLOB,
57628+ GR_CREATE_GLOB
57629+};
57630+
57631+#define GR_NLIMITS 32
57632+
57633+/* Begin Data Structures */
57634+
57635+struct sprole_pw {
57636+ unsigned char *rolename;
57637+ unsigned char salt[GR_SALT_LEN];
57638+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57639+};
57640+
57641+struct name_entry {
57642+ __u32 key;
57643+ ino_t inode;
57644+ dev_t device;
57645+ char *name;
57646+ __u16 len;
57647+ __u8 deleted;
57648+ struct name_entry *prev;
57649+ struct name_entry *next;
57650+};
57651+
57652+struct inodev_entry {
57653+ struct name_entry *nentry;
57654+ struct inodev_entry *prev;
57655+ struct inodev_entry *next;
57656+};
57657+
57658+struct acl_role_db {
57659+ struct acl_role_label **r_hash;
57660+ __u32 r_size;
57661+};
57662+
57663+struct inodev_db {
57664+ struct inodev_entry **i_hash;
57665+ __u32 i_size;
57666+};
57667+
57668+struct name_db {
57669+ struct name_entry **n_hash;
57670+ __u32 n_size;
57671+};
57672+
57673+struct crash_uid {
57674+ uid_t uid;
57675+ unsigned long expires;
57676+};
57677+
57678+struct gr_hash_struct {
57679+ void **table;
57680+ void **nametable;
57681+ void *first;
57682+ __u32 table_size;
57683+ __u32 used_size;
57684+ int type;
57685+};
57686+
57687+/* Userspace Grsecurity ACL data structures */
57688+
57689+struct acl_subject_label {
57690+ char *filename;
57691+ ino_t inode;
57692+ dev_t device;
57693+ __u32 mode;
57694+ kernel_cap_t cap_mask;
57695+ kernel_cap_t cap_lower;
57696+ kernel_cap_t cap_invert_audit;
57697+
57698+ struct rlimit res[GR_NLIMITS];
57699+ __u32 resmask;
57700+
57701+ __u8 user_trans_type;
57702+ __u8 group_trans_type;
57703+ uid_t *user_transitions;
57704+ gid_t *group_transitions;
57705+ __u16 user_trans_num;
57706+ __u16 group_trans_num;
57707+
57708+ __u32 sock_families[2];
57709+ __u32 ip_proto[8];
57710+ __u32 ip_type;
57711+ struct acl_ip_label **ips;
57712+ __u32 ip_num;
57713+ __u32 inaddr_any_override;
57714+
57715+ __u32 crashes;
57716+ unsigned long expires;
57717+
57718+ struct acl_subject_label *parent_subject;
57719+ struct gr_hash_struct *hash;
57720+ struct acl_subject_label *prev;
57721+ struct acl_subject_label *next;
57722+
57723+ struct acl_object_label **obj_hash;
57724+ __u32 obj_hash_size;
57725+ __u16 pax_flags;
57726+};
57727+
57728+struct role_allowed_ip {
57729+ __u32 addr;
57730+ __u32 netmask;
57731+
57732+ struct role_allowed_ip *prev;
57733+ struct role_allowed_ip *next;
57734+};
57735+
57736+struct role_transition {
57737+ char *rolename;
57738+
57739+ struct role_transition *prev;
57740+ struct role_transition *next;
57741+};
57742+
57743+struct acl_role_label {
57744+ char *rolename;
57745+ uid_t uidgid;
57746+ __u16 roletype;
57747+
57748+ __u16 auth_attempts;
57749+ unsigned long expires;
57750+
57751+ struct acl_subject_label *root_label;
57752+ struct gr_hash_struct *hash;
57753+
57754+ struct acl_role_label *prev;
57755+ struct acl_role_label *next;
57756+
57757+ struct role_transition *transitions;
57758+ struct role_allowed_ip *allowed_ips;
57759+ uid_t *domain_children;
57760+ __u16 domain_child_num;
57761+
57762+ struct acl_subject_label **subj_hash;
57763+ __u32 subj_hash_size;
57764+};
57765+
57766+struct user_acl_role_db {
57767+ struct acl_role_label **r_table;
57768+ __u32 num_pointers; /* Number of allocations to track */
57769+ __u32 num_roles; /* Number of roles */
57770+ __u32 num_domain_children; /* Number of domain children */
57771+ __u32 num_subjects; /* Number of subjects */
57772+ __u32 num_objects; /* Number of objects */
57773+};
57774+
57775+struct acl_object_label {
57776+ char *filename;
57777+ ino_t inode;
57778+ dev_t device;
57779+ __u32 mode;
57780+
57781+ struct acl_subject_label *nested;
57782+ struct acl_object_label *globbed;
57783+
57784+ /* next two structures not used */
57785+
57786+ struct acl_object_label *prev;
57787+ struct acl_object_label *next;
57788+};
57789+
57790+struct acl_ip_label {
57791+ char *iface;
57792+ __u32 addr;
57793+ __u32 netmask;
57794+ __u16 low, high;
57795+ __u8 mode;
57796+ __u32 type;
57797+ __u32 proto[8];
57798+
57799+ /* next two structures not used */
57800+
57801+ struct acl_ip_label *prev;
57802+ struct acl_ip_label *next;
57803+};
57804+
57805+struct gr_arg {
57806+ struct user_acl_role_db role_db;
57807+ unsigned char pw[GR_PW_LEN];
57808+ unsigned char salt[GR_SALT_LEN];
57809+ unsigned char sum[GR_SHA_LEN];
57810+ unsigned char sp_role[GR_SPROLE_LEN];
57811+ struct sprole_pw *sprole_pws;
57812+ dev_t segv_device;
57813+ ino_t segv_inode;
57814+ uid_t segv_uid;
57815+ __u16 num_sprole_pws;
57816+ __u16 mode;
57817+};
57818+
57819+struct gr_arg_wrapper {
57820+ struct gr_arg *arg;
57821+ __u32 version;
57822+ __u32 size;
57823+};
57824+
57825+struct subject_map {
57826+ struct acl_subject_label *user;
57827+ struct acl_subject_label *kernel;
57828+ struct subject_map *prev;
57829+ struct subject_map *next;
57830+};
57831+
57832+struct acl_subj_map_db {
57833+ struct subject_map **s_hash;
57834+ __u32 s_size;
57835+};
57836+
57837+/* End Data Structures Section */
57838+
57839+/* Hash functions generated by empirical testing by Brad Spengler
57840+ Makes good use of the low bits of the inode. Generally 0-1 times
57841+ in loop for successful match. 0-3 for unsuccessful match.
57842+ Shift/add algorithm with modulus of table size and an XOR*/
57843+
57844+static __inline__ unsigned int
57845+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
57846+{
57847+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
57848+}
57849+
57850+ static __inline__ unsigned int
57851+shash(const struct acl_subject_label *userp, const unsigned int sz)
57852+{
57853+ return ((const unsigned long)userp % sz);
57854+}
57855+
57856+static __inline__ unsigned int
57857+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
57858+{
57859+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
57860+}
57861+
57862+static __inline__ unsigned int
57863+nhash(const char *name, const __u16 len, const unsigned int sz)
57864+{
57865+ return full_name_hash((const unsigned char *)name, len) % sz;
57866+}
57867+
57868+#define FOR_EACH_ROLE_START(role) \
57869+ role = role_list; \
57870+ while (role) {
57871+
57872+#define FOR_EACH_ROLE_END(role) \
57873+ role = role->prev; \
57874+ }
57875+
57876+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
57877+ subj = NULL; \
57878+ iter = 0; \
57879+ while (iter < role->subj_hash_size) { \
57880+ if (subj == NULL) \
57881+ subj = role->subj_hash[iter]; \
57882+ if (subj == NULL) { \
57883+ iter++; \
57884+ continue; \
57885+ }
57886+
57887+#define FOR_EACH_SUBJECT_END(subj,iter) \
57888+ subj = subj->next; \
57889+ if (subj == NULL) \
57890+ iter++; \
57891+ }
57892+
57893+
57894+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
57895+ subj = role->hash->first; \
57896+ while (subj != NULL) {
57897+
57898+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
57899+ subj = subj->next; \
57900+ }
57901+
57902+#endif
57903+
57904diff -urNp linux-3.0.7/include/linux/gralloc.h linux-3.0.7/include/linux/gralloc.h
57905--- linux-3.0.7/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
57906+++ linux-3.0.7/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
57907@@ -0,0 +1,9 @@
57908+#ifndef __GRALLOC_H
57909+#define __GRALLOC_H
57910+
57911+void acl_free_all(void);
57912+int acl_alloc_stack_init(unsigned long size);
57913+void *acl_alloc(unsigned long len);
57914+void *acl_alloc_num(unsigned long num, unsigned long len);
57915+
57916+#endif
57917diff -urNp linux-3.0.7/include/linux/grdefs.h linux-3.0.7/include/linux/grdefs.h
57918--- linux-3.0.7/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
57919+++ linux-3.0.7/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
57920@@ -0,0 +1,140 @@
57921+#ifndef GRDEFS_H
57922+#define GRDEFS_H
57923+
57924+/* Begin grsecurity status declarations */
57925+
57926+enum {
57927+ GR_READY = 0x01,
57928+ GR_STATUS_INIT = 0x00 // disabled state
57929+};
57930+
57931+/* Begin ACL declarations */
57932+
57933+/* Role flags */
57934+
57935+enum {
57936+ GR_ROLE_USER = 0x0001,
57937+ GR_ROLE_GROUP = 0x0002,
57938+ GR_ROLE_DEFAULT = 0x0004,
57939+ GR_ROLE_SPECIAL = 0x0008,
57940+ GR_ROLE_AUTH = 0x0010,
57941+ GR_ROLE_NOPW = 0x0020,
57942+ GR_ROLE_GOD = 0x0040,
57943+ GR_ROLE_LEARN = 0x0080,
57944+ GR_ROLE_TPE = 0x0100,
57945+ GR_ROLE_DOMAIN = 0x0200,
57946+ GR_ROLE_PAM = 0x0400,
57947+ GR_ROLE_PERSIST = 0x0800
57948+};
57949+
57950+/* ACL Subject and Object mode flags */
57951+enum {
57952+ GR_DELETED = 0x80000000
57953+};
57954+
57955+/* ACL Object-only mode flags */
57956+enum {
57957+ GR_READ = 0x00000001,
57958+ GR_APPEND = 0x00000002,
57959+ GR_WRITE = 0x00000004,
57960+ GR_EXEC = 0x00000008,
57961+ GR_FIND = 0x00000010,
57962+ GR_INHERIT = 0x00000020,
57963+ GR_SETID = 0x00000040,
57964+ GR_CREATE = 0x00000080,
57965+ GR_DELETE = 0x00000100,
57966+ GR_LINK = 0x00000200,
57967+ GR_AUDIT_READ = 0x00000400,
57968+ GR_AUDIT_APPEND = 0x00000800,
57969+ GR_AUDIT_WRITE = 0x00001000,
57970+ GR_AUDIT_EXEC = 0x00002000,
57971+ GR_AUDIT_FIND = 0x00004000,
57972+ GR_AUDIT_INHERIT= 0x00008000,
57973+ GR_AUDIT_SETID = 0x00010000,
57974+ GR_AUDIT_CREATE = 0x00020000,
57975+ GR_AUDIT_DELETE = 0x00040000,
57976+ GR_AUDIT_LINK = 0x00080000,
57977+ GR_PTRACERD = 0x00100000,
57978+ GR_NOPTRACE = 0x00200000,
57979+ GR_SUPPRESS = 0x00400000,
57980+ GR_NOLEARN = 0x00800000,
57981+ GR_INIT_TRANSFER= 0x01000000
57982+};
57983+
57984+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
57985+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
57986+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
57987+
57988+/* ACL subject-only mode flags */
57989+enum {
57990+ GR_KILL = 0x00000001,
57991+ GR_VIEW = 0x00000002,
57992+ GR_PROTECTED = 0x00000004,
57993+ GR_LEARN = 0x00000008,
57994+ GR_OVERRIDE = 0x00000010,
57995+ /* just a placeholder, this mode is only used in userspace */
57996+ GR_DUMMY = 0x00000020,
57997+ GR_PROTSHM = 0x00000040,
57998+ GR_KILLPROC = 0x00000080,
57999+ GR_KILLIPPROC = 0x00000100,
58000+ /* just a placeholder, this mode is only used in userspace */
58001+ GR_NOTROJAN = 0x00000200,
58002+ GR_PROTPROCFD = 0x00000400,
58003+ GR_PROCACCT = 0x00000800,
58004+ GR_RELAXPTRACE = 0x00001000,
58005+ GR_NESTED = 0x00002000,
58006+ GR_INHERITLEARN = 0x00004000,
58007+ GR_PROCFIND = 0x00008000,
58008+ GR_POVERRIDE = 0x00010000,
58009+ GR_KERNELAUTH = 0x00020000,
58010+ GR_ATSECURE = 0x00040000,
58011+ GR_SHMEXEC = 0x00080000
58012+};
58013+
58014+enum {
58015+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58016+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58017+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58018+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58019+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58020+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58021+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58022+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58023+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58024+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58025+};
58026+
58027+enum {
58028+ GR_ID_USER = 0x01,
58029+ GR_ID_GROUP = 0x02,
58030+};
58031+
58032+enum {
58033+ GR_ID_ALLOW = 0x01,
58034+ GR_ID_DENY = 0x02,
58035+};
58036+
58037+#define GR_CRASH_RES 31
58038+#define GR_UIDTABLE_MAX 500
58039+
58040+/* begin resource learning section */
58041+enum {
58042+ GR_RLIM_CPU_BUMP = 60,
58043+ GR_RLIM_FSIZE_BUMP = 50000,
58044+ GR_RLIM_DATA_BUMP = 10000,
58045+ GR_RLIM_STACK_BUMP = 1000,
58046+ GR_RLIM_CORE_BUMP = 10000,
58047+ GR_RLIM_RSS_BUMP = 500000,
58048+ GR_RLIM_NPROC_BUMP = 1,
58049+ GR_RLIM_NOFILE_BUMP = 5,
58050+ GR_RLIM_MEMLOCK_BUMP = 50000,
58051+ GR_RLIM_AS_BUMP = 500000,
58052+ GR_RLIM_LOCKS_BUMP = 2,
58053+ GR_RLIM_SIGPENDING_BUMP = 5,
58054+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58055+ GR_RLIM_NICE_BUMP = 1,
58056+ GR_RLIM_RTPRIO_BUMP = 1,
58057+ GR_RLIM_RTTIME_BUMP = 1000000
58058+};
58059+
58060+#endif
58061diff -urNp linux-3.0.7/include/linux/grinternal.h linux-3.0.7/include/linux/grinternal.h
58062--- linux-3.0.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58063+++ linux-3.0.7/include/linux/grinternal.h 2011-10-17 00:25:19.000000000 -0400
58064@@ -0,0 +1,219 @@
58065+#ifndef __GRINTERNAL_H
58066+#define __GRINTERNAL_H
58067+
58068+#ifdef CONFIG_GRKERNSEC
58069+
58070+#include <linux/fs.h>
58071+#include <linux/mnt_namespace.h>
58072+#include <linux/nsproxy.h>
58073+#include <linux/gracl.h>
58074+#include <linux/grdefs.h>
58075+#include <linux/grmsg.h>
58076+
58077+void gr_add_learn_entry(const char *fmt, ...)
58078+ __attribute__ ((format (printf, 1, 2)));
58079+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58080+ const struct vfsmount *mnt);
58081+__u32 gr_check_create(const struct dentry *new_dentry,
58082+ const struct dentry *parent,
58083+ const struct vfsmount *mnt, const __u32 mode);
58084+int gr_check_protected_task(const struct task_struct *task);
58085+__u32 to_gr_audit(const __u32 reqmode);
58086+int gr_set_acls(const int type);
58087+int gr_apply_subject_to_task(struct task_struct *task);
58088+int gr_acl_is_enabled(void);
58089+char gr_roletype_to_char(void);
58090+
58091+void gr_handle_alertkill(struct task_struct *task);
58092+char *gr_to_filename(const struct dentry *dentry,
58093+ const struct vfsmount *mnt);
58094+char *gr_to_filename1(const struct dentry *dentry,
58095+ const struct vfsmount *mnt);
58096+char *gr_to_filename2(const struct dentry *dentry,
58097+ const struct vfsmount *mnt);
58098+char *gr_to_filename3(const struct dentry *dentry,
58099+ const struct vfsmount *mnt);
58100+
58101+extern int grsec_enable_harden_ptrace;
58102+extern int grsec_enable_link;
58103+extern int grsec_enable_fifo;
58104+extern int grsec_enable_execve;
58105+extern int grsec_enable_shm;
58106+extern int grsec_enable_execlog;
58107+extern int grsec_enable_signal;
58108+extern int grsec_enable_audit_ptrace;
58109+extern int grsec_enable_forkfail;
58110+extern int grsec_enable_time;
58111+extern int grsec_enable_rofs;
58112+extern int grsec_enable_chroot_shmat;
58113+extern int grsec_enable_chroot_mount;
58114+extern int grsec_enable_chroot_double;
58115+extern int grsec_enable_chroot_pivot;
58116+extern int grsec_enable_chroot_chdir;
58117+extern int grsec_enable_chroot_chmod;
58118+extern int grsec_enable_chroot_mknod;
58119+extern int grsec_enable_chroot_fchdir;
58120+extern int grsec_enable_chroot_nice;
58121+extern int grsec_enable_chroot_execlog;
58122+extern int grsec_enable_chroot_caps;
58123+extern int grsec_enable_chroot_sysctl;
58124+extern int grsec_enable_chroot_unix;
58125+extern int grsec_enable_tpe;
58126+extern int grsec_tpe_gid;
58127+extern int grsec_enable_tpe_all;
58128+extern int grsec_enable_tpe_invert;
58129+extern int grsec_enable_socket_all;
58130+extern int grsec_socket_all_gid;
58131+extern int grsec_enable_socket_client;
58132+extern int grsec_socket_client_gid;
58133+extern int grsec_enable_socket_server;
58134+extern int grsec_socket_server_gid;
58135+extern int grsec_audit_gid;
58136+extern int grsec_enable_group;
58137+extern int grsec_enable_audit_textrel;
58138+extern int grsec_enable_log_rwxmaps;
58139+extern int grsec_enable_mount;
58140+extern int grsec_enable_chdir;
58141+extern int grsec_resource_logging;
58142+extern int grsec_enable_blackhole;
58143+extern int grsec_lastack_retries;
58144+extern int grsec_enable_brute;
58145+extern int grsec_lock;
58146+
58147+extern spinlock_t grsec_alert_lock;
58148+extern unsigned long grsec_alert_wtime;
58149+extern unsigned long grsec_alert_fyet;
58150+
58151+extern spinlock_t grsec_audit_lock;
58152+
58153+extern rwlock_t grsec_exec_file_lock;
58154+
58155+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58156+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58157+ (tsk)->exec_file->f_vfsmnt) : "/")
58158+
58159+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58160+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58161+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58162+
58163+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58164+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58165+ (tsk)->exec_file->f_vfsmnt) : "/")
58166+
58167+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58168+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58169+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58170+
58171+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58172+
58173+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58174+
58175+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58176+ (task)->pid, (cred)->uid, \
58177+ (cred)->euid, (cred)->gid, (cred)->egid, \
58178+ gr_parent_task_fullpath(task), \
58179+ (task)->real_parent->comm, (task)->real_parent->pid, \
58180+ (pcred)->uid, (pcred)->euid, \
58181+ (pcred)->gid, (pcred)->egid
58182+
58183+#define GR_CHROOT_CAPS {{ \
58184+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58185+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58186+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58187+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58188+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58189+ CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
58190+
58191+#define security_learn(normal_msg,args...) \
58192+({ \
58193+ read_lock(&grsec_exec_file_lock); \
58194+ gr_add_learn_entry(normal_msg "\n", ## args); \
58195+ read_unlock(&grsec_exec_file_lock); \
58196+})
58197+
58198+enum {
58199+ GR_DO_AUDIT,
58200+ GR_DONT_AUDIT,
58201+ /* used for non-audit messages that we shouldn't kill the task on */
58202+ GR_DONT_AUDIT_GOOD
58203+};
58204+
58205+enum {
58206+ GR_TTYSNIFF,
58207+ GR_RBAC,
58208+ GR_RBAC_STR,
58209+ GR_STR_RBAC,
58210+ GR_RBAC_MODE2,
58211+ GR_RBAC_MODE3,
58212+ GR_FILENAME,
58213+ GR_SYSCTL_HIDDEN,
58214+ GR_NOARGS,
58215+ GR_ONE_INT,
58216+ GR_ONE_INT_TWO_STR,
58217+ GR_ONE_STR,
58218+ GR_STR_INT,
58219+ GR_TWO_STR_INT,
58220+ GR_TWO_INT,
58221+ GR_TWO_U64,
58222+ GR_THREE_INT,
58223+ GR_FIVE_INT_TWO_STR,
58224+ GR_TWO_STR,
58225+ GR_THREE_STR,
58226+ GR_FOUR_STR,
58227+ GR_STR_FILENAME,
58228+ GR_FILENAME_STR,
58229+ GR_FILENAME_TWO_INT,
58230+ GR_FILENAME_TWO_INT_STR,
58231+ GR_TEXTREL,
58232+ GR_PTRACE,
58233+ GR_RESOURCE,
58234+ GR_CAP,
58235+ GR_SIG,
58236+ GR_SIG2,
58237+ GR_CRASH1,
58238+ GR_CRASH2,
58239+ GR_PSACCT,
58240+ GR_RWXMAP
58241+};
58242+
58243+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58244+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58245+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58246+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58247+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58248+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58249+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58250+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58251+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58252+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58253+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58254+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58255+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58256+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58257+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58258+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58259+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58260+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58261+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58262+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58263+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58264+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58265+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58266+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58267+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58268+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58269+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58270+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58271+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58272+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58273+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58274+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58275+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58276+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58277+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58278+
58279+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58280+
58281+#endif
58282+
58283+#endif
58284diff -urNp linux-3.0.7/include/linux/grmsg.h linux-3.0.7/include/linux/grmsg.h
58285--- linux-3.0.7/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58286+++ linux-3.0.7/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
58287@@ -0,0 +1,108 @@
58288+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58289+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58290+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58291+#define GR_STOPMOD_MSG "denied modification of module state by "
58292+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58293+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58294+#define GR_IOPERM_MSG "denied use of ioperm() by "
58295+#define GR_IOPL_MSG "denied use of iopl() by "
58296+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58297+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58298+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58299+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58300+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58301+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58302+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58303+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58304+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58305+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58306+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58307+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58308+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58309+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58310+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58311+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58312+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58313+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58314+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58315+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58316+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58317+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58318+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58319+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58320+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58321+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58322+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58323+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58324+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58325+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58326+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58327+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58328+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58329+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58330+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58331+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58332+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58333+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58334+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58335+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58336+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58337+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58338+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58339+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58340+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58341+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58342+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58343+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58344+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58345+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58346+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58347+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58348+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58349+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58350+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58351+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58352+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58353+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58354+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58355+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58356+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58357+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58358+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58359+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58360+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58361+#define GR_NICE_CHROOT_MSG "denied priority change by "
58362+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58363+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58364+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58365+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58366+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58367+#define GR_TIME_MSG "time set by "
58368+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58369+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58370+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58371+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58372+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58373+#define GR_BIND_MSG "denied bind() by "
58374+#define GR_CONNECT_MSG "denied connect() by "
58375+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58376+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58377+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58378+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58379+#define GR_CAP_ACL_MSG "use of %s denied for "
58380+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58381+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58382+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58383+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58384+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58385+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58386+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58387+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58388+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58389+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58390+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58391+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58392+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58393+#define GR_VM86_MSG "denied use of vm86 by "
58394+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58395+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58396diff -urNp linux-3.0.7/include/linux/grsecurity.h linux-3.0.7/include/linux/grsecurity.h
58397--- linux-3.0.7/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58398+++ linux-3.0.7/include/linux/grsecurity.h 2011-10-17 06:35:30.000000000 -0400
58399@@ -0,0 +1,228 @@
58400+#ifndef GR_SECURITY_H
58401+#define GR_SECURITY_H
58402+#include <linux/fs.h>
58403+#include <linux/fs_struct.h>
58404+#include <linux/binfmts.h>
58405+#include <linux/gracl.h>
58406+
58407+/* notify of brain-dead configs */
58408+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58409+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58410+#endif
58411+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58412+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58413+#endif
58414+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58415+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58416+#endif
58417+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58418+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58419+#endif
58420+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58421+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58422+#endif
58423+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58424+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58425+#endif
58426+
58427+#include <linux/compat.h>
58428+
58429+struct user_arg_ptr {
58430+#ifdef CONFIG_COMPAT
58431+ bool is_compat;
58432+#endif
58433+ union {
58434+ const char __user *const __user *native;
58435+#ifdef CONFIG_COMPAT
58436+ compat_uptr_t __user *compat;
58437+#endif
58438+ } ptr;
58439+};
58440+
58441+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58442+void gr_handle_brute_check(void);
58443+void gr_handle_kernel_exploit(void);
58444+int gr_process_user_ban(void);
58445+
58446+char gr_roletype_to_char(void);
58447+
58448+int gr_acl_enable_at_secure(void);
58449+
58450+int gr_check_user_change(int real, int effective, int fs);
58451+int gr_check_group_change(int real, int effective, int fs);
58452+
58453+void gr_del_task_from_ip_table(struct task_struct *p);
58454+
58455+int gr_pid_is_chrooted(struct task_struct *p);
58456+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58457+int gr_handle_chroot_nice(void);
58458+int gr_handle_chroot_sysctl(const int op);
58459+int gr_handle_chroot_setpriority(struct task_struct *p,
58460+ const int niceval);
58461+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58462+int gr_handle_chroot_chroot(const struct dentry *dentry,
58463+ const struct vfsmount *mnt);
58464+void gr_handle_chroot_chdir(struct path *path);
58465+int gr_handle_chroot_chmod(const struct dentry *dentry,
58466+ const struct vfsmount *mnt, const int mode);
58467+int gr_handle_chroot_mknod(const struct dentry *dentry,
58468+ const struct vfsmount *mnt, const int mode);
58469+int gr_handle_chroot_mount(const struct dentry *dentry,
58470+ const struct vfsmount *mnt,
58471+ const char *dev_name);
58472+int gr_handle_chroot_pivot(void);
58473+int gr_handle_chroot_unix(const pid_t pid);
58474+
58475+int gr_handle_rawio(const struct inode *inode);
58476+
58477+void gr_handle_ioperm(void);
58478+void gr_handle_iopl(void);
58479+
58480+int gr_tpe_allow(const struct file *file);
58481+
58482+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58483+void gr_clear_chroot_entries(struct task_struct *task);
58484+
58485+void gr_log_forkfail(const int retval);
58486+void gr_log_timechange(void);
58487+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58488+void gr_log_chdir(const struct dentry *dentry,
58489+ const struct vfsmount *mnt);
58490+void gr_log_chroot_exec(const struct dentry *dentry,
58491+ const struct vfsmount *mnt);
58492+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58493+void gr_log_remount(const char *devname, const int retval);
58494+void gr_log_unmount(const char *devname, const int retval);
58495+void gr_log_mount(const char *from, const char *to, const int retval);
58496+void gr_log_textrel(struct vm_area_struct *vma);
58497+void gr_log_rwxmmap(struct file *file);
58498+void gr_log_rwxmprotect(struct file *file);
58499+
58500+int gr_handle_follow_link(const struct inode *parent,
58501+ const struct inode *inode,
58502+ const struct dentry *dentry,
58503+ const struct vfsmount *mnt);
58504+int gr_handle_fifo(const struct dentry *dentry,
58505+ const struct vfsmount *mnt,
58506+ const struct dentry *dir, const int flag,
58507+ const int acc_mode);
58508+int gr_handle_hardlink(const struct dentry *dentry,
58509+ const struct vfsmount *mnt,
58510+ struct inode *inode,
58511+ const int mode, const char *to);
58512+
58513+int gr_is_capable(const int cap);
58514+int gr_is_capable_nolog(const int cap);
58515+void gr_learn_resource(const struct task_struct *task, const int limit,
58516+ const unsigned long wanted, const int gt);
58517+void gr_copy_label(struct task_struct *tsk);
58518+void gr_handle_crash(struct task_struct *task, const int sig);
58519+int gr_handle_signal(const struct task_struct *p, const int sig);
58520+int gr_check_crash_uid(const uid_t uid);
58521+int gr_check_protected_task(const struct task_struct *task);
58522+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58523+int gr_acl_handle_mmap(const struct file *file,
58524+ const unsigned long prot);
58525+int gr_acl_handle_mprotect(const struct file *file,
58526+ const unsigned long prot);
58527+int gr_check_hidden_task(const struct task_struct *tsk);
58528+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58529+ const struct vfsmount *mnt);
58530+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58531+ const struct vfsmount *mnt);
58532+__u32 gr_acl_handle_access(const struct dentry *dentry,
58533+ const struct vfsmount *mnt, const int fmode);
58534+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58535+ const struct vfsmount *mnt, mode_t mode);
58536+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58537+ const struct vfsmount *mnt, mode_t mode);
58538+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58539+ const struct vfsmount *mnt);
58540+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58541+ const struct vfsmount *mnt);
58542+int gr_handle_ptrace(struct task_struct *task, const long request);
58543+int gr_handle_proc_ptrace(struct task_struct *task);
58544+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58545+ const struct vfsmount *mnt);
58546+int gr_check_crash_exec(const struct file *filp);
58547+int gr_acl_is_enabled(void);
58548+void gr_set_kernel_label(struct task_struct *task);
58549+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58550+ const gid_t gid);
58551+int gr_set_proc_label(const struct dentry *dentry,
58552+ const struct vfsmount *mnt,
58553+ const int unsafe_share);
58554+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58555+ const struct vfsmount *mnt);
58556+__u32 gr_acl_handle_open(const struct dentry *dentry,
58557+ const struct vfsmount *mnt, const int fmode);
58558+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58559+ const struct dentry *p_dentry,
58560+ const struct vfsmount *p_mnt, const int fmode,
58561+ const int imode);
58562+void gr_handle_create(const struct dentry *dentry,
58563+ const struct vfsmount *mnt);
58564+void gr_handle_proc_create(const struct dentry *dentry,
58565+ const struct inode *inode);
58566+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58567+ const struct dentry *parent_dentry,
58568+ const struct vfsmount *parent_mnt,
58569+ const int mode);
58570+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58571+ const struct dentry *parent_dentry,
58572+ const struct vfsmount *parent_mnt);
58573+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58574+ const struct vfsmount *mnt);
58575+void gr_handle_delete(const ino_t ino, const dev_t dev);
58576+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58577+ const struct vfsmount *mnt);
58578+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58579+ const struct dentry *parent_dentry,
58580+ const struct vfsmount *parent_mnt,
58581+ const char *from);
58582+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58583+ const struct dentry *parent_dentry,
58584+ const struct vfsmount *parent_mnt,
58585+ const struct dentry *old_dentry,
58586+ const struct vfsmount *old_mnt, const char *to);
58587+int gr_acl_handle_rename(struct dentry *new_dentry,
58588+ struct dentry *parent_dentry,
58589+ const struct vfsmount *parent_mnt,
58590+ struct dentry *old_dentry,
58591+ struct inode *old_parent_inode,
58592+ struct vfsmount *old_mnt, const char *newname);
58593+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58594+ struct dentry *old_dentry,
58595+ struct dentry *new_dentry,
58596+ struct vfsmount *mnt, const __u8 replace);
58597+__u32 gr_check_link(const struct dentry *new_dentry,
58598+ const struct dentry *parent_dentry,
58599+ const struct vfsmount *parent_mnt,
58600+ const struct dentry *old_dentry,
58601+ const struct vfsmount *old_mnt);
58602+int gr_acl_handle_filldir(const struct file *file, const char *name,
58603+ const unsigned int namelen, const ino_t ino);
58604+
58605+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58606+ const struct vfsmount *mnt);
58607+void gr_acl_handle_exit(void);
58608+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58609+int gr_acl_handle_procpidmem(const struct task_struct *task);
58610+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58611+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58612+void gr_audit_ptrace(struct task_struct *task);
58613+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58614+
58615+#ifdef CONFIG_GRKERNSEC
58616+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58617+void gr_handle_vm86(void);
58618+void gr_handle_mem_readwrite(u64 from, u64 to);
58619+
58620+extern int grsec_enable_dmesg;
58621+extern int grsec_disable_privio;
58622+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58623+extern int grsec_enable_chroot_findtask;
58624+#endif
58625+#endif
58626+
58627+#endif
58628diff -urNp linux-3.0.7/include/linux/grsock.h linux-3.0.7/include/linux/grsock.h
58629--- linux-3.0.7/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
58630+++ linux-3.0.7/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
58631@@ -0,0 +1,19 @@
58632+#ifndef __GRSOCK_H
58633+#define __GRSOCK_H
58634+
58635+extern void gr_attach_curr_ip(const struct sock *sk);
58636+extern int gr_handle_sock_all(const int family, const int type,
58637+ const int protocol);
58638+extern int gr_handle_sock_server(const struct sockaddr *sck);
58639+extern int gr_handle_sock_server_other(const struct sock *sck);
58640+extern int gr_handle_sock_client(const struct sockaddr *sck);
58641+extern int gr_search_connect(struct socket * sock,
58642+ struct sockaddr_in * addr);
58643+extern int gr_search_bind(struct socket * sock,
58644+ struct sockaddr_in * addr);
58645+extern int gr_search_listen(struct socket * sock);
58646+extern int gr_search_accept(struct socket * sock);
58647+extern int gr_search_socket(const int domain, const int type,
58648+ const int protocol);
58649+
58650+#endif
58651diff -urNp linux-3.0.7/include/linux/hid.h linux-3.0.7/include/linux/hid.h
58652--- linux-3.0.7/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
58653+++ linux-3.0.7/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
58654@@ -675,7 +675,7 @@ struct hid_ll_driver {
58655 unsigned int code, int value);
58656
58657 int (*parse)(struct hid_device *hdev);
58658-};
58659+} __no_const;
58660
58661 #define PM_HINT_FULLON 1<<5
58662 #define PM_HINT_NORMAL 1<<1
58663diff -urNp linux-3.0.7/include/linux/highmem.h linux-3.0.7/include/linux/highmem.h
58664--- linux-3.0.7/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
58665+++ linux-3.0.7/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
58666@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
58667 kunmap_atomic(kaddr, KM_USER0);
58668 }
58669
58670+static inline void sanitize_highpage(struct page *page)
58671+{
58672+ void *kaddr;
58673+ unsigned long flags;
58674+
58675+ local_irq_save(flags);
58676+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58677+ clear_page(kaddr);
58678+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58679+ local_irq_restore(flags);
58680+}
58681+
58682 static inline void zero_user_segments(struct page *page,
58683 unsigned start1, unsigned end1,
58684 unsigned start2, unsigned end2)
58685diff -urNp linux-3.0.7/include/linux/i2c.h linux-3.0.7/include/linux/i2c.h
58686--- linux-3.0.7/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
58687+++ linux-3.0.7/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
58688@@ -346,6 +346,7 @@ struct i2c_algorithm {
58689 /* To determine what the adapter supports */
58690 u32 (*functionality) (struct i2c_adapter *);
58691 };
58692+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58693
58694 /*
58695 * i2c_adapter is the structure used to identify a physical i2c bus along
58696diff -urNp linux-3.0.7/include/linux/i2o.h linux-3.0.7/include/linux/i2o.h
58697--- linux-3.0.7/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
58698+++ linux-3.0.7/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
58699@@ -564,7 +564,7 @@ struct i2o_controller {
58700 struct i2o_device *exec; /* Executive */
58701 #if BITS_PER_LONG == 64
58702 spinlock_t context_list_lock; /* lock for context_list */
58703- atomic_t context_list_counter; /* needed for unique contexts */
58704+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58705 struct list_head context_list; /* list of context id's
58706 and pointers */
58707 #endif
58708diff -urNp linux-3.0.7/include/linux/init.h linux-3.0.7/include/linux/init.h
58709--- linux-3.0.7/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
58710+++ linux-3.0.7/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
58711@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
58712
58713 /* Each module must use one module_init(). */
58714 #define module_init(initfn) \
58715- static inline initcall_t __inittest(void) \
58716+ static inline __used initcall_t __inittest(void) \
58717 { return initfn; } \
58718 int init_module(void) __attribute__((alias(#initfn)));
58719
58720 /* This is only required if you want to be unloadable. */
58721 #define module_exit(exitfn) \
58722- static inline exitcall_t __exittest(void) \
58723+ static inline __used exitcall_t __exittest(void) \
58724 { return exitfn; } \
58725 void cleanup_module(void) __attribute__((alias(#exitfn)));
58726
58727diff -urNp linux-3.0.7/include/linux/init_task.h linux-3.0.7/include/linux/init_task.h
58728--- linux-3.0.7/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
58729+++ linux-3.0.7/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
58730@@ -126,6 +126,12 @@ extern struct cred init_cred;
58731 # define INIT_PERF_EVENTS(tsk)
58732 #endif
58733
58734+#ifdef CONFIG_X86
58735+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58736+#else
58737+#define INIT_TASK_THREAD_INFO
58738+#endif
58739+
58740 /*
58741 * INIT_TASK is used to set up the first task table, touch at
58742 * your own risk!. Base=0, limit=0x1fffff (=2MB)
58743@@ -164,6 +170,7 @@ extern struct cred init_cred;
58744 RCU_INIT_POINTER(.cred, &init_cred), \
58745 .comm = "swapper", \
58746 .thread = INIT_THREAD, \
58747+ INIT_TASK_THREAD_INFO \
58748 .fs = &init_fs, \
58749 .files = &init_files, \
58750 .signal = &init_signals, \
58751diff -urNp linux-3.0.7/include/linux/intel-iommu.h linux-3.0.7/include/linux/intel-iommu.h
58752--- linux-3.0.7/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
58753+++ linux-3.0.7/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
58754@@ -296,7 +296,7 @@ struct iommu_flush {
58755 u8 fm, u64 type);
58756 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58757 unsigned int size_order, u64 type);
58758-};
58759+} __no_const;
58760
58761 enum {
58762 SR_DMAR_FECTL_REG,
58763diff -urNp linux-3.0.7/include/linux/interrupt.h linux-3.0.7/include/linux/interrupt.h
58764--- linux-3.0.7/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
58765+++ linux-3.0.7/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
58766@@ -422,7 +422,7 @@ enum
58767 /* map softirq index to softirq name. update 'softirq_to_name' in
58768 * kernel/softirq.c when adding a new softirq.
58769 */
58770-extern char *softirq_to_name[NR_SOFTIRQS];
58771+extern const char * const softirq_to_name[NR_SOFTIRQS];
58772
58773 /* softirq mask and active fields moved to irq_cpustat_t in
58774 * asm/hardirq.h to get better cache usage. KAO
58775@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58776
58777 struct softirq_action
58778 {
58779- void (*action)(struct softirq_action *);
58780+ void (*action)(void);
58781 };
58782
58783 asmlinkage void do_softirq(void);
58784 asmlinkage void __do_softirq(void);
58785-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58786+extern void open_softirq(int nr, void (*action)(void));
58787 extern void softirq_init(void);
58788 static inline void __raise_softirq_irqoff(unsigned int nr)
58789 {
58790diff -urNp linux-3.0.7/include/linux/kallsyms.h linux-3.0.7/include/linux/kallsyms.h
58791--- linux-3.0.7/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
58792+++ linux-3.0.7/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
58793@@ -15,7 +15,8 @@
58794
58795 struct module;
58796
58797-#ifdef CONFIG_KALLSYMS
58798+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58799+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58800 /* Lookup the address for a symbol. Returns 0 if not found. */
58801 unsigned long kallsyms_lookup_name(const char *name);
58802
58803@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
58804 /* Stupid that this does nothing, but I didn't create this mess. */
58805 #define __print_symbol(fmt, addr)
58806 #endif /*CONFIG_KALLSYMS*/
58807+#else /* when included by kallsyms.c, vsnprintf.c, or
58808+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58809+extern void __print_symbol(const char *fmt, unsigned long address);
58810+extern int sprint_backtrace(char *buffer, unsigned long address);
58811+extern int sprint_symbol(char *buffer, unsigned long address);
58812+const char *kallsyms_lookup(unsigned long addr,
58813+ unsigned long *symbolsize,
58814+ unsigned long *offset,
58815+ char **modname, char *namebuf);
58816+#endif
58817
58818 /* This macro allows us to keep printk typechecking */
58819 static void __check_printsym_format(const char *fmt, ...)
58820diff -urNp linux-3.0.7/include/linux/kgdb.h linux-3.0.7/include/linux/kgdb.h
58821--- linux-3.0.7/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
58822+++ linux-3.0.7/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
58823@@ -53,7 +53,7 @@ extern int kgdb_connected;
58824 extern int kgdb_io_module_registered;
58825
58826 extern atomic_t kgdb_setting_breakpoint;
58827-extern atomic_t kgdb_cpu_doing_single_step;
58828+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
58829
58830 extern struct task_struct *kgdb_usethread;
58831 extern struct task_struct *kgdb_contthread;
58832@@ -251,7 +251,7 @@ struct kgdb_arch {
58833 void (*disable_hw_break)(struct pt_regs *regs);
58834 void (*remove_all_hw_break)(void);
58835 void (*correct_hw_break)(void);
58836-};
58837+} __do_const;
58838
58839 /**
58840 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
58841@@ -276,7 +276,7 @@ struct kgdb_io {
58842 void (*pre_exception) (void);
58843 void (*post_exception) (void);
58844 int is_console;
58845-};
58846+} __do_const;
58847
58848 extern struct kgdb_arch arch_kgdb_ops;
58849
58850diff -urNp linux-3.0.7/include/linux/kmod.h linux-3.0.7/include/linux/kmod.h
58851--- linux-3.0.7/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
58852+++ linux-3.0.7/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
58853@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
58854 * usually useless though. */
58855 extern int __request_module(bool wait, const char *name, ...) \
58856 __attribute__((format(printf, 2, 3)));
58857+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
58858+ __attribute__((format(printf, 3, 4)));
58859 #define request_module(mod...) __request_module(true, mod)
58860 #define request_module_nowait(mod...) __request_module(false, mod)
58861 #define try_then_request_module(x, mod...) \
58862diff -urNp linux-3.0.7/include/linux/kvm_host.h linux-3.0.7/include/linux/kvm_host.h
58863--- linux-3.0.7/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
58864+++ linux-3.0.7/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
58865@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
58866 void vcpu_load(struct kvm_vcpu *vcpu);
58867 void vcpu_put(struct kvm_vcpu *vcpu);
58868
58869-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58870+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58871 struct module *module);
58872 void kvm_exit(void);
58873
58874@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
58875 struct kvm_guest_debug *dbg);
58876 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
58877
58878-int kvm_arch_init(void *opaque);
58879+int kvm_arch_init(const void *opaque);
58880 void kvm_arch_exit(void);
58881
58882 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
58883diff -urNp linux-3.0.7/include/linux/libata.h linux-3.0.7/include/linux/libata.h
58884--- linux-3.0.7/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
58885+++ linux-3.0.7/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
58886@@ -899,7 +899,7 @@ struct ata_port_operations {
58887 * fields must be pointers.
58888 */
58889 const struct ata_port_operations *inherits;
58890-};
58891+} __do_const;
58892
58893 struct ata_port_info {
58894 unsigned long flags;
58895diff -urNp linux-3.0.7/include/linux/linkage.h linux-3.0.7/include/linux/linkage.h
58896--- linux-3.0.7/include/linux/linkage.h 2011-07-21 22:17:23.000000000 -0400
58897+++ linux-3.0.7/include/linux/linkage.h 2011-10-11 10:44:33.000000000 -0400
58898@@ -82,6 +82,7 @@
58899 */
58900 #ifndef ENDPROC
58901 #define ENDPROC(name) \
58902+ .size name, .-name; \
58903 .type name, @function; \
58904 END(name)
58905 #endif
58906diff -urNp linux-3.0.7/include/linux/mca.h linux-3.0.7/include/linux/mca.h
58907--- linux-3.0.7/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
58908+++ linux-3.0.7/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
58909@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
58910 int region);
58911 void * (*mca_transform_memory)(struct mca_device *,
58912 void *memory);
58913-};
58914+} __no_const;
58915
58916 struct mca_bus {
58917 u64 default_dma_mask;
58918diff -urNp linux-3.0.7/include/linux/memory.h linux-3.0.7/include/linux/memory.h
58919--- linux-3.0.7/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
58920+++ linux-3.0.7/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
58921@@ -144,7 +144,7 @@ struct memory_accessor {
58922 size_t count);
58923 ssize_t (*write)(struct memory_accessor *, const char *buf,
58924 off_t offset, size_t count);
58925-};
58926+} __no_const;
58927
58928 /*
58929 * Kernel text modification mutex, used for code patching. Users of this lock
58930diff -urNp linux-3.0.7/include/linux/mfd/abx500.h linux-3.0.7/include/linux/mfd/abx500.h
58931--- linux-3.0.7/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
58932+++ linux-3.0.7/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
58933@@ -234,6 +234,7 @@ struct abx500_ops {
58934 int (*event_registers_startup_state_get) (struct device *, u8 *);
58935 int (*startup_irq_enabled) (struct device *, unsigned int);
58936 };
58937+typedef struct abx500_ops __no_const abx500_ops_no_const;
58938
58939 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
58940 void abx500_remove_ops(struct device *dev);
58941diff -urNp linux-3.0.7/include/linux/mm.h linux-3.0.7/include/linux/mm.h
58942--- linux-3.0.7/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
58943+++ linux-3.0.7/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
58944@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
58945
58946 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
58947 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
58948+
58949+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
58950+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
58951+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
58952+#else
58953 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
58954+#endif
58955+
58956 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
58957 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
58958
58959@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
58960 int set_page_dirty_lock(struct page *page);
58961 int clear_page_dirty_for_io(struct page *page);
58962
58963-/* Is the vma a continuation of the stack vma above it? */
58964-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
58965-{
58966- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
58967-}
58968-
58969-static inline int stack_guard_page_start(struct vm_area_struct *vma,
58970- unsigned long addr)
58971-{
58972- return (vma->vm_flags & VM_GROWSDOWN) &&
58973- (vma->vm_start == addr) &&
58974- !vma_growsdown(vma->vm_prev, addr);
58975-}
58976-
58977-/* Is the vma a continuation of the stack vma below it? */
58978-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
58979-{
58980- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
58981-}
58982-
58983-static inline int stack_guard_page_end(struct vm_area_struct *vma,
58984- unsigned long addr)
58985-{
58986- return (vma->vm_flags & VM_GROWSUP) &&
58987- (vma->vm_end == addr) &&
58988- !vma_growsup(vma->vm_next, addr);
58989-}
58990-
58991 extern unsigned long move_page_tables(struct vm_area_struct *vma,
58992 unsigned long old_addr, struct vm_area_struct *new_vma,
58993 unsigned long new_addr, unsigned long len);
58994@@ -1169,6 +1148,15 @@ struct shrinker {
58995 extern void register_shrinker(struct shrinker *);
58996 extern void unregister_shrinker(struct shrinker *);
58997
58998+#ifdef CONFIG_MMU
58999+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59000+#else
59001+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59002+{
59003+ return __pgprot(0);
59004+}
59005+#endif
59006+
59007 int vma_wants_writenotify(struct vm_area_struct *vma);
59008
59009 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59010@@ -1452,6 +1440,7 @@ out:
59011 }
59012
59013 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59014+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59015
59016 extern unsigned long do_brk(unsigned long, unsigned long);
59017
59018@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
59019 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59020 struct vm_area_struct **pprev);
59021
59022+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59023+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59024+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59025+
59026 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59027 NULL if none. Assume start_addr < end_addr. */
59028 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59029@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
59030 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59031 }
59032
59033-#ifdef CONFIG_MMU
59034-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59035-#else
59036-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59037-{
59038- return __pgprot(0);
59039-}
59040-#endif
59041-
59042 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59043 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59044 unsigned long pfn, unsigned long size, pgprot_t);
59045@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
59046 extern int sysctl_memory_failure_early_kill;
59047 extern int sysctl_memory_failure_recovery;
59048 extern void shake_page(struct page *p, int access);
59049-extern atomic_long_t mce_bad_pages;
59050+extern atomic_long_unchecked_t mce_bad_pages;
59051 extern int soft_offline_page(struct page *page, int flags);
59052
59053 extern void dump_page(struct page *page);
59054@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
59055 unsigned int pages_per_huge_page);
59056 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59057
59058+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59059+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59060+#else
59061+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59062+#endif
59063+
59064 #endif /* __KERNEL__ */
59065 #endif /* _LINUX_MM_H */
59066diff -urNp linux-3.0.7/include/linux/mm_types.h linux-3.0.7/include/linux/mm_types.h
59067--- linux-3.0.7/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
59068+++ linux-3.0.7/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
59069@@ -184,6 +184,8 @@ struct vm_area_struct {
59070 #ifdef CONFIG_NUMA
59071 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59072 #endif
59073+
59074+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59075 };
59076
59077 struct core_thread {
59078@@ -316,6 +318,24 @@ struct mm_struct {
59079 #ifdef CONFIG_CPUMASK_OFFSTACK
59080 struct cpumask cpumask_allocation;
59081 #endif
59082+
59083+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59084+ unsigned long pax_flags;
59085+#endif
59086+
59087+#ifdef CONFIG_PAX_DLRESOLVE
59088+ unsigned long call_dl_resolve;
59089+#endif
59090+
59091+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59092+ unsigned long call_syscall;
59093+#endif
59094+
59095+#ifdef CONFIG_PAX_ASLR
59096+ unsigned long delta_mmap; /* randomized offset */
59097+ unsigned long delta_stack; /* randomized offset */
59098+#endif
59099+
59100 };
59101
59102 static inline void mm_init_cpumask(struct mm_struct *mm)
59103diff -urNp linux-3.0.7/include/linux/mmu_notifier.h linux-3.0.7/include/linux/mmu_notifier.h
59104--- linux-3.0.7/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
59105+++ linux-3.0.7/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
59106@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59107 */
59108 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59109 ({ \
59110- pte_t __pte; \
59111+ pte_t ___pte; \
59112 struct vm_area_struct *___vma = __vma; \
59113 unsigned long ___address = __address; \
59114- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59115+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59116 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59117- __pte; \
59118+ ___pte; \
59119 })
59120
59121 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59122diff -urNp linux-3.0.7/include/linux/mmzone.h linux-3.0.7/include/linux/mmzone.h
59123--- linux-3.0.7/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
59124+++ linux-3.0.7/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
59125@@ -350,7 +350,7 @@ struct zone {
59126 unsigned long flags; /* zone flags, see below */
59127
59128 /* Zone statistics */
59129- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59130+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59131
59132 /*
59133 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59134diff -urNp linux-3.0.7/include/linux/mod_devicetable.h linux-3.0.7/include/linux/mod_devicetable.h
59135--- linux-3.0.7/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
59136+++ linux-3.0.7/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
59137@@ -12,7 +12,7 @@
59138 typedef unsigned long kernel_ulong_t;
59139 #endif
59140
59141-#define PCI_ANY_ID (~0)
59142+#define PCI_ANY_ID ((__u16)~0)
59143
59144 struct pci_device_id {
59145 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59146@@ -131,7 +131,7 @@ struct usb_device_id {
59147 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59148 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59149
59150-#define HID_ANY_ID (~0)
59151+#define HID_ANY_ID (~0U)
59152
59153 struct hid_device_id {
59154 __u16 bus;
59155diff -urNp linux-3.0.7/include/linux/module.h linux-3.0.7/include/linux/module.h
59156--- linux-3.0.7/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
59157+++ linux-3.0.7/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
59158@@ -16,6 +16,7 @@
59159 #include <linux/kobject.h>
59160 #include <linux/moduleparam.h>
59161 #include <linux/tracepoint.h>
59162+#include <linux/fs.h>
59163
59164 #include <linux/percpu.h>
59165 #include <asm/module.h>
59166@@ -325,19 +326,16 @@ struct module
59167 int (*init)(void);
59168
59169 /* If this is non-NULL, vfree after init() returns */
59170- void *module_init;
59171+ void *module_init_rx, *module_init_rw;
59172
59173 /* Here is the actual code + data, vfree'd on unload. */
59174- void *module_core;
59175+ void *module_core_rx, *module_core_rw;
59176
59177 /* Here are the sizes of the init and core sections */
59178- unsigned int init_size, core_size;
59179+ unsigned int init_size_rw, core_size_rw;
59180
59181 /* The size of the executable code in each section. */
59182- unsigned int init_text_size, core_text_size;
59183-
59184- /* Size of RO sections of the module (text+rodata) */
59185- unsigned int init_ro_size, core_ro_size;
59186+ unsigned int init_size_rx, core_size_rx;
59187
59188 /* Arch-specific module values */
59189 struct mod_arch_specific arch;
59190@@ -393,6 +391,10 @@ struct module
59191 #ifdef CONFIG_EVENT_TRACING
59192 struct ftrace_event_call **trace_events;
59193 unsigned int num_trace_events;
59194+ struct file_operations trace_id;
59195+ struct file_operations trace_enable;
59196+ struct file_operations trace_format;
59197+ struct file_operations trace_filter;
59198 #endif
59199 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59200 unsigned int num_ftrace_callsites;
59201@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
59202 bool is_module_percpu_address(unsigned long addr);
59203 bool is_module_text_address(unsigned long addr);
59204
59205+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59206+{
59207+
59208+#ifdef CONFIG_PAX_KERNEXEC
59209+ if (ktla_ktva(addr) >= (unsigned long)start &&
59210+ ktla_ktva(addr) < (unsigned long)start + size)
59211+ return 1;
59212+#endif
59213+
59214+ return ((void *)addr >= start && (void *)addr < start + size);
59215+}
59216+
59217+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59218+{
59219+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59220+}
59221+
59222+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59223+{
59224+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59225+}
59226+
59227+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59228+{
59229+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59230+}
59231+
59232+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59233+{
59234+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59235+}
59236+
59237 static inline int within_module_core(unsigned long addr, struct module *mod)
59238 {
59239- return (unsigned long)mod->module_core <= addr &&
59240- addr < (unsigned long)mod->module_core + mod->core_size;
59241+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59242 }
59243
59244 static inline int within_module_init(unsigned long addr, struct module *mod)
59245 {
59246- return (unsigned long)mod->module_init <= addr &&
59247- addr < (unsigned long)mod->module_init + mod->init_size;
59248+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59249 }
59250
59251 /* Search for module by name: must hold module_mutex. */
59252diff -urNp linux-3.0.7/include/linux/moduleloader.h linux-3.0.7/include/linux/moduleloader.h
59253--- linux-3.0.7/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
59254+++ linux-3.0.7/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
59255@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59256 sections. Returns NULL on failure. */
59257 void *module_alloc(unsigned long size);
59258
59259+#ifdef CONFIG_PAX_KERNEXEC
59260+void *module_alloc_exec(unsigned long size);
59261+#else
59262+#define module_alloc_exec(x) module_alloc(x)
59263+#endif
59264+
59265 /* Free memory returned from module_alloc. */
59266 void module_free(struct module *mod, void *module_region);
59267
59268+#ifdef CONFIG_PAX_KERNEXEC
59269+void module_free_exec(struct module *mod, void *module_region);
59270+#else
59271+#define module_free_exec(x, y) module_free((x), (y))
59272+#endif
59273+
59274 /* Apply the given relocation to the (simplified) ELF. Return -error
59275 or 0. */
59276 int apply_relocate(Elf_Shdr *sechdrs,
59277diff -urNp linux-3.0.7/include/linux/moduleparam.h linux-3.0.7/include/linux/moduleparam.h
59278--- linux-3.0.7/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
59279+++ linux-3.0.7/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
59280@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59281 * @len is usually just sizeof(string).
59282 */
59283 #define module_param_string(name, string, len, perm) \
59284- static const struct kparam_string __param_string_##name \
59285+ static const struct kparam_string __param_string_##name __used \
59286 = { len, string }; \
59287 __module_param_call(MODULE_PARAM_PREFIX, name, \
59288 &param_ops_string, \
59289@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59290 * module_param_named() for why this might be necessary.
59291 */
59292 #define module_param_array_named(name, array, type, nump, perm) \
59293- static const struct kparam_array __param_arr_##name \
59294+ static const struct kparam_array __param_arr_##name __used \
59295 = { .max = ARRAY_SIZE(array), .num = nump, \
59296 .ops = &param_ops_##type, \
59297 .elemsize = sizeof(array[0]), .elem = array }; \
59298diff -urNp linux-3.0.7/include/linux/namei.h linux-3.0.7/include/linux/namei.h
59299--- linux-3.0.7/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
59300+++ linux-3.0.7/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
59301@@ -24,7 +24,7 @@ struct nameidata {
59302 unsigned seq;
59303 int last_type;
59304 unsigned depth;
59305- char *saved_names[MAX_NESTED_LINKS + 1];
59306+ const char *saved_names[MAX_NESTED_LINKS + 1];
59307
59308 /* Intent data */
59309 union {
59310@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
59311 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59312 extern void unlock_rename(struct dentry *, struct dentry *);
59313
59314-static inline void nd_set_link(struct nameidata *nd, char *path)
59315+static inline void nd_set_link(struct nameidata *nd, const char *path)
59316 {
59317 nd->saved_names[nd->depth] = path;
59318 }
59319
59320-static inline char *nd_get_link(struct nameidata *nd)
59321+static inline const char *nd_get_link(const struct nameidata *nd)
59322 {
59323 return nd->saved_names[nd->depth];
59324 }
59325diff -urNp linux-3.0.7/include/linux/netdevice.h linux-3.0.7/include/linux/netdevice.h
59326--- linux-3.0.7/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
59327+++ linux-3.0.7/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
59328@@ -979,6 +979,7 @@ struct net_device_ops {
59329 int (*ndo_set_features)(struct net_device *dev,
59330 u32 features);
59331 };
59332+typedef struct net_device_ops __no_const net_device_ops_no_const;
59333
59334 /*
59335 * The DEVICE structure.
59336diff -urNp linux-3.0.7/include/linux/netfilter/xt_gradm.h linux-3.0.7/include/linux/netfilter/xt_gradm.h
59337--- linux-3.0.7/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59338+++ linux-3.0.7/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
59339@@ -0,0 +1,9 @@
59340+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59341+#define _LINUX_NETFILTER_XT_GRADM_H 1
59342+
59343+struct xt_gradm_mtinfo {
59344+ __u16 flags;
59345+ __u16 invflags;
59346+};
59347+
59348+#endif
59349diff -urNp linux-3.0.7/include/linux/of_pdt.h linux-3.0.7/include/linux/of_pdt.h
59350--- linux-3.0.7/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
59351+++ linux-3.0.7/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
59352@@ -32,7 +32,7 @@ struct of_pdt_ops {
59353
59354 /* return 0 on success; fill in 'len' with number of bytes in path */
59355 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59356-};
59357+} __no_const;
59358
59359 extern void *prom_early_alloc(unsigned long size);
59360
59361diff -urNp linux-3.0.7/include/linux/oprofile.h linux-3.0.7/include/linux/oprofile.h
59362--- linux-3.0.7/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
59363+++ linux-3.0.7/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
59364@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59365 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59366 char const * name, ulong * val);
59367
59368-/** Create a file for read-only access to an atomic_t. */
59369+/** Create a file for read-only access to an atomic_unchecked_t. */
59370 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59371- char const * name, atomic_t * val);
59372+ char const * name, atomic_unchecked_t * val);
59373
59374 /** create a directory */
59375 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59376diff -urNp linux-3.0.7/include/linux/padata.h linux-3.0.7/include/linux/padata.h
59377--- linux-3.0.7/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
59378+++ linux-3.0.7/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
59379@@ -129,7 +129,7 @@ struct parallel_data {
59380 struct padata_instance *pinst;
59381 struct padata_parallel_queue __percpu *pqueue;
59382 struct padata_serial_queue __percpu *squeue;
59383- atomic_t seq_nr;
59384+ atomic_unchecked_t seq_nr;
59385 atomic_t reorder_objects;
59386 atomic_t refcnt;
59387 unsigned int max_seq_nr;
59388diff -urNp linux-3.0.7/include/linux/perf_event.h linux-3.0.7/include/linux/perf_event.h
59389--- linux-3.0.7/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
59390+++ linux-3.0.7/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
59391@@ -761,8 +761,8 @@ struct perf_event {
59392
59393 enum perf_event_active_state state;
59394 unsigned int attach_state;
59395- local64_t count;
59396- atomic64_t child_count;
59397+ local64_t count; /* PaX: fix it one day */
59398+ atomic64_unchecked_t child_count;
59399
59400 /*
59401 * These are the total time in nanoseconds that the event
59402@@ -813,8 +813,8 @@ struct perf_event {
59403 * These accumulate total time (in nanoseconds) that children
59404 * events have been enabled and running, respectively.
59405 */
59406- atomic64_t child_total_time_enabled;
59407- atomic64_t child_total_time_running;
59408+ atomic64_unchecked_t child_total_time_enabled;
59409+ atomic64_unchecked_t child_total_time_running;
59410
59411 /*
59412 * Protect attach/detach and child_list:
59413diff -urNp linux-3.0.7/include/linux/pipe_fs_i.h linux-3.0.7/include/linux/pipe_fs_i.h
59414--- linux-3.0.7/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
59415+++ linux-3.0.7/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
59416@@ -46,9 +46,9 @@ struct pipe_buffer {
59417 struct pipe_inode_info {
59418 wait_queue_head_t wait;
59419 unsigned int nrbufs, curbuf, buffers;
59420- unsigned int readers;
59421- unsigned int writers;
59422- unsigned int waiting_writers;
59423+ atomic_t readers;
59424+ atomic_t writers;
59425+ atomic_t waiting_writers;
59426 unsigned int r_counter;
59427 unsigned int w_counter;
59428 struct page *tmp_page;
59429diff -urNp linux-3.0.7/include/linux/pm_runtime.h linux-3.0.7/include/linux/pm_runtime.h
59430--- linux-3.0.7/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
59431+++ linux-3.0.7/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
59432@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
59433
59434 static inline void pm_runtime_mark_last_busy(struct device *dev)
59435 {
59436- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59437+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59438 }
59439
59440 #else /* !CONFIG_PM_RUNTIME */
59441diff -urNp linux-3.0.7/include/linux/poison.h linux-3.0.7/include/linux/poison.h
59442--- linux-3.0.7/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
59443+++ linux-3.0.7/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
59444@@ -19,8 +19,8 @@
59445 * under normal circumstances, used to verify that nobody uses
59446 * non-initialized list entries.
59447 */
59448-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59449-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59450+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59451+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59452
59453 /********** include/linux/timer.h **********/
59454 /*
59455diff -urNp linux-3.0.7/include/linux/preempt.h linux-3.0.7/include/linux/preempt.h
59456--- linux-3.0.7/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
59457+++ linux-3.0.7/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
59458@@ -115,7 +115,7 @@ struct preempt_ops {
59459 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59460 void (*sched_out)(struct preempt_notifier *notifier,
59461 struct task_struct *next);
59462-};
59463+} __no_const;
59464
59465 /**
59466 * preempt_notifier - key for installing preemption notifiers
59467diff -urNp linux-3.0.7/include/linux/proc_fs.h linux-3.0.7/include/linux/proc_fs.h
59468--- linux-3.0.7/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
59469+++ linux-3.0.7/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
59470@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59471 return proc_create_data(name, mode, parent, proc_fops, NULL);
59472 }
59473
59474+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59475+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59476+{
59477+#ifdef CONFIG_GRKERNSEC_PROC_USER
59478+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59479+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59480+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59481+#else
59482+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59483+#endif
59484+}
59485+
59486+
59487 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59488 mode_t mode, struct proc_dir_entry *base,
59489 read_proc_t *read_proc, void * data)
59490@@ -258,7 +271,7 @@ union proc_op {
59491 int (*proc_show)(struct seq_file *m,
59492 struct pid_namespace *ns, struct pid *pid,
59493 struct task_struct *task);
59494-};
59495+} __no_const;
59496
59497 struct ctl_table_header;
59498 struct ctl_table;
59499diff -urNp linux-3.0.7/include/linux/ptrace.h linux-3.0.7/include/linux/ptrace.h
59500--- linux-3.0.7/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
59501+++ linux-3.0.7/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
59502@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
59503 extern void exit_ptrace(struct task_struct *tracer);
59504 #define PTRACE_MODE_READ 1
59505 #define PTRACE_MODE_ATTACH 2
59506-/* Returns 0 on success, -errno on denial. */
59507-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59508 /* Returns true on success, false on denial. */
59509 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59510+/* Returns true on success, false on denial. */
59511+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59512
59513 static inline int ptrace_reparented(struct task_struct *child)
59514 {
59515diff -urNp linux-3.0.7/include/linux/random.h linux-3.0.7/include/linux/random.h
59516--- linux-3.0.7/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
59517+++ linux-3.0.7/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
59518@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59519
59520 u32 prandom32(struct rnd_state *);
59521
59522+static inline unsigned long pax_get_random_long(void)
59523+{
59524+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59525+}
59526+
59527 /*
59528 * Handle minimum values for seeds
59529 */
59530 static inline u32 __seed(u32 x, u32 m)
59531 {
59532- return (x < m) ? x + m : x;
59533+ return (x <= m) ? x + m + 1 : x;
59534 }
59535
59536 /**
59537diff -urNp linux-3.0.7/include/linux/reboot.h linux-3.0.7/include/linux/reboot.h
59538--- linux-3.0.7/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
59539+++ linux-3.0.7/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
59540@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59541 * Architecture-specific implementations of sys_reboot commands.
59542 */
59543
59544-extern void machine_restart(char *cmd);
59545-extern void machine_halt(void);
59546-extern void machine_power_off(void);
59547+extern void machine_restart(char *cmd) __noreturn;
59548+extern void machine_halt(void) __noreturn;
59549+extern void machine_power_off(void) __noreturn;
59550
59551 extern void machine_shutdown(void);
59552 struct pt_regs;
59553@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59554 */
59555
59556 extern void kernel_restart_prepare(char *cmd);
59557-extern void kernel_restart(char *cmd);
59558-extern void kernel_halt(void);
59559-extern void kernel_power_off(void);
59560+extern void kernel_restart(char *cmd) __noreturn;
59561+extern void kernel_halt(void) __noreturn;
59562+extern void kernel_power_off(void) __noreturn;
59563
59564 extern int C_A_D; /* for sysctl */
59565 void ctrl_alt_del(void);
59566@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
59567 * Emergency restart, callable from an interrupt handler.
59568 */
59569
59570-extern void emergency_restart(void);
59571+extern void emergency_restart(void) __noreturn;
59572 #include <asm/emergency-restart.h>
59573
59574 #endif
59575diff -urNp linux-3.0.7/include/linux/reiserfs_fs.h linux-3.0.7/include/linux/reiserfs_fs.h
59576--- linux-3.0.7/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
59577+++ linux-3.0.7/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
59578@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
59579 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59580
59581 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59582-#define get_generation(s) atomic_read (&fs_generation(s))
59583+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59584 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59585 #define __fs_changed(gen,s) (gen != get_generation (s))
59586 #define fs_changed(gen,s) \
59587diff -urNp linux-3.0.7/include/linux/reiserfs_fs_sb.h linux-3.0.7/include/linux/reiserfs_fs_sb.h
59588--- linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
59589+++ linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
59590@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59591 /* Comment? -Hans */
59592 wait_queue_head_t s_wait;
59593 /* To be obsoleted soon by per buffer seals.. -Hans */
59594- atomic_t s_generation_counter; // increased by one every time the
59595+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59596 // tree gets re-balanced
59597 unsigned long s_properties; /* File system properties. Currently holds
59598 on-disk FS format */
59599diff -urNp linux-3.0.7/include/linux/relay.h linux-3.0.7/include/linux/relay.h
59600--- linux-3.0.7/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
59601+++ linux-3.0.7/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
59602@@ -159,7 +159,7 @@ struct rchan_callbacks
59603 * The callback should return 0 if successful, negative if not.
59604 */
59605 int (*remove_buf_file)(struct dentry *dentry);
59606-};
59607+} __no_const;
59608
59609 /*
59610 * CONFIG_RELAY kernel API, kernel/relay.c
59611diff -urNp linux-3.0.7/include/linux/rfkill.h linux-3.0.7/include/linux/rfkill.h
59612--- linux-3.0.7/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
59613+++ linux-3.0.7/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
59614@@ -147,6 +147,7 @@ struct rfkill_ops {
59615 void (*query)(struct rfkill *rfkill, void *data);
59616 int (*set_block)(void *data, bool blocked);
59617 };
59618+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59619
59620 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59621 /**
59622diff -urNp linux-3.0.7/include/linux/rmap.h linux-3.0.7/include/linux/rmap.h
59623--- linux-3.0.7/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
59624+++ linux-3.0.7/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
59625@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
59626 void anon_vma_init(void); /* create anon_vma_cachep */
59627 int anon_vma_prepare(struct vm_area_struct *);
59628 void unlink_anon_vmas(struct vm_area_struct *);
59629-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
59630-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
59631+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
59632+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
59633 void __anon_vma_link(struct vm_area_struct *);
59634
59635 static inline void anon_vma_merge(struct vm_area_struct *vma,
59636diff -urNp linux-3.0.7/include/linux/sched.h linux-3.0.7/include/linux/sched.h
59637--- linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:09.000000000 -0400
59638+++ linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:19.000000000 -0400
59639@@ -100,6 +100,7 @@ struct bio_list;
59640 struct fs_struct;
59641 struct perf_event_context;
59642 struct blk_plug;
59643+struct linux_binprm;
59644
59645 /*
59646 * List of flags we want to share for kernel threads,
59647@@ -380,10 +381,13 @@ struct user_namespace;
59648 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59649
59650 extern int sysctl_max_map_count;
59651+extern unsigned long sysctl_heap_stack_gap;
59652
59653 #include <linux/aio.h>
59654
59655 #ifdef CONFIG_MMU
59656+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59657+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59658 extern void arch_pick_mmap_layout(struct mm_struct *mm);
59659 extern unsigned long
59660 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59661@@ -629,6 +633,17 @@ struct signal_struct {
59662 #ifdef CONFIG_TASKSTATS
59663 struct taskstats *stats;
59664 #endif
59665+
59666+#ifdef CONFIG_GRKERNSEC
59667+ u32 curr_ip;
59668+ u32 saved_ip;
59669+ u32 gr_saddr;
59670+ u32 gr_daddr;
59671+ u16 gr_sport;
59672+ u16 gr_dport;
59673+ u8 used_accept:1;
59674+#endif
59675+
59676 #ifdef CONFIG_AUDIT
59677 unsigned audit_tty;
59678 struct tty_audit_buf *tty_audit_buf;
59679@@ -710,6 +725,11 @@ struct user_struct {
59680 struct key *session_keyring; /* UID's default session keyring */
59681 #endif
59682
59683+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59684+ unsigned int banned;
59685+ unsigned long ban_expires;
59686+#endif
59687+
59688 /* Hash table maintenance information */
59689 struct hlist_node uidhash_node;
59690 uid_t uid;
59691@@ -1340,8 +1360,8 @@ struct task_struct {
59692 struct list_head thread_group;
59693
59694 struct completion *vfork_done; /* for vfork() */
59695- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59696- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59697+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59698+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59699
59700 cputime_t utime, stime, utimescaled, stimescaled;
59701 cputime_t gtime;
59702@@ -1357,13 +1377,6 @@ struct task_struct {
59703 struct task_cputime cputime_expires;
59704 struct list_head cpu_timers[3];
59705
59706-/* process credentials */
59707- const struct cred __rcu *real_cred; /* objective and real subjective task
59708- * credentials (COW) */
59709- const struct cred __rcu *cred; /* effective (overridable) subjective task
59710- * credentials (COW) */
59711- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59712-
59713 char comm[TASK_COMM_LEN]; /* executable name excluding path
59714 - access with [gs]et_task_comm (which lock
59715 it with task_lock())
59716@@ -1380,8 +1393,16 @@ struct task_struct {
59717 #endif
59718 /* CPU-specific state of this task */
59719 struct thread_struct thread;
59720+/* thread_info moved to task_struct */
59721+#ifdef CONFIG_X86
59722+ struct thread_info tinfo;
59723+#endif
59724 /* filesystem information */
59725 struct fs_struct *fs;
59726+
59727+ const struct cred __rcu *cred; /* effective (overridable) subjective task
59728+ * credentials (COW) */
59729+
59730 /* open file information */
59731 struct files_struct *files;
59732 /* namespaces */
59733@@ -1428,6 +1449,11 @@ struct task_struct {
59734 struct rt_mutex_waiter *pi_blocked_on;
59735 #endif
59736
59737+/* process credentials */
59738+ const struct cred __rcu *real_cred; /* objective and real subjective task
59739+ * credentials (COW) */
59740+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59741+
59742 #ifdef CONFIG_DEBUG_MUTEXES
59743 /* mutex deadlock detection */
59744 struct mutex_waiter *blocked_on;
59745@@ -1538,6 +1564,21 @@ struct task_struct {
59746 unsigned long default_timer_slack_ns;
59747
59748 struct list_head *scm_work_list;
59749+
59750+#ifdef CONFIG_GRKERNSEC
59751+ /* grsecurity */
59752+ struct dentry *gr_chroot_dentry;
59753+ struct acl_subject_label *acl;
59754+ struct acl_role_label *role;
59755+ struct file *exec_file;
59756+ u16 acl_role_id;
59757+ /* is this the task that authenticated to the special role */
59758+ u8 acl_sp_role;
59759+ u8 is_writable;
59760+ u8 brute;
59761+ u8 gr_is_chrooted;
59762+#endif
59763+
59764 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
59765 /* Index of current stored address in ret_stack */
59766 int curr_ret_stack;
59767@@ -1572,6 +1613,57 @@ struct task_struct {
59768 #endif
59769 };
59770
59771+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
59772+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
59773+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
59774+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
59775+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
59776+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
59777+
59778+#ifdef CONFIG_PAX_SOFTMODE
59779+extern int pax_softmode;
59780+#endif
59781+
59782+extern int pax_check_flags(unsigned long *);
59783+
59784+/* if tsk != current then task_lock must be held on it */
59785+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59786+static inline unsigned long pax_get_flags(struct task_struct *tsk)
59787+{
59788+ if (likely(tsk->mm))
59789+ return tsk->mm->pax_flags;
59790+ else
59791+ return 0UL;
59792+}
59793+
59794+/* if tsk != current then task_lock must be held on it */
59795+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
59796+{
59797+ if (likely(tsk->mm)) {
59798+ tsk->mm->pax_flags = flags;
59799+ return 0;
59800+ }
59801+ return -EINVAL;
59802+}
59803+#endif
59804+
59805+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59806+extern void pax_set_initial_flags(struct linux_binprm *bprm);
59807+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59808+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
59809+#endif
59810+
59811+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
59812+extern void pax_report_insns(void *pc, void *sp);
59813+extern void pax_report_refcount_overflow(struct pt_regs *regs);
59814+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
59815+
59816+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
59817+extern void pax_track_stack(void);
59818+#else
59819+static inline void pax_track_stack(void) {}
59820+#endif
59821+
59822 /* Future-safe accessor for struct task_struct's cpus_allowed. */
59823 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
59824
59825@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
59826 #define PF_DUMPCORE 0x00000200 /* dumped core */
59827 #define PF_SIGNALED 0x00000400 /* killed by a signal */
59828 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
59829+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
59830 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
59831 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
59832 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
59833@@ -2055,7 +2148,9 @@ void yield(void);
59834 extern struct exec_domain default_exec_domain;
59835
59836 union thread_union {
59837+#ifndef CONFIG_X86
59838 struct thread_info thread_info;
59839+#endif
59840 unsigned long stack[THREAD_SIZE/sizeof(long)];
59841 };
59842
59843@@ -2088,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
59844 */
59845
59846 extern struct task_struct *find_task_by_vpid(pid_t nr);
59847+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
59848 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
59849 struct pid_namespace *ns);
59850
59851@@ -2224,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
59852 extern void exit_itimers(struct signal_struct *);
59853 extern void flush_itimer_signals(void);
59854
59855-extern NORET_TYPE void do_group_exit(int);
59856+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
59857
59858 extern void daemonize(const char *, ...);
59859 extern int allow_signal(int);
59860@@ -2392,13 +2488,17 @@ static inline unsigned long *end_of_stac
59861
59862 #endif
59863
59864-static inline int object_is_on_stack(void *obj)
59865+static inline int object_starts_on_stack(void *obj)
59866 {
59867- void *stack = task_stack_page(current);
59868+ const void *stack = task_stack_page(current);
59869
59870 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
59871 }
59872
59873+#ifdef CONFIG_PAX_USERCOPY
59874+extern int object_is_on_stack(const void *obj, unsigned long len);
59875+#endif
59876+
59877 extern void thread_info_cache_init(void);
59878
59879 #ifdef CONFIG_DEBUG_STACK_USAGE
59880diff -urNp linux-3.0.7/include/linux/screen_info.h linux-3.0.7/include/linux/screen_info.h
59881--- linux-3.0.7/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
59882+++ linux-3.0.7/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
59883@@ -43,7 +43,8 @@ struct screen_info {
59884 __u16 pages; /* 0x32 */
59885 __u16 vesa_attributes; /* 0x34 */
59886 __u32 capabilities; /* 0x36 */
59887- __u8 _reserved[6]; /* 0x3a */
59888+ __u16 vesapm_size; /* 0x3a */
59889+ __u8 _reserved[4]; /* 0x3c */
59890 } __attribute__((packed));
59891
59892 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
59893diff -urNp linux-3.0.7/include/linux/security.h linux-3.0.7/include/linux/security.h
59894--- linux-3.0.7/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
59895+++ linux-3.0.7/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
59896@@ -36,6 +36,7 @@
59897 #include <linux/key.h>
59898 #include <linux/xfrm.h>
59899 #include <linux/slab.h>
59900+#include <linux/grsecurity.h>
59901 #include <net/flow.h>
59902
59903 /* Maximum number of letters for an LSM name string */
59904diff -urNp linux-3.0.7/include/linux/seq_file.h linux-3.0.7/include/linux/seq_file.h
59905--- linux-3.0.7/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
59906+++ linux-3.0.7/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
59907@@ -32,6 +32,7 @@ struct seq_operations {
59908 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
59909 int (*show) (struct seq_file *m, void *v);
59910 };
59911+typedef struct seq_operations __no_const seq_operations_no_const;
59912
59913 #define SEQ_SKIP 1
59914
59915diff -urNp linux-3.0.7/include/linux/shmem_fs.h linux-3.0.7/include/linux/shmem_fs.h
59916--- linux-3.0.7/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
59917+++ linux-3.0.7/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
59918@@ -10,7 +10,7 @@
59919
59920 #define SHMEM_NR_DIRECT 16
59921
59922-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
59923+#define SHMEM_SYMLINK_INLINE_LEN 64
59924
59925 struct shmem_inode_info {
59926 spinlock_t lock;
59927diff -urNp linux-3.0.7/include/linux/shm.h linux-3.0.7/include/linux/shm.h
59928--- linux-3.0.7/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
59929+++ linux-3.0.7/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
59930@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
59931 pid_t shm_cprid;
59932 pid_t shm_lprid;
59933 struct user_struct *mlock_user;
59934+#ifdef CONFIG_GRKERNSEC
59935+ time_t shm_createtime;
59936+ pid_t shm_lapid;
59937+#endif
59938 };
59939
59940 /* shm_mode upper byte flags */
59941diff -urNp linux-3.0.7/include/linux/skbuff.h linux-3.0.7/include/linux/skbuff.h
59942--- linux-3.0.7/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
59943+++ linux-3.0.7/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
59944@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
59945 */
59946 static inline int skb_queue_empty(const struct sk_buff_head *list)
59947 {
59948- return list->next == (struct sk_buff *)list;
59949+ return list->next == (const struct sk_buff *)list;
59950 }
59951
59952 /**
59953@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
59954 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
59955 const struct sk_buff *skb)
59956 {
59957- return skb->next == (struct sk_buff *)list;
59958+ return skb->next == (const struct sk_buff *)list;
59959 }
59960
59961 /**
59962@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
59963 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
59964 const struct sk_buff *skb)
59965 {
59966- return skb->prev == (struct sk_buff *)list;
59967+ return skb->prev == (const struct sk_buff *)list;
59968 }
59969
59970 /**
59971@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
59972 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
59973 */
59974 #ifndef NET_SKB_PAD
59975-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
59976+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
59977 #endif
59978
59979 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
59980diff -urNp linux-3.0.7/include/linux/slab_def.h linux-3.0.7/include/linux/slab_def.h
59981--- linux-3.0.7/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
59982+++ linux-3.0.7/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
59983@@ -96,10 +96,10 @@ struct kmem_cache {
59984 unsigned long node_allocs;
59985 unsigned long node_frees;
59986 unsigned long node_overflow;
59987- atomic_t allochit;
59988- atomic_t allocmiss;
59989- atomic_t freehit;
59990- atomic_t freemiss;
59991+ atomic_unchecked_t allochit;
59992+ atomic_unchecked_t allocmiss;
59993+ atomic_unchecked_t freehit;
59994+ atomic_unchecked_t freemiss;
59995
59996 /*
59997 * If debugging is enabled, then the allocator can add additional
59998diff -urNp linux-3.0.7/include/linux/slab.h linux-3.0.7/include/linux/slab.h
59999--- linux-3.0.7/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
60000+++ linux-3.0.7/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
60001@@ -11,12 +11,20 @@
60002
60003 #include <linux/gfp.h>
60004 #include <linux/types.h>
60005+#include <linux/err.h>
60006
60007 /*
60008 * Flags to pass to kmem_cache_create().
60009 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60010 */
60011 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60012+
60013+#ifdef CONFIG_PAX_USERCOPY
60014+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60015+#else
60016+#define SLAB_USERCOPY 0x00000000UL
60017+#endif
60018+
60019 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60020 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60021 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60022@@ -87,10 +95,13 @@
60023 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60024 * Both make kfree a no-op.
60025 */
60026-#define ZERO_SIZE_PTR ((void *)16)
60027+#define ZERO_SIZE_PTR \
60028+({ \
60029+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60030+ (void *)(-MAX_ERRNO-1L); \
60031+})
60032
60033-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60034- (unsigned long)ZERO_SIZE_PTR)
60035+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60036
60037 /*
60038 * struct kmem_cache related prototypes
60039@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
60040 void kfree(const void *);
60041 void kzfree(const void *);
60042 size_t ksize(const void *);
60043+void check_object_size(const void *ptr, unsigned long n, bool to);
60044
60045 /*
60046 * Allocator specific definitions. These are mainly used to establish optimized
60047@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
60048
60049 void __init kmem_cache_init_late(void);
60050
60051+#define kmalloc(x, y) \
60052+({ \
60053+ void *___retval; \
60054+ intoverflow_t ___x = (intoverflow_t)x; \
60055+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60056+ ___retval = NULL; \
60057+ else \
60058+ ___retval = kmalloc((size_t)___x, (y)); \
60059+ ___retval; \
60060+})
60061+
60062+#define kmalloc_node(x, y, z) \
60063+({ \
60064+ void *___retval; \
60065+ intoverflow_t ___x = (intoverflow_t)x; \
60066+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60067+ ___retval = NULL; \
60068+ else \
60069+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60070+ ___retval; \
60071+})
60072+
60073+#define kzalloc(x, y) \
60074+({ \
60075+ void *___retval; \
60076+ intoverflow_t ___x = (intoverflow_t)x; \
60077+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60078+ ___retval = NULL; \
60079+ else \
60080+ ___retval = kzalloc((size_t)___x, (y)); \
60081+ ___retval; \
60082+})
60083+
60084+#define __krealloc(x, y, z) \
60085+({ \
60086+ void *___retval; \
60087+ intoverflow_t ___y = (intoverflow_t)y; \
60088+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60089+ ___retval = NULL; \
60090+ else \
60091+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60092+ ___retval; \
60093+})
60094+
60095+#define krealloc(x, y, z) \
60096+({ \
60097+ void *___retval; \
60098+ intoverflow_t ___y = (intoverflow_t)y; \
60099+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60100+ ___retval = NULL; \
60101+ else \
60102+ ___retval = krealloc((x), (size_t)___y, (z)); \
60103+ ___retval; \
60104+})
60105+
60106 #endif /* _LINUX_SLAB_H */
60107diff -urNp linux-3.0.7/include/linux/slub_def.h linux-3.0.7/include/linux/slub_def.h
60108--- linux-3.0.7/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
60109+++ linux-3.0.7/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
60110@@ -82,7 +82,7 @@ struct kmem_cache {
60111 struct kmem_cache_order_objects max;
60112 struct kmem_cache_order_objects min;
60113 gfp_t allocflags; /* gfp flags to use on each alloc */
60114- int refcount; /* Refcount for slab cache destroy */
60115+ atomic_t refcount; /* Refcount for slab cache destroy */
60116 void (*ctor)(void *);
60117 int inuse; /* Offset to metadata */
60118 int align; /* Alignment */
60119@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
60120 }
60121
60122 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60123-void *__kmalloc(size_t size, gfp_t flags);
60124+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60125
60126 static __always_inline void *
60127 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60128diff -urNp linux-3.0.7/include/linux/sonet.h linux-3.0.7/include/linux/sonet.h
60129--- linux-3.0.7/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
60130+++ linux-3.0.7/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
60131@@ -61,7 +61,7 @@ struct sonet_stats {
60132 #include <asm/atomic.h>
60133
60134 struct k_sonet_stats {
60135-#define __HANDLE_ITEM(i) atomic_t i
60136+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60137 __SONET_ITEMS
60138 #undef __HANDLE_ITEM
60139 };
60140diff -urNp linux-3.0.7/include/linux/sunrpc/clnt.h linux-3.0.7/include/linux/sunrpc/clnt.h
60141--- linux-3.0.7/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
60142+++ linux-3.0.7/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
60143@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60144 {
60145 switch (sap->sa_family) {
60146 case AF_INET:
60147- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60148+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60149 case AF_INET6:
60150- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60151+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60152 }
60153 return 0;
60154 }
60155@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60156 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60157 const struct sockaddr *src)
60158 {
60159- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60160+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60161 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60162
60163 dsin->sin_family = ssin->sin_family;
60164@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60165 if (sa->sa_family != AF_INET6)
60166 return 0;
60167
60168- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60169+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60170 }
60171
60172 #endif /* __KERNEL__ */
60173diff -urNp linux-3.0.7/include/linux/sunrpc/svc_rdma.h linux-3.0.7/include/linux/sunrpc/svc_rdma.h
60174--- linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
60175+++ linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
60176@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60177 extern unsigned int svcrdma_max_requests;
60178 extern unsigned int svcrdma_max_req_size;
60179
60180-extern atomic_t rdma_stat_recv;
60181-extern atomic_t rdma_stat_read;
60182-extern atomic_t rdma_stat_write;
60183-extern atomic_t rdma_stat_sq_starve;
60184-extern atomic_t rdma_stat_rq_starve;
60185-extern atomic_t rdma_stat_rq_poll;
60186-extern atomic_t rdma_stat_rq_prod;
60187-extern atomic_t rdma_stat_sq_poll;
60188-extern atomic_t rdma_stat_sq_prod;
60189+extern atomic_unchecked_t rdma_stat_recv;
60190+extern atomic_unchecked_t rdma_stat_read;
60191+extern atomic_unchecked_t rdma_stat_write;
60192+extern atomic_unchecked_t rdma_stat_sq_starve;
60193+extern atomic_unchecked_t rdma_stat_rq_starve;
60194+extern atomic_unchecked_t rdma_stat_rq_poll;
60195+extern atomic_unchecked_t rdma_stat_rq_prod;
60196+extern atomic_unchecked_t rdma_stat_sq_poll;
60197+extern atomic_unchecked_t rdma_stat_sq_prod;
60198
60199 #define RPCRDMA_VERSION 1
60200
60201diff -urNp linux-3.0.7/include/linux/sysctl.h linux-3.0.7/include/linux/sysctl.h
60202--- linux-3.0.7/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
60203+++ linux-3.0.7/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
60204@@ -155,7 +155,11 @@ enum
60205 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60206 };
60207
60208-
60209+#ifdef CONFIG_PAX_SOFTMODE
60210+enum {
60211+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60212+};
60213+#endif
60214
60215 /* CTL_VM names: */
60216 enum
60217@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60218
60219 extern int proc_dostring(struct ctl_table *, int,
60220 void __user *, size_t *, loff_t *);
60221+extern int proc_dostring_modpriv(struct ctl_table *, int,
60222+ void __user *, size_t *, loff_t *);
60223 extern int proc_dointvec(struct ctl_table *, int,
60224 void __user *, size_t *, loff_t *);
60225 extern int proc_dointvec_minmax(struct ctl_table *, int,
60226diff -urNp linux-3.0.7/include/linux/tty_ldisc.h linux-3.0.7/include/linux/tty_ldisc.h
60227--- linux-3.0.7/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
60228+++ linux-3.0.7/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
60229@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60230
60231 struct module *owner;
60232
60233- int refcount;
60234+ atomic_t refcount;
60235 };
60236
60237 struct tty_ldisc {
60238diff -urNp linux-3.0.7/include/linux/types.h linux-3.0.7/include/linux/types.h
60239--- linux-3.0.7/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
60240+++ linux-3.0.7/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
60241@@ -213,10 +213,26 @@ typedef struct {
60242 int counter;
60243 } atomic_t;
60244
60245+#ifdef CONFIG_PAX_REFCOUNT
60246+typedef struct {
60247+ int counter;
60248+} atomic_unchecked_t;
60249+#else
60250+typedef atomic_t atomic_unchecked_t;
60251+#endif
60252+
60253 #ifdef CONFIG_64BIT
60254 typedef struct {
60255 long counter;
60256 } atomic64_t;
60257+
60258+#ifdef CONFIG_PAX_REFCOUNT
60259+typedef struct {
60260+ long counter;
60261+} atomic64_unchecked_t;
60262+#else
60263+typedef atomic64_t atomic64_unchecked_t;
60264+#endif
60265 #endif
60266
60267 struct list_head {
60268diff -urNp linux-3.0.7/include/linux/uaccess.h linux-3.0.7/include/linux/uaccess.h
60269--- linux-3.0.7/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
60270+++ linux-3.0.7/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
60271@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60272 long ret; \
60273 mm_segment_t old_fs = get_fs(); \
60274 \
60275- set_fs(KERNEL_DS); \
60276 pagefault_disable(); \
60277- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60278- pagefault_enable(); \
60279+ set_fs(KERNEL_DS); \
60280+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60281 set_fs(old_fs); \
60282+ pagefault_enable(); \
60283 ret; \
60284 })
60285
60286diff -urNp linux-3.0.7/include/linux/unaligned/access_ok.h linux-3.0.7/include/linux/unaligned/access_ok.h
60287--- linux-3.0.7/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
60288+++ linux-3.0.7/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
60289@@ -6,32 +6,32 @@
60290
60291 static inline u16 get_unaligned_le16(const void *p)
60292 {
60293- return le16_to_cpup((__le16 *)p);
60294+ return le16_to_cpup((const __le16 *)p);
60295 }
60296
60297 static inline u32 get_unaligned_le32(const void *p)
60298 {
60299- return le32_to_cpup((__le32 *)p);
60300+ return le32_to_cpup((const __le32 *)p);
60301 }
60302
60303 static inline u64 get_unaligned_le64(const void *p)
60304 {
60305- return le64_to_cpup((__le64 *)p);
60306+ return le64_to_cpup((const __le64 *)p);
60307 }
60308
60309 static inline u16 get_unaligned_be16(const void *p)
60310 {
60311- return be16_to_cpup((__be16 *)p);
60312+ return be16_to_cpup((const __be16 *)p);
60313 }
60314
60315 static inline u32 get_unaligned_be32(const void *p)
60316 {
60317- return be32_to_cpup((__be32 *)p);
60318+ return be32_to_cpup((const __be32 *)p);
60319 }
60320
60321 static inline u64 get_unaligned_be64(const void *p)
60322 {
60323- return be64_to_cpup((__be64 *)p);
60324+ return be64_to_cpup((const __be64 *)p);
60325 }
60326
60327 static inline void put_unaligned_le16(u16 val, void *p)
60328diff -urNp linux-3.0.7/include/linux/vermagic.h linux-3.0.7/include/linux/vermagic.h
60329--- linux-3.0.7/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
60330+++ linux-3.0.7/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
60331@@ -26,9 +26,28 @@
60332 #define MODULE_ARCH_VERMAGIC ""
60333 #endif
60334
60335+#ifdef CONFIG_PAX_REFCOUNT
60336+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60337+#else
60338+#define MODULE_PAX_REFCOUNT ""
60339+#endif
60340+
60341+#ifdef CONSTIFY_PLUGIN
60342+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60343+#else
60344+#define MODULE_CONSTIFY_PLUGIN ""
60345+#endif
60346+
60347+#ifdef CONFIG_GRKERNSEC
60348+#define MODULE_GRSEC "GRSEC "
60349+#else
60350+#define MODULE_GRSEC ""
60351+#endif
60352+
60353 #define VERMAGIC_STRING \
60354 UTS_RELEASE " " \
60355 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60356 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60357- MODULE_ARCH_VERMAGIC
60358+ MODULE_ARCH_VERMAGIC \
60359+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
60360
60361diff -urNp linux-3.0.7/include/linux/vmalloc.h linux-3.0.7/include/linux/vmalloc.h
60362--- linux-3.0.7/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
60363+++ linux-3.0.7/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
60364@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60365 #define VM_MAP 0x00000004 /* vmap()ed pages */
60366 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60367 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60368+
60369+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60370+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60371+#endif
60372+
60373 /* bits [20..32] reserved for arch specific ioremap internals */
60374
60375 /*
60376@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60377 # endif
60378 #endif
60379
60380+#define vmalloc(x) \
60381+({ \
60382+ void *___retval; \
60383+ intoverflow_t ___x = (intoverflow_t)x; \
60384+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60385+ ___retval = NULL; \
60386+ else \
60387+ ___retval = vmalloc((unsigned long)___x); \
60388+ ___retval; \
60389+})
60390+
60391+#define vzalloc(x) \
60392+({ \
60393+ void *___retval; \
60394+ intoverflow_t ___x = (intoverflow_t)x; \
60395+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60396+ ___retval = NULL; \
60397+ else \
60398+ ___retval = vzalloc((unsigned long)___x); \
60399+ ___retval; \
60400+})
60401+
60402+#define __vmalloc(x, y, z) \
60403+({ \
60404+ void *___retval; \
60405+ intoverflow_t ___x = (intoverflow_t)x; \
60406+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60407+ ___retval = NULL; \
60408+ else \
60409+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60410+ ___retval; \
60411+})
60412+
60413+#define vmalloc_user(x) \
60414+({ \
60415+ void *___retval; \
60416+ intoverflow_t ___x = (intoverflow_t)x; \
60417+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60418+ ___retval = NULL; \
60419+ else \
60420+ ___retval = vmalloc_user((unsigned long)___x); \
60421+ ___retval; \
60422+})
60423+
60424+#define vmalloc_exec(x) \
60425+({ \
60426+ void *___retval; \
60427+ intoverflow_t ___x = (intoverflow_t)x; \
60428+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60429+ ___retval = NULL; \
60430+ else \
60431+ ___retval = vmalloc_exec((unsigned long)___x); \
60432+ ___retval; \
60433+})
60434+
60435+#define vmalloc_node(x, y) \
60436+({ \
60437+ void *___retval; \
60438+ intoverflow_t ___x = (intoverflow_t)x; \
60439+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60440+ ___retval = NULL; \
60441+ else \
60442+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60443+ ___retval; \
60444+})
60445+
60446+#define vzalloc_node(x, y) \
60447+({ \
60448+ void *___retval; \
60449+ intoverflow_t ___x = (intoverflow_t)x; \
60450+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60451+ ___retval = NULL; \
60452+ else \
60453+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60454+ ___retval; \
60455+})
60456+
60457+#define vmalloc_32(x) \
60458+({ \
60459+ void *___retval; \
60460+ intoverflow_t ___x = (intoverflow_t)x; \
60461+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60462+ ___retval = NULL; \
60463+ else \
60464+ ___retval = vmalloc_32((unsigned long)___x); \
60465+ ___retval; \
60466+})
60467+
60468+#define vmalloc_32_user(x) \
60469+({ \
60470+void *___retval; \
60471+ intoverflow_t ___x = (intoverflow_t)x; \
60472+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60473+ ___retval = NULL; \
60474+ else \
60475+ ___retval = vmalloc_32_user((unsigned long)___x);\
60476+ ___retval; \
60477+})
60478+
60479 #endif /* _LINUX_VMALLOC_H */
60480diff -urNp linux-3.0.7/include/linux/vmstat.h linux-3.0.7/include/linux/vmstat.h
60481--- linux-3.0.7/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
60482+++ linux-3.0.7/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
60483@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60484 /*
60485 * Zone based page accounting with per cpu differentials.
60486 */
60487-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60488+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60489
60490 static inline void zone_page_state_add(long x, struct zone *zone,
60491 enum zone_stat_item item)
60492 {
60493- atomic_long_add(x, &zone->vm_stat[item]);
60494- atomic_long_add(x, &vm_stat[item]);
60495+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60496+ atomic_long_add_unchecked(x, &vm_stat[item]);
60497 }
60498
60499 static inline unsigned long global_page_state(enum zone_stat_item item)
60500 {
60501- long x = atomic_long_read(&vm_stat[item]);
60502+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60503 #ifdef CONFIG_SMP
60504 if (x < 0)
60505 x = 0;
60506@@ -109,7 +109,7 @@ static inline unsigned long global_page_
60507 static inline unsigned long zone_page_state(struct zone *zone,
60508 enum zone_stat_item item)
60509 {
60510- long x = atomic_long_read(&zone->vm_stat[item]);
60511+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60512 #ifdef CONFIG_SMP
60513 if (x < 0)
60514 x = 0;
60515@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60516 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60517 enum zone_stat_item item)
60518 {
60519- long x = atomic_long_read(&zone->vm_stat[item]);
60520+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60521
60522 #ifdef CONFIG_SMP
60523 int cpu;
60524@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60525
60526 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60527 {
60528- atomic_long_inc(&zone->vm_stat[item]);
60529- atomic_long_inc(&vm_stat[item]);
60530+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60531+ atomic_long_inc_unchecked(&vm_stat[item]);
60532 }
60533
60534 static inline void __inc_zone_page_state(struct page *page,
60535@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60536
60537 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60538 {
60539- atomic_long_dec(&zone->vm_stat[item]);
60540- atomic_long_dec(&vm_stat[item]);
60541+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60542+ atomic_long_dec_unchecked(&vm_stat[item]);
60543 }
60544
60545 static inline void __dec_zone_page_state(struct page *page,
60546diff -urNp linux-3.0.7/include/media/saa7146_vv.h linux-3.0.7/include/media/saa7146_vv.h
60547--- linux-3.0.7/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
60548+++ linux-3.0.7/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
60549@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60550 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60551
60552 /* the extension can override this */
60553- struct v4l2_ioctl_ops ops;
60554+ v4l2_ioctl_ops_no_const ops;
60555 /* pointer to the saa7146 core ops */
60556 const struct v4l2_ioctl_ops *core_ops;
60557
60558diff -urNp linux-3.0.7/include/media/v4l2-dev.h linux-3.0.7/include/media/v4l2-dev.h
60559--- linux-3.0.7/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
60560+++ linux-3.0.7/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
60561@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
60562
60563
60564 struct v4l2_file_operations {
60565- struct module *owner;
60566+ struct module * const owner;
60567 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60568 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60569 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60570@@ -68,6 +68,7 @@ struct v4l2_file_operations {
60571 int (*open) (struct file *);
60572 int (*release) (struct file *);
60573 };
60574+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
60575
60576 /*
60577 * Newer version of video_device, handled by videodev2.c
60578diff -urNp linux-3.0.7/include/media/v4l2-ioctl.h linux-3.0.7/include/media/v4l2-ioctl.h
60579--- linux-3.0.7/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
60580+++ linux-3.0.7/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
60581@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
60582 long (*vidioc_default) (struct file *file, void *fh,
60583 bool valid_prio, int cmd, void *arg);
60584 };
60585+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60586
60587
60588 /* v4l debugging and diagnostics */
60589diff -urNp linux-3.0.7/include/net/caif/cfctrl.h linux-3.0.7/include/net/caif/cfctrl.h
60590--- linux-3.0.7/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
60591+++ linux-3.0.7/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
60592@@ -52,7 +52,7 @@ struct cfctrl_rsp {
60593 void (*radioset_rsp)(void);
60594 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
60595 struct cflayer *client_layer);
60596-};
60597+} __no_const;
60598
60599 /* Link Setup Parameters for CAIF-Links. */
60600 struct cfctrl_link_param {
60601@@ -101,8 +101,8 @@ struct cfctrl_request_info {
60602 struct cfctrl {
60603 struct cfsrvl serv;
60604 struct cfctrl_rsp res;
60605- atomic_t req_seq_no;
60606- atomic_t rsp_seq_no;
60607+ atomic_unchecked_t req_seq_no;
60608+ atomic_unchecked_t rsp_seq_no;
60609 struct list_head list;
60610 /* Protects from simultaneous access to first_req list */
60611 spinlock_t info_list_lock;
60612diff -urNp linux-3.0.7/include/net/flow.h linux-3.0.7/include/net/flow.h
60613--- linux-3.0.7/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
60614+++ linux-3.0.7/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
60615@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
60616 u8 dir, flow_resolve_t resolver, void *ctx);
60617
60618 extern void flow_cache_flush(void);
60619-extern atomic_t flow_cache_genid;
60620+extern atomic_unchecked_t flow_cache_genid;
60621
60622 #endif
60623diff -urNp linux-3.0.7/include/net/inetpeer.h linux-3.0.7/include/net/inetpeer.h
60624--- linux-3.0.7/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
60625+++ linux-3.0.7/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
60626@@ -43,8 +43,8 @@ struct inet_peer {
60627 */
60628 union {
60629 struct {
60630- atomic_t rid; /* Frag reception counter */
60631- atomic_t ip_id_count; /* IP ID for the next packet */
60632+ atomic_unchecked_t rid; /* Frag reception counter */
60633+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
60634 __u32 tcp_ts;
60635 __u32 tcp_ts_stamp;
60636 u32 metrics[RTAX_MAX];
60637@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
60638 {
60639 more++;
60640 inet_peer_refcheck(p);
60641- return atomic_add_return(more, &p->ip_id_count) - more;
60642+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
60643 }
60644
60645 #endif /* _NET_INETPEER_H */
60646diff -urNp linux-3.0.7/include/net/ip_fib.h linux-3.0.7/include/net/ip_fib.h
60647--- linux-3.0.7/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
60648+++ linux-3.0.7/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
60649@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
60650
60651 #define FIB_RES_SADDR(net, res) \
60652 ((FIB_RES_NH(res).nh_saddr_genid == \
60653- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
60654+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
60655 FIB_RES_NH(res).nh_saddr : \
60656 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
60657 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
60658diff -urNp linux-3.0.7/include/net/ip_vs.h linux-3.0.7/include/net/ip_vs.h
60659--- linux-3.0.7/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
60660+++ linux-3.0.7/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
60661@@ -509,7 +509,7 @@ struct ip_vs_conn {
60662 struct ip_vs_conn *control; /* Master control connection */
60663 atomic_t n_control; /* Number of controlled ones */
60664 struct ip_vs_dest *dest; /* real server */
60665- atomic_t in_pkts; /* incoming packet counter */
60666+ atomic_unchecked_t in_pkts; /* incoming packet counter */
60667
60668 /* packet transmitter for different forwarding methods. If it
60669 mangles the packet, it must return NF_DROP or better NF_STOLEN,
60670@@ -647,7 +647,7 @@ struct ip_vs_dest {
60671 __be16 port; /* port number of the server */
60672 union nf_inet_addr addr; /* IP address of the server */
60673 volatile unsigned flags; /* dest status flags */
60674- atomic_t conn_flags; /* flags to copy to conn */
60675+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
60676 atomic_t weight; /* server weight */
60677
60678 atomic_t refcnt; /* reference counter */
60679diff -urNp linux-3.0.7/include/net/irda/ircomm_core.h linux-3.0.7/include/net/irda/ircomm_core.h
60680--- linux-3.0.7/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
60681+++ linux-3.0.7/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
60682@@ -51,7 +51,7 @@ typedef struct {
60683 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
60684 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
60685 struct ircomm_info *);
60686-} call_t;
60687+} __no_const call_t;
60688
60689 struct ircomm_cb {
60690 irda_queue_t queue;
60691diff -urNp linux-3.0.7/include/net/irda/ircomm_tty.h linux-3.0.7/include/net/irda/ircomm_tty.h
60692--- linux-3.0.7/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
60693+++ linux-3.0.7/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
60694@@ -35,6 +35,7 @@
60695 #include <linux/termios.h>
60696 #include <linux/timer.h>
60697 #include <linux/tty.h> /* struct tty_struct */
60698+#include <asm/local.h>
60699
60700 #include <net/irda/irias_object.h>
60701 #include <net/irda/ircomm_core.h>
60702@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
60703 unsigned short close_delay;
60704 unsigned short closing_wait; /* time to wait before closing */
60705
60706- int open_count;
60707- int blocked_open; /* # of blocked opens */
60708+ local_t open_count;
60709+ local_t blocked_open; /* # of blocked opens */
60710
60711 /* Protect concurent access to :
60712 * o self->open_count
60713diff -urNp linux-3.0.7/include/net/iucv/af_iucv.h linux-3.0.7/include/net/iucv/af_iucv.h
60714--- linux-3.0.7/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
60715+++ linux-3.0.7/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
60716@@ -87,7 +87,7 @@ struct iucv_sock {
60717 struct iucv_sock_list {
60718 struct hlist_head head;
60719 rwlock_t lock;
60720- atomic_t autobind_name;
60721+ atomic_unchecked_t autobind_name;
60722 };
60723
60724 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
60725diff -urNp linux-3.0.7/include/net/lapb.h linux-3.0.7/include/net/lapb.h
60726--- linux-3.0.7/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
60727+++ linux-3.0.7/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
60728@@ -95,7 +95,7 @@ struct lapb_cb {
60729 struct sk_buff_head write_queue;
60730 struct sk_buff_head ack_queue;
60731 unsigned char window;
60732- struct lapb_register_struct callbacks;
60733+ struct lapb_register_struct *callbacks;
60734
60735 /* FRMR control information */
60736 struct lapb_frame frmr_data;
60737diff -urNp linux-3.0.7/include/net/neighbour.h linux-3.0.7/include/net/neighbour.h
60738--- linux-3.0.7/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
60739+++ linux-3.0.7/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
60740@@ -124,7 +124,7 @@ struct neigh_ops {
60741 int (*connected_output)(struct sk_buff*);
60742 int (*hh_output)(struct sk_buff*);
60743 int (*queue_xmit)(struct sk_buff*);
60744-};
60745+} __do_const;
60746
60747 struct pneigh_entry {
60748 struct pneigh_entry *next;
60749diff -urNp linux-3.0.7/include/net/netlink.h linux-3.0.7/include/net/netlink.h
60750--- linux-3.0.7/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
60751+++ linux-3.0.7/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
60752@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
60753 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
60754 {
60755 if (mark)
60756- skb_trim(skb, (unsigned char *) mark - skb->data);
60757+ skb_trim(skb, (const unsigned char *) mark - skb->data);
60758 }
60759
60760 /**
60761diff -urNp linux-3.0.7/include/net/netns/ipv4.h linux-3.0.7/include/net/netns/ipv4.h
60762--- linux-3.0.7/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
60763+++ linux-3.0.7/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
60764@@ -56,8 +56,8 @@ struct netns_ipv4 {
60765
60766 unsigned int sysctl_ping_group_range[2];
60767
60768- atomic_t rt_genid;
60769- atomic_t dev_addr_genid;
60770+ atomic_unchecked_t rt_genid;
60771+ atomic_unchecked_t dev_addr_genid;
60772
60773 #ifdef CONFIG_IP_MROUTE
60774 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
60775diff -urNp linux-3.0.7/include/net/sctp/sctp.h linux-3.0.7/include/net/sctp/sctp.h
60776--- linux-3.0.7/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
60777+++ linux-3.0.7/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
60778@@ -315,9 +315,9 @@ do { \
60779
60780 #else /* SCTP_DEBUG */
60781
60782-#define SCTP_DEBUG_PRINTK(whatever...)
60783-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
60784-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
60785+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
60786+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
60787+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
60788 #define SCTP_ENABLE_DEBUG
60789 #define SCTP_DISABLE_DEBUG
60790 #define SCTP_ASSERT(expr, str, func)
60791diff -urNp linux-3.0.7/include/net/sock.h linux-3.0.7/include/net/sock.h
60792--- linux-3.0.7/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
60793+++ linux-3.0.7/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
60794@@ -277,7 +277,7 @@ struct sock {
60795 #ifdef CONFIG_RPS
60796 __u32 sk_rxhash;
60797 #endif
60798- atomic_t sk_drops;
60799+ atomic_unchecked_t sk_drops;
60800 int sk_rcvbuf;
60801
60802 struct sk_filter __rcu *sk_filter;
60803@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
60804 }
60805
60806 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
60807- char __user *from, char *to,
60808+ char __user *from, unsigned char *to,
60809 int copy, int offset)
60810 {
60811 if (skb->ip_summed == CHECKSUM_NONE) {
60812diff -urNp linux-3.0.7/include/net/tcp.h linux-3.0.7/include/net/tcp.h
60813--- linux-3.0.7/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
60814+++ linux-3.0.7/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
60815@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
60816 struct tcp_seq_afinfo {
60817 char *name;
60818 sa_family_t family;
60819- struct file_operations seq_fops;
60820- struct seq_operations seq_ops;
60821+ file_operations_no_const seq_fops;
60822+ seq_operations_no_const seq_ops;
60823 };
60824
60825 struct tcp_iter_state {
60826diff -urNp linux-3.0.7/include/net/udp.h linux-3.0.7/include/net/udp.h
60827--- linux-3.0.7/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
60828+++ linux-3.0.7/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
60829@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
60830 char *name;
60831 sa_family_t family;
60832 struct udp_table *udp_table;
60833- struct file_operations seq_fops;
60834- struct seq_operations seq_ops;
60835+ file_operations_no_const seq_fops;
60836+ seq_operations_no_const seq_ops;
60837 };
60838
60839 struct udp_iter_state {
60840diff -urNp linux-3.0.7/include/net/xfrm.h linux-3.0.7/include/net/xfrm.h
60841--- linux-3.0.7/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
60842+++ linux-3.0.7/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
60843@@ -505,7 +505,7 @@ struct xfrm_policy {
60844 struct timer_list timer;
60845
60846 struct flow_cache_object flo;
60847- atomic_t genid;
60848+ atomic_unchecked_t genid;
60849 u32 priority;
60850 u32 index;
60851 struct xfrm_mark mark;
60852diff -urNp linux-3.0.7/include/rdma/iw_cm.h linux-3.0.7/include/rdma/iw_cm.h
60853--- linux-3.0.7/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
60854+++ linux-3.0.7/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
60855@@ -120,7 +120,7 @@ struct iw_cm_verbs {
60856 int backlog);
60857
60858 int (*destroy_listen)(struct iw_cm_id *cm_id);
60859-};
60860+} __no_const;
60861
60862 /**
60863 * iw_create_cm_id - Create an IW CM identifier.
60864diff -urNp linux-3.0.7/include/scsi/libfc.h linux-3.0.7/include/scsi/libfc.h
60865--- linux-3.0.7/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
60866+++ linux-3.0.7/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
60867@@ -750,6 +750,7 @@ struct libfc_function_template {
60868 */
60869 void (*disc_stop_final) (struct fc_lport *);
60870 };
60871+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
60872
60873 /**
60874 * struct fc_disc - Discovery context
60875@@ -853,7 +854,7 @@ struct fc_lport {
60876 struct fc_vport *vport;
60877
60878 /* Operational Information */
60879- struct libfc_function_template tt;
60880+ libfc_function_template_no_const tt;
60881 u8 link_up;
60882 u8 qfull;
60883 enum fc_lport_state state;
60884diff -urNp linux-3.0.7/include/scsi/scsi_device.h linux-3.0.7/include/scsi/scsi_device.h
60885--- linux-3.0.7/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
60886+++ linux-3.0.7/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
60887@@ -161,9 +161,9 @@ struct scsi_device {
60888 unsigned int max_device_blocked; /* what device_blocked counts down from */
60889 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
60890
60891- atomic_t iorequest_cnt;
60892- atomic_t iodone_cnt;
60893- atomic_t ioerr_cnt;
60894+ atomic_unchecked_t iorequest_cnt;
60895+ atomic_unchecked_t iodone_cnt;
60896+ atomic_unchecked_t ioerr_cnt;
60897
60898 struct device sdev_gendev,
60899 sdev_dev;
60900diff -urNp linux-3.0.7/include/scsi/scsi_transport_fc.h linux-3.0.7/include/scsi/scsi_transport_fc.h
60901--- linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
60902+++ linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
60903@@ -711,7 +711,7 @@ struct fc_function_template {
60904 unsigned long show_host_system_hostname:1;
60905
60906 unsigned long disable_target_scan:1;
60907-};
60908+} __do_const;
60909
60910
60911 /**
60912diff -urNp linux-3.0.7/include/sound/ak4xxx-adda.h linux-3.0.7/include/sound/ak4xxx-adda.h
60913--- linux-3.0.7/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
60914+++ linux-3.0.7/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
60915@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
60916 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
60917 unsigned char val);
60918 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
60919-};
60920+} __no_const;
60921
60922 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
60923
60924diff -urNp linux-3.0.7/include/sound/hwdep.h linux-3.0.7/include/sound/hwdep.h
60925--- linux-3.0.7/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
60926+++ linux-3.0.7/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
60927@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
60928 struct snd_hwdep_dsp_status *status);
60929 int (*dsp_load)(struct snd_hwdep *hw,
60930 struct snd_hwdep_dsp_image *image);
60931-};
60932+} __no_const;
60933
60934 struct snd_hwdep {
60935 struct snd_card *card;
60936diff -urNp linux-3.0.7/include/sound/info.h linux-3.0.7/include/sound/info.h
60937--- linux-3.0.7/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
60938+++ linux-3.0.7/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
60939@@ -44,7 +44,7 @@ struct snd_info_entry_text {
60940 struct snd_info_buffer *buffer);
60941 void (*write)(struct snd_info_entry *entry,
60942 struct snd_info_buffer *buffer);
60943-};
60944+} __no_const;
60945
60946 struct snd_info_entry_ops {
60947 int (*open)(struct snd_info_entry *entry,
60948diff -urNp linux-3.0.7/include/sound/pcm.h linux-3.0.7/include/sound/pcm.h
60949--- linux-3.0.7/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
60950+++ linux-3.0.7/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
60951@@ -81,6 +81,7 @@ struct snd_pcm_ops {
60952 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
60953 int (*ack)(struct snd_pcm_substream *substream);
60954 };
60955+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
60956
60957 /*
60958 *
60959diff -urNp linux-3.0.7/include/sound/sb16_csp.h linux-3.0.7/include/sound/sb16_csp.h
60960--- linux-3.0.7/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
60961+++ linux-3.0.7/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
60962@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
60963 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
60964 int (*csp_stop) (struct snd_sb_csp * p);
60965 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
60966-};
60967+} __no_const;
60968
60969 /*
60970 * CSP private data
60971diff -urNp linux-3.0.7/include/sound/soc.h linux-3.0.7/include/sound/soc.h
60972--- linux-3.0.7/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
60973+++ linux-3.0.7/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
60974@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
60975
60976 /* platform stream ops */
60977 struct snd_pcm_ops *ops;
60978-};
60979+} __do_const;
60980
60981 struct snd_soc_platform {
60982 const char *name;
60983diff -urNp linux-3.0.7/include/sound/ymfpci.h linux-3.0.7/include/sound/ymfpci.h
60984--- linux-3.0.7/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
60985+++ linux-3.0.7/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
60986@@ -358,7 +358,7 @@ struct snd_ymfpci {
60987 spinlock_t reg_lock;
60988 spinlock_t voice_lock;
60989 wait_queue_head_t interrupt_sleep;
60990- atomic_t interrupt_sleep_count;
60991+ atomic_unchecked_t interrupt_sleep_count;
60992 struct snd_info_entry *proc_entry;
60993 const struct firmware *dsp_microcode;
60994 const struct firmware *controller_microcode;
60995diff -urNp linux-3.0.7/include/target/target_core_base.h linux-3.0.7/include/target/target_core_base.h
60996--- linux-3.0.7/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
60997+++ linux-3.0.7/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
60998@@ -364,7 +364,7 @@ struct t10_reservation_ops {
60999 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61000 int (*t10_pr_register)(struct se_cmd *);
61001 int (*t10_pr_clear)(struct se_cmd *);
61002-};
61003+} __no_const;
61004
61005 struct t10_reservation_template {
61006 /* Reservation effects all target ports */
61007@@ -432,8 +432,8 @@ struct se_transport_task {
61008 atomic_t t_task_cdbs_left;
61009 atomic_t t_task_cdbs_ex_left;
61010 atomic_t t_task_cdbs_timeout_left;
61011- atomic_t t_task_cdbs_sent;
61012- atomic_t t_transport_aborted;
61013+ atomic_unchecked_t t_task_cdbs_sent;
61014+ atomic_unchecked_t t_transport_aborted;
61015 atomic_t t_transport_active;
61016 atomic_t t_transport_complete;
61017 atomic_t t_transport_queue_active;
61018@@ -774,7 +774,7 @@ struct se_device {
61019 atomic_t active_cmds;
61020 atomic_t simple_cmds;
61021 atomic_t depth_left;
61022- atomic_t dev_ordered_id;
61023+ atomic_unchecked_t dev_ordered_id;
61024 atomic_t dev_tur_active;
61025 atomic_t execute_tasks;
61026 atomic_t dev_status_thr_count;
61027diff -urNp linux-3.0.7/include/trace/events/irq.h linux-3.0.7/include/trace/events/irq.h
61028--- linux-3.0.7/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
61029+++ linux-3.0.7/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
61030@@ -36,7 +36,7 @@ struct softirq_action;
61031 */
61032 TRACE_EVENT(irq_handler_entry,
61033
61034- TP_PROTO(int irq, struct irqaction *action),
61035+ TP_PROTO(int irq, const struct irqaction *action),
61036
61037 TP_ARGS(irq, action),
61038
61039@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61040 */
61041 TRACE_EVENT(irq_handler_exit,
61042
61043- TP_PROTO(int irq, struct irqaction *action, int ret),
61044+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61045
61046 TP_ARGS(irq, action, ret),
61047
61048diff -urNp linux-3.0.7/include/video/udlfb.h linux-3.0.7/include/video/udlfb.h
61049--- linux-3.0.7/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
61050+++ linux-3.0.7/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
61051@@ -51,10 +51,10 @@ struct dlfb_data {
61052 int base8;
61053 u32 pseudo_palette[256];
61054 /* blit-only rendering path metrics, exposed through sysfs */
61055- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61056- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61057- atomic_t bytes_sent; /* to usb, after compression including overhead */
61058- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61059+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61060+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61061+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61062+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61063 };
61064
61065 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61066diff -urNp linux-3.0.7/include/video/uvesafb.h linux-3.0.7/include/video/uvesafb.h
61067--- linux-3.0.7/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
61068+++ linux-3.0.7/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
61069@@ -177,6 +177,7 @@ struct uvesafb_par {
61070 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61071 u8 pmi_setpal; /* PMI for palette changes */
61072 u16 *pmi_base; /* protected mode interface location */
61073+ u8 *pmi_code; /* protected mode code location */
61074 void *pmi_start;
61075 void *pmi_pal;
61076 u8 *vbe_state_orig; /*
61077diff -urNp linux-3.0.7/init/do_mounts.c linux-3.0.7/init/do_mounts.c
61078--- linux-3.0.7/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
61079+++ linux-3.0.7/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
61080@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61081
61082 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61083 {
61084- int err = sys_mount(name, "/root", fs, flags, data);
61085+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61086 if (err)
61087 return err;
61088
61089- sys_chdir((const char __user __force *)"/root");
61090+ sys_chdir((const char __force_user*)"/root");
61091 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61092 printk(KERN_INFO
61093 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61094@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61095 va_start(args, fmt);
61096 vsprintf(buf, fmt, args);
61097 va_end(args);
61098- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61099+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61100 if (fd >= 0) {
61101 sys_ioctl(fd, FDEJECT, 0);
61102 sys_close(fd);
61103 }
61104 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61105- fd = sys_open("/dev/console", O_RDWR, 0);
61106+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61107 if (fd >= 0) {
61108 sys_ioctl(fd, TCGETS, (long)&termios);
61109 termios.c_lflag &= ~ICANON;
61110 sys_ioctl(fd, TCSETSF, (long)&termios);
61111- sys_read(fd, &c, 1);
61112+ sys_read(fd, (char __user *)&c, 1);
61113 termios.c_lflag |= ICANON;
61114 sys_ioctl(fd, TCSETSF, (long)&termios);
61115 sys_close(fd);
61116@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61117 mount_root();
61118 out:
61119 devtmpfs_mount("dev");
61120- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61121- sys_chroot((const char __user __force *)".");
61122+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61123+ sys_chroot((const char __force_user *)".");
61124 }
61125diff -urNp linux-3.0.7/init/do_mounts.h linux-3.0.7/init/do_mounts.h
61126--- linux-3.0.7/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
61127+++ linux-3.0.7/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
61128@@ -15,15 +15,15 @@ extern int root_mountflags;
61129
61130 static inline int create_dev(char *name, dev_t dev)
61131 {
61132- sys_unlink(name);
61133- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61134+ sys_unlink((char __force_user *)name);
61135+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61136 }
61137
61138 #if BITS_PER_LONG == 32
61139 static inline u32 bstat(char *name)
61140 {
61141 struct stat64 stat;
61142- if (sys_stat64(name, &stat) != 0)
61143+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61144 return 0;
61145 if (!S_ISBLK(stat.st_mode))
61146 return 0;
61147@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61148 static inline u32 bstat(char *name)
61149 {
61150 struct stat stat;
61151- if (sys_newstat(name, &stat) != 0)
61152+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61153 return 0;
61154 if (!S_ISBLK(stat.st_mode))
61155 return 0;
61156diff -urNp linux-3.0.7/init/do_mounts_initrd.c linux-3.0.7/init/do_mounts_initrd.c
61157--- linux-3.0.7/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
61158+++ linux-3.0.7/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
61159@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61160 create_dev("/dev/root.old", Root_RAM0);
61161 /* mount initrd on rootfs' /root */
61162 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61163- sys_mkdir("/old", 0700);
61164- root_fd = sys_open("/", 0, 0);
61165- old_fd = sys_open("/old", 0, 0);
61166+ sys_mkdir((const char __force_user *)"/old", 0700);
61167+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61168+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61169 /* move initrd over / and chdir/chroot in initrd root */
61170- sys_chdir("/root");
61171- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61172- sys_chroot(".");
61173+ sys_chdir((const char __force_user *)"/root");
61174+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61175+ sys_chroot((const char __force_user *)".");
61176
61177 /*
61178 * In case that a resume from disk is carried out by linuxrc or one of
61179@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61180
61181 /* move initrd to rootfs' /old */
61182 sys_fchdir(old_fd);
61183- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61184+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61185 /* switch root and cwd back to / of rootfs */
61186 sys_fchdir(root_fd);
61187- sys_chroot(".");
61188+ sys_chroot((const char __force_user *)".");
61189 sys_close(old_fd);
61190 sys_close(root_fd);
61191
61192 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61193- sys_chdir("/old");
61194+ sys_chdir((const char __force_user *)"/old");
61195 return;
61196 }
61197
61198@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61199 mount_root();
61200
61201 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61202- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61203+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61204 if (!error)
61205 printk("okay\n");
61206 else {
61207- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61208+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61209 if (error == -ENOENT)
61210 printk("/initrd does not exist. Ignored.\n");
61211 else
61212 printk("failed\n");
61213 printk(KERN_NOTICE "Unmounting old root\n");
61214- sys_umount("/old", MNT_DETACH);
61215+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61216 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61217 if (fd < 0) {
61218 error = fd;
61219@@ -116,11 +116,11 @@ int __init initrd_load(void)
61220 * mounted in the normal path.
61221 */
61222 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61223- sys_unlink("/initrd.image");
61224+ sys_unlink((const char __force_user *)"/initrd.image");
61225 handle_initrd();
61226 return 1;
61227 }
61228 }
61229- sys_unlink("/initrd.image");
61230+ sys_unlink((const char __force_user *)"/initrd.image");
61231 return 0;
61232 }
61233diff -urNp linux-3.0.7/init/do_mounts_md.c linux-3.0.7/init/do_mounts_md.c
61234--- linux-3.0.7/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
61235+++ linux-3.0.7/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
61236@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61237 partitioned ? "_d" : "", minor,
61238 md_setup_args[ent].device_names);
61239
61240- fd = sys_open(name, 0, 0);
61241+ fd = sys_open((char __force_user *)name, 0, 0);
61242 if (fd < 0) {
61243 printk(KERN_ERR "md: open failed - cannot start "
61244 "array %s\n", name);
61245@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61246 * array without it
61247 */
61248 sys_close(fd);
61249- fd = sys_open(name, 0, 0);
61250+ fd = sys_open((char __force_user *)name, 0, 0);
61251 sys_ioctl(fd, BLKRRPART, 0);
61252 }
61253 sys_close(fd);
61254@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61255
61256 wait_for_device_probe();
61257
61258- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61259+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61260 if (fd >= 0) {
61261 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61262 sys_close(fd);
61263diff -urNp linux-3.0.7/init/initramfs.c linux-3.0.7/init/initramfs.c
61264--- linux-3.0.7/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
61265+++ linux-3.0.7/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
61266@@ -74,7 +74,7 @@ static void __init free_hash(void)
61267 }
61268 }
61269
61270-static long __init do_utime(char __user *filename, time_t mtime)
61271+static long __init do_utime(__force char __user *filename, time_t mtime)
61272 {
61273 struct timespec t[2];
61274
61275@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61276 struct dir_entry *de, *tmp;
61277 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61278 list_del(&de->list);
61279- do_utime(de->name, de->mtime);
61280+ do_utime((char __force_user *)de->name, de->mtime);
61281 kfree(de->name);
61282 kfree(de);
61283 }
61284@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61285 if (nlink >= 2) {
61286 char *old = find_link(major, minor, ino, mode, collected);
61287 if (old)
61288- return (sys_link(old, collected) < 0) ? -1 : 1;
61289+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61290 }
61291 return 0;
61292 }
61293@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61294 {
61295 struct stat st;
61296
61297- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61298+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61299 if (S_ISDIR(st.st_mode))
61300- sys_rmdir(path);
61301+ sys_rmdir((char __force_user *)path);
61302 else
61303- sys_unlink(path);
61304+ sys_unlink((char __force_user *)path);
61305 }
61306 }
61307
61308@@ -305,7 +305,7 @@ static int __init do_name(void)
61309 int openflags = O_WRONLY|O_CREAT;
61310 if (ml != 1)
61311 openflags |= O_TRUNC;
61312- wfd = sys_open(collected, openflags, mode);
61313+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61314
61315 if (wfd >= 0) {
61316 sys_fchown(wfd, uid, gid);
61317@@ -317,17 +317,17 @@ static int __init do_name(void)
61318 }
61319 }
61320 } else if (S_ISDIR(mode)) {
61321- sys_mkdir(collected, mode);
61322- sys_chown(collected, uid, gid);
61323- sys_chmod(collected, mode);
61324+ sys_mkdir((char __force_user *)collected, mode);
61325+ sys_chown((char __force_user *)collected, uid, gid);
61326+ sys_chmod((char __force_user *)collected, mode);
61327 dir_add(collected, mtime);
61328 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61329 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61330 if (maybe_link() == 0) {
61331- sys_mknod(collected, mode, rdev);
61332- sys_chown(collected, uid, gid);
61333- sys_chmod(collected, mode);
61334- do_utime(collected, mtime);
61335+ sys_mknod((char __force_user *)collected, mode, rdev);
61336+ sys_chown((char __force_user *)collected, uid, gid);
61337+ sys_chmod((char __force_user *)collected, mode);
61338+ do_utime((char __force_user *)collected, mtime);
61339 }
61340 }
61341 return 0;
61342@@ -336,15 +336,15 @@ static int __init do_name(void)
61343 static int __init do_copy(void)
61344 {
61345 if (count >= body_len) {
61346- sys_write(wfd, victim, body_len);
61347+ sys_write(wfd, (char __force_user *)victim, body_len);
61348 sys_close(wfd);
61349- do_utime(vcollected, mtime);
61350+ do_utime((char __force_user *)vcollected, mtime);
61351 kfree(vcollected);
61352 eat(body_len);
61353 state = SkipIt;
61354 return 0;
61355 } else {
61356- sys_write(wfd, victim, count);
61357+ sys_write(wfd, (char __force_user *)victim, count);
61358 body_len -= count;
61359 eat(count);
61360 return 1;
61361@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61362 {
61363 collected[N_ALIGN(name_len) + body_len] = '\0';
61364 clean_path(collected, 0);
61365- sys_symlink(collected + N_ALIGN(name_len), collected);
61366- sys_lchown(collected, uid, gid);
61367- do_utime(collected, mtime);
61368+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61369+ sys_lchown((char __force_user *)collected, uid, gid);
61370+ do_utime((char __force_user *)collected, mtime);
61371 state = SkipIt;
61372 next_state = Reset;
61373 return 0;
61374diff -urNp linux-3.0.7/init/Kconfig linux-3.0.7/init/Kconfig
61375--- linux-3.0.7/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
61376+++ linux-3.0.7/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
61377@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
61378
61379 config COMPAT_BRK
61380 bool "Disable heap randomization"
61381- default y
61382+ default n
61383 help
61384 Randomizing heap placement makes heap exploits harder, but it
61385 also breaks ancient binaries (including anything libc5 based).
61386diff -urNp linux-3.0.7/init/main.c linux-3.0.7/init/main.c
61387--- linux-3.0.7/init/main.c 2011-07-21 22:17:23.000000000 -0400
61388+++ linux-3.0.7/init/main.c 2011-10-06 04:17:55.000000000 -0400
61389@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61390 extern void tc_init(void);
61391 #endif
61392
61393+extern void grsecurity_init(void);
61394+
61395 /*
61396 * Debug helper: via this flag we know that we are in 'early bootup code'
61397 * where only the boot processor is running with IRQ disabled. This means
61398@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61399
61400 __setup("reset_devices", set_reset_devices);
61401
61402+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61403+extern char pax_enter_kernel_user[];
61404+extern char pax_exit_kernel_user[];
61405+extern pgdval_t clone_pgd_mask;
61406+#endif
61407+
61408+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61409+static int __init setup_pax_nouderef(char *str)
61410+{
61411+#ifdef CONFIG_X86_32
61412+ unsigned int cpu;
61413+ struct desc_struct *gdt;
61414+
61415+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61416+ gdt = get_cpu_gdt_table(cpu);
61417+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61418+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61419+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61420+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61421+ }
61422+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61423+#else
61424+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61425+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61426+ clone_pgd_mask = ~(pgdval_t)0UL;
61427+#endif
61428+
61429+ return 0;
61430+}
61431+early_param("pax_nouderef", setup_pax_nouderef);
61432+#endif
61433+
61434+#ifdef CONFIG_PAX_SOFTMODE
61435+int pax_softmode;
61436+
61437+static int __init setup_pax_softmode(char *str)
61438+{
61439+ get_option(&str, &pax_softmode);
61440+ return 1;
61441+}
61442+__setup("pax_softmode=", setup_pax_softmode);
61443+#endif
61444+
61445 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61446 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61447 static const char *panic_later, *panic_param;
61448@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
61449 {
61450 int count = preempt_count();
61451 int ret;
61452+ const char *msg1 = "", *msg2 = "";
61453
61454 if (initcall_debug)
61455 ret = do_one_initcall_debug(fn);
61456@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
61457 sprintf(msgbuf, "error code %d ", ret);
61458
61459 if (preempt_count() != count) {
61460- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61461+ msg1 = " preemption imbalance";
61462 preempt_count() = count;
61463 }
61464 if (irqs_disabled()) {
61465- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61466+ msg2 = " disabled interrupts";
61467 local_irq_enable();
61468 }
61469- if (msgbuf[0]) {
61470- printk("initcall %pF returned with %s\n", fn, msgbuf);
61471+ if (msgbuf[0] || *msg1 || *msg2) {
61472+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61473 }
61474
61475 return ret;
61476@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
61477 do_basic_setup();
61478
61479 /* Open the /dev/console on the rootfs, this should never fail */
61480- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61481+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61482 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61483
61484 (void) sys_dup(0);
61485@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
61486 if (!ramdisk_execute_command)
61487 ramdisk_execute_command = "/init";
61488
61489- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61490+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61491 ramdisk_execute_command = NULL;
61492 prepare_namespace();
61493 }
61494
61495+ grsecurity_init();
61496+
61497 /*
61498 * Ok, we have completed the initial bootup, and
61499 * we're essentially up and running. Get rid of the
61500diff -urNp linux-3.0.7/ipc/mqueue.c linux-3.0.7/ipc/mqueue.c
61501--- linux-3.0.7/ipc/mqueue.c 2011-10-16 21:54:54.000000000 -0400
61502+++ linux-3.0.7/ipc/mqueue.c 2011-10-16 21:59:31.000000000 -0400
61503@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61504 mq_bytes = (mq_msg_tblsz +
61505 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61506
61507+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61508 spin_lock(&mq_lock);
61509 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61510 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61511diff -urNp linux-3.0.7/ipc/msg.c linux-3.0.7/ipc/msg.c
61512--- linux-3.0.7/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
61513+++ linux-3.0.7/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
61514@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61515 return security_msg_queue_associate(msq, msgflg);
61516 }
61517
61518+static struct ipc_ops msg_ops = {
61519+ .getnew = newque,
61520+ .associate = msg_security,
61521+ .more_checks = NULL
61522+};
61523+
61524 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61525 {
61526 struct ipc_namespace *ns;
61527- struct ipc_ops msg_ops;
61528 struct ipc_params msg_params;
61529
61530 ns = current->nsproxy->ipc_ns;
61531
61532- msg_ops.getnew = newque;
61533- msg_ops.associate = msg_security;
61534- msg_ops.more_checks = NULL;
61535-
61536 msg_params.key = key;
61537 msg_params.flg = msgflg;
61538
61539diff -urNp linux-3.0.7/ipc/sem.c linux-3.0.7/ipc/sem.c
61540--- linux-3.0.7/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
61541+++ linux-3.0.7/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
61542@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
61543 return 0;
61544 }
61545
61546+static struct ipc_ops sem_ops = {
61547+ .getnew = newary,
61548+ .associate = sem_security,
61549+ .more_checks = sem_more_checks
61550+};
61551+
61552 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61553 {
61554 struct ipc_namespace *ns;
61555- struct ipc_ops sem_ops;
61556 struct ipc_params sem_params;
61557
61558 ns = current->nsproxy->ipc_ns;
61559@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61560 if (nsems < 0 || nsems > ns->sc_semmsl)
61561 return -EINVAL;
61562
61563- sem_ops.getnew = newary;
61564- sem_ops.associate = sem_security;
61565- sem_ops.more_checks = sem_more_checks;
61566-
61567 sem_params.key = key;
61568 sem_params.flg = semflg;
61569 sem_params.u.nsems = nsems;
61570@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
61571 int nsems;
61572 struct list_head tasks;
61573
61574+ pax_track_stack();
61575+
61576 sma = sem_lock_check(ns, semid);
61577 if (IS_ERR(sma))
61578 return PTR_ERR(sma);
61579@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61580 struct ipc_namespace *ns;
61581 struct list_head tasks;
61582
61583+ pax_track_stack();
61584+
61585 ns = current->nsproxy->ipc_ns;
61586
61587 if (nsops < 1 || semid < 0)
61588diff -urNp linux-3.0.7/ipc/shm.c linux-3.0.7/ipc/shm.c
61589--- linux-3.0.7/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
61590+++ linux-3.0.7/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
61591@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
61592 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
61593 #endif
61594
61595+#ifdef CONFIG_GRKERNSEC
61596+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61597+ const time_t shm_createtime, const uid_t cuid,
61598+ const int shmid);
61599+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61600+ const time_t shm_createtime);
61601+#endif
61602+
61603 void shm_init_ns(struct ipc_namespace *ns)
61604 {
61605 ns->shm_ctlmax = SHMMAX;
61606@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
61607 shp->shm_lprid = 0;
61608 shp->shm_atim = shp->shm_dtim = 0;
61609 shp->shm_ctim = get_seconds();
61610+#ifdef CONFIG_GRKERNSEC
61611+ {
61612+ struct timespec timeval;
61613+ do_posix_clock_monotonic_gettime(&timeval);
61614+
61615+ shp->shm_createtime = timeval.tv_sec;
61616+ }
61617+#endif
61618 shp->shm_segsz = size;
61619 shp->shm_nattch = 0;
61620 shp->shm_file = file;
61621@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
61622 return 0;
61623 }
61624
61625+static struct ipc_ops shm_ops = {
61626+ .getnew = newseg,
61627+ .associate = shm_security,
61628+ .more_checks = shm_more_checks
61629+};
61630+
61631 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
61632 {
61633 struct ipc_namespace *ns;
61634- struct ipc_ops shm_ops;
61635 struct ipc_params shm_params;
61636
61637 ns = current->nsproxy->ipc_ns;
61638
61639- shm_ops.getnew = newseg;
61640- shm_ops.associate = shm_security;
61641- shm_ops.more_checks = shm_more_checks;
61642-
61643 shm_params.key = key;
61644 shm_params.flg = shmflg;
61645 shm_params.u.size = size;
61646@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
61647 case SHM_LOCK:
61648 case SHM_UNLOCK:
61649 {
61650- struct file *uninitialized_var(shm_file);
61651-
61652 lru_add_drain_all(); /* drain pagevecs to lru lists */
61653
61654 shp = shm_lock_check(ns, shmid);
61655@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
61656 if (err)
61657 goto out_unlock;
61658
61659+#ifdef CONFIG_GRKERNSEC
61660+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
61661+ shp->shm_perm.cuid, shmid) ||
61662+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
61663+ err = -EACCES;
61664+ goto out_unlock;
61665+ }
61666+#endif
61667+
61668 path = shp->shm_file->f_path;
61669 path_get(&path);
61670 shp->shm_nattch++;
61671+#ifdef CONFIG_GRKERNSEC
61672+ shp->shm_lapid = current->pid;
61673+#endif
61674 size = i_size_read(path.dentry->d_inode);
61675 shm_unlock(shp);
61676
61677diff -urNp linux-3.0.7/kernel/acct.c linux-3.0.7/kernel/acct.c
61678--- linux-3.0.7/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
61679+++ linux-3.0.7/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
61680@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
61681 */
61682 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
61683 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
61684- file->f_op->write(file, (char *)&ac,
61685+ file->f_op->write(file, (char __force_user *)&ac,
61686 sizeof(acct_t), &file->f_pos);
61687 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
61688 set_fs(fs);
61689diff -urNp linux-3.0.7/kernel/audit.c linux-3.0.7/kernel/audit.c
61690--- linux-3.0.7/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
61691+++ linux-3.0.7/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
61692@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
61693 3) suppressed due to audit_rate_limit
61694 4) suppressed due to audit_backlog_limit
61695 */
61696-static atomic_t audit_lost = ATOMIC_INIT(0);
61697+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
61698
61699 /* The netlink socket. */
61700 static struct sock *audit_sock;
61701@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
61702 unsigned long now;
61703 int print;
61704
61705- atomic_inc(&audit_lost);
61706+ atomic_inc_unchecked(&audit_lost);
61707
61708 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
61709
61710@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
61711 printk(KERN_WARNING
61712 "audit: audit_lost=%d audit_rate_limit=%d "
61713 "audit_backlog_limit=%d\n",
61714- atomic_read(&audit_lost),
61715+ atomic_read_unchecked(&audit_lost),
61716 audit_rate_limit,
61717 audit_backlog_limit);
61718 audit_panic(message);
61719@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
61720 status_set.pid = audit_pid;
61721 status_set.rate_limit = audit_rate_limit;
61722 status_set.backlog_limit = audit_backlog_limit;
61723- status_set.lost = atomic_read(&audit_lost);
61724+ status_set.lost = atomic_read_unchecked(&audit_lost);
61725 status_set.backlog = skb_queue_len(&audit_skb_queue);
61726 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
61727 &status_set, sizeof(status_set));
61728diff -urNp linux-3.0.7/kernel/auditsc.c linux-3.0.7/kernel/auditsc.c
61729--- linux-3.0.7/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
61730+++ linux-3.0.7/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
61731@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
61732 }
61733
61734 /* global counter which is incremented every time something logs in */
61735-static atomic_t session_id = ATOMIC_INIT(0);
61736+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
61737
61738 /**
61739 * audit_set_loginuid - set a task's audit_context loginuid
61740@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
61741 */
61742 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
61743 {
61744- unsigned int sessionid = atomic_inc_return(&session_id);
61745+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
61746 struct audit_context *context = task->audit_context;
61747
61748 if (context && context->in_syscall) {
61749diff -urNp linux-3.0.7/kernel/capability.c linux-3.0.7/kernel/capability.c
61750--- linux-3.0.7/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
61751+++ linux-3.0.7/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
61752@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
61753 * before modification is attempted and the application
61754 * fails.
61755 */
61756+ if (tocopy > ARRAY_SIZE(kdata))
61757+ return -EFAULT;
61758+
61759 if (copy_to_user(dataptr, kdata, tocopy
61760 * sizeof(struct __user_cap_data_struct))) {
61761 return -EFAULT;
61762@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
61763 BUG();
61764 }
61765
61766- if (security_capable(ns, current_cred(), cap) == 0) {
61767+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
61768 current->flags |= PF_SUPERPRIV;
61769 return true;
61770 }
61771@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
61772 }
61773 EXPORT_SYMBOL(ns_capable);
61774
61775+bool ns_capable_nolog(struct user_namespace *ns, int cap)
61776+{
61777+ if (unlikely(!cap_valid(cap))) {
61778+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
61779+ BUG();
61780+ }
61781+
61782+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
61783+ current->flags |= PF_SUPERPRIV;
61784+ return true;
61785+ }
61786+ return false;
61787+}
61788+EXPORT_SYMBOL(ns_capable_nolog);
61789+
61790+bool capable_nolog(int cap)
61791+{
61792+ return ns_capable_nolog(&init_user_ns, cap);
61793+}
61794+EXPORT_SYMBOL(capable_nolog);
61795+
61796 /**
61797 * task_ns_capable - Determine whether current task has a superior
61798 * capability targeted at a specific task's user namespace.
61799@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
61800 }
61801 EXPORT_SYMBOL(task_ns_capable);
61802
61803+bool task_ns_capable_nolog(struct task_struct *t, int cap)
61804+{
61805+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
61806+}
61807+EXPORT_SYMBOL(task_ns_capable_nolog);
61808+
61809 /**
61810 * nsown_capable - Check superior capability to one's own user_ns
61811 * @cap: The capability in question
61812diff -urNp linux-3.0.7/kernel/cgroup.c linux-3.0.7/kernel/cgroup.c
61813--- linux-3.0.7/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
61814+++ linux-3.0.7/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
61815@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
61816 struct hlist_head *hhead;
61817 struct cg_cgroup_link *link;
61818
61819+ pax_track_stack();
61820+
61821 /* First see if we already have a cgroup group that matches
61822 * the desired set */
61823 read_lock(&css_set_lock);
61824diff -urNp linux-3.0.7/kernel/compat.c linux-3.0.7/kernel/compat.c
61825--- linux-3.0.7/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
61826+++ linux-3.0.7/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
61827@@ -13,6 +13,7 @@
61828
61829 #include <linux/linkage.h>
61830 #include <linux/compat.h>
61831+#include <linux/module.h>
61832 #include <linux/errno.h>
61833 #include <linux/time.h>
61834 #include <linux/signal.h>
61835@@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
61836 mm_segment_t oldfs;
61837 long ret;
61838
61839- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
61840+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
61841 oldfs = get_fs();
61842 set_fs(KERNEL_DS);
61843 ret = hrtimer_nanosleep_restart(restart);
61844@@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
61845 oldfs = get_fs();
61846 set_fs(KERNEL_DS);
61847 ret = hrtimer_nanosleep(&tu,
61848- rmtp ? (struct timespec __user *)&rmt : NULL,
61849+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
61850 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
61851 set_fs(oldfs);
61852
61853@@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
61854 mm_segment_t old_fs = get_fs();
61855
61856 set_fs(KERNEL_DS);
61857- ret = sys_sigpending((old_sigset_t __user *) &s);
61858+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
61859 set_fs(old_fs);
61860 if (ret == 0)
61861 ret = put_user(s, set);
61862@@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
61863 old_fs = get_fs();
61864 set_fs(KERNEL_DS);
61865 ret = sys_sigprocmask(how,
61866- set ? (old_sigset_t __user *) &s : NULL,
61867- oset ? (old_sigset_t __user *) &s : NULL);
61868+ set ? (old_sigset_t __force_user *) &s : NULL,
61869+ oset ? (old_sigset_t __force_user *) &s : NULL);
61870 set_fs(old_fs);
61871 if (ret == 0)
61872 if (oset)
61873@@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
61874 mm_segment_t old_fs = get_fs();
61875
61876 set_fs(KERNEL_DS);
61877- ret = sys_old_getrlimit(resource, &r);
61878+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
61879 set_fs(old_fs);
61880
61881 if (!ret) {
61882@@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
61883 mm_segment_t old_fs = get_fs();
61884
61885 set_fs(KERNEL_DS);
61886- ret = sys_getrusage(who, (struct rusage __user *) &r);
61887+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
61888 set_fs(old_fs);
61889
61890 if (ret)
61891@@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
61892 set_fs (KERNEL_DS);
61893 ret = sys_wait4(pid,
61894 (stat_addr ?
61895- (unsigned int __user *) &status : NULL),
61896- options, (struct rusage __user *) &r);
61897+ (unsigned int __force_user *) &status : NULL),
61898+ options, (struct rusage __force_user *) &r);
61899 set_fs (old_fs);
61900
61901 if (ret > 0) {
61902@@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
61903 memset(&info, 0, sizeof(info));
61904
61905 set_fs(KERNEL_DS);
61906- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
61907- uru ? (struct rusage __user *)&ru : NULL);
61908+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
61909+ uru ? (struct rusage __force_user *)&ru : NULL);
61910 set_fs(old_fs);
61911
61912 if ((ret < 0) || (info.si_signo == 0))
61913@@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
61914 oldfs = get_fs();
61915 set_fs(KERNEL_DS);
61916 err = sys_timer_settime(timer_id, flags,
61917- (struct itimerspec __user *) &newts,
61918- (struct itimerspec __user *) &oldts);
61919+ (struct itimerspec __force_user *) &newts,
61920+ (struct itimerspec __force_user *) &oldts);
61921 set_fs(oldfs);
61922 if (!err && old && put_compat_itimerspec(old, &oldts))
61923 return -EFAULT;
61924@@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
61925 oldfs = get_fs();
61926 set_fs(KERNEL_DS);
61927 err = sys_timer_gettime(timer_id,
61928- (struct itimerspec __user *) &ts);
61929+ (struct itimerspec __force_user *) &ts);
61930 set_fs(oldfs);
61931 if (!err && put_compat_itimerspec(setting, &ts))
61932 return -EFAULT;
61933@@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
61934 oldfs = get_fs();
61935 set_fs(KERNEL_DS);
61936 err = sys_clock_settime(which_clock,
61937- (struct timespec __user *) &ts);
61938+ (struct timespec __force_user *) &ts);
61939 set_fs(oldfs);
61940 return err;
61941 }
61942@@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
61943 oldfs = get_fs();
61944 set_fs(KERNEL_DS);
61945 err = sys_clock_gettime(which_clock,
61946- (struct timespec __user *) &ts);
61947+ (struct timespec __force_user *) &ts);
61948 set_fs(oldfs);
61949 if (!err && put_compat_timespec(&ts, tp))
61950 return -EFAULT;
61951@@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
61952
61953 oldfs = get_fs();
61954 set_fs(KERNEL_DS);
61955- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
61956+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
61957 set_fs(oldfs);
61958
61959 err = compat_put_timex(utp, &txc);
61960@@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
61961 oldfs = get_fs();
61962 set_fs(KERNEL_DS);
61963 err = sys_clock_getres(which_clock,
61964- (struct timespec __user *) &ts);
61965+ (struct timespec __force_user *) &ts);
61966 set_fs(oldfs);
61967 if (!err && tp && put_compat_timespec(&ts, tp))
61968 return -EFAULT;
61969@@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
61970 long err;
61971 mm_segment_t oldfs;
61972 struct timespec tu;
61973- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
61974+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
61975
61976- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
61977+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
61978 oldfs = get_fs();
61979 set_fs(KERNEL_DS);
61980 err = clock_nanosleep_restart(restart);
61981@@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
61982 oldfs = get_fs();
61983 set_fs(KERNEL_DS);
61984 err = sys_clock_nanosleep(which_clock, flags,
61985- (struct timespec __user *) &in,
61986- (struct timespec __user *) &out);
61987+ (struct timespec __force_user *) &in,
61988+ (struct timespec __force_user *) &out);
61989 set_fs(oldfs);
61990
61991 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
61992diff -urNp linux-3.0.7/kernel/configs.c linux-3.0.7/kernel/configs.c
61993--- linux-3.0.7/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
61994+++ linux-3.0.7/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
61995@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
61996 struct proc_dir_entry *entry;
61997
61998 /* create the current config file */
61999+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62000+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62001+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62002+ &ikconfig_file_ops);
62003+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62004+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62005+ &ikconfig_file_ops);
62006+#endif
62007+#else
62008 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62009 &ikconfig_file_ops);
62010+#endif
62011+
62012 if (!entry)
62013 return -ENOMEM;
62014
62015diff -urNp linux-3.0.7/kernel/cred.c linux-3.0.7/kernel/cred.c
62016--- linux-3.0.7/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
62017+++ linux-3.0.7/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
62018@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62019 */
62020 void __put_cred(struct cred *cred)
62021 {
62022+ pax_track_stack();
62023+
62024 kdebug("__put_cred(%p{%d,%d})", cred,
62025 atomic_read(&cred->usage),
62026 read_cred_subscribers(cred));
62027@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62028 {
62029 struct cred *cred;
62030
62031+ pax_track_stack();
62032+
62033 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62034 atomic_read(&tsk->cred->usage),
62035 read_cred_subscribers(tsk->cred));
62036@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62037 {
62038 const struct cred *cred;
62039
62040+ pax_track_stack();
62041+
62042 rcu_read_lock();
62043
62044 do {
62045@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62046 {
62047 struct cred *new;
62048
62049+ pax_track_stack();
62050+
62051 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62052 if (!new)
62053 return NULL;
62054@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62055 const struct cred *old;
62056 struct cred *new;
62057
62058+ pax_track_stack();
62059+
62060 validate_process_creds();
62061
62062 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62063@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62064 struct thread_group_cred *tgcred = NULL;
62065 struct cred *new;
62066
62067+ pax_track_stack();
62068+
62069 #ifdef CONFIG_KEYS
62070 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62071 if (!tgcred)
62072@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62073 struct cred *new;
62074 int ret;
62075
62076+ pax_track_stack();
62077+
62078 if (
62079 #ifdef CONFIG_KEYS
62080 !p->cred->thread_keyring &&
62081@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62082 struct task_struct *task = current;
62083 const struct cred *old = task->real_cred;
62084
62085+ pax_track_stack();
62086+
62087 kdebug("commit_creds(%p{%d,%d})", new,
62088 atomic_read(&new->usage),
62089 read_cred_subscribers(new));
62090@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62091
62092 get_cred(new); /* we will require a ref for the subj creds too */
62093
62094+ gr_set_role_label(task, new->uid, new->gid);
62095+
62096 /* dumpability changes */
62097 if (old->euid != new->euid ||
62098 old->egid != new->egid ||
62099@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
62100 key_fsgid_changed(task);
62101
62102 /* do it
62103- * - What if a process setreuid()'s and this brings the
62104- * new uid over his NPROC rlimit? We can check this now
62105- * cheaply with the new uid cache, so if it matters
62106- * we should be checking for it. -DaveM
62107+ * RLIMIT_NPROC limits on user->processes have already been checked
62108+ * in set_user().
62109 */
62110 alter_cred_subscribers(new, 2);
62111 if (new->user != old->user)
62112@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62113 */
62114 void abort_creds(struct cred *new)
62115 {
62116+ pax_track_stack();
62117+
62118 kdebug("abort_creds(%p{%d,%d})", new,
62119 atomic_read(&new->usage),
62120 read_cred_subscribers(new));
62121@@ -574,6 +592,8 @@ const struct cred *override_creds(const
62122 {
62123 const struct cred *old = current->cred;
62124
62125+ pax_track_stack();
62126+
62127 kdebug("override_creds(%p{%d,%d})", new,
62128 atomic_read(&new->usage),
62129 read_cred_subscribers(new));
62130@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
62131 {
62132 const struct cred *override = current->cred;
62133
62134+ pax_track_stack();
62135+
62136 kdebug("revert_creds(%p{%d,%d})", old,
62137 atomic_read(&old->usage),
62138 read_cred_subscribers(old));
62139@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62140 const struct cred *old;
62141 struct cred *new;
62142
62143+ pax_track_stack();
62144+
62145 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62146 if (!new)
62147 return NULL;
62148@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62149 */
62150 int set_security_override(struct cred *new, u32 secid)
62151 {
62152+ pax_track_stack();
62153+
62154 return security_kernel_act_as(new, secid);
62155 }
62156 EXPORT_SYMBOL(set_security_override);
62157@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
62158 u32 secid;
62159 int ret;
62160
62161+ pax_track_stack();
62162+
62163 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62164 if (ret < 0)
62165 return ret;
62166diff -urNp linux-3.0.7/kernel/debug/debug_core.c linux-3.0.7/kernel/debug/debug_core.c
62167--- linux-3.0.7/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
62168+++ linux-3.0.7/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
62169@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62170 */
62171 static atomic_t masters_in_kgdb;
62172 static atomic_t slaves_in_kgdb;
62173-static atomic_t kgdb_break_tasklet_var;
62174+static atomic_unchecked_t kgdb_break_tasklet_var;
62175 atomic_t kgdb_setting_breakpoint;
62176
62177 struct task_struct *kgdb_usethread;
62178@@ -129,7 +129,7 @@ int kgdb_single_step;
62179 static pid_t kgdb_sstep_pid;
62180
62181 /* to keep track of the CPU which is doing the single stepping*/
62182-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62183+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62184
62185 /*
62186 * If you are debugging a problem where roundup (the collection of
62187@@ -542,7 +542,7 @@ return_normal:
62188 * kernel will only try for the value of sstep_tries before
62189 * giving up and continuing on.
62190 */
62191- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62192+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62193 (kgdb_info[cpu].task &&
62194 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62195 atomic_set(&kgdb_active, -1);
62196@@ -636,8 +636,8 @@ cpu_master_loop:
62197 }
62198
62199 kgdb_restore:
62200- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62201- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62202+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62203+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62204 if (kgdb_info[sstep_cpu].task)
62205 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62206 else
62207@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62208 static void kgdb_tasklet_bpt(unsigned long ing)
62209 {
62210 kgdb_breakpoint();
62211- atomic_set(&kgdb_break_tasklet_var, 0);
62212+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62213 }
62214
62215 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62216
62217 void kgdb_schedule_breakpoint(void)
62218 {
62219- if (atomic_read(&kgdb_break_tasklet_var) ||
62220+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62221 atomic_read(&kgdb_active) != -1 ||
62222 atomic_read(&kgdb_setting_breakpoint))
62223 return;
62224- atomic_inc(&kgdb_break_tasklet_var);
62225+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62226 tasklet_schedule(&kgdb_tasklet_breakpoint);
62227 }
62228 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62229diff -urNp linux-3.0.7/kernel/debug/kdb/kdb_main.c linux-3.0.7/kernel/debug/kdb/kdb_main.c
62230--- linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
62231+++ linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
62232@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62233 list_for_each_entry(mod, kdb_modules, list) {
62234
62235 kdb_printf("%-20s%8u 0x%p ", mod->name,
62236- mod->core_size, (void *)mod);
62237+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62238 #ifdef CONFIG_MODULE_UNLOAD
62239 kdb_printf("%4d ", module_refcount(mod));
62240 #endif
62241@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62242 kdb_printf(" (Loading)");
62243 else
62244 kdb_printf(" (Live)");
62245- kdb_printf(" 0x%p", mod->module_core);
62246+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62247
62248 #ifdef CONFIG_MODULE_UNLOAD
62249 {
62250diff -urNp linux-3.0.7/kernel/events/core.c linux-3.0.7/kernel/events/core.c
62251--- linux-3.0.7/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
62252+++ linux-3.0.7/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
62253@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
62254 return 0;
62255 }
62256
62257-static atomic64_t perf_event_id;
62258+static atomic64_unchecked_t perf_event_id;
62259
62260 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62261 enum event_type_t event_type);
62262@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
62263
62264 static inline u64 perf_event_count(struct perf_event *event)
62265 {
62266- return local64_read(&event->count) + atomic64_read(&event->child_count);
62267+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62268 }
62269
62270 static u64 perf_event_read(struct perf_event *event)
62271@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
62272 mutex_lock(&event->child_mutex);
62273 total += perf_event_read(event);
62274 *enabled += event->total_time_enabled +
62275- atomic64_read(&event->child_total_time_enabled);
62276+ atomic64_read_unchecked(&event->child_total_time_enabled);
62277 *running += event->total_time_running +
62278- atomic64_read(&event->child_total_time_running);
62279+ atomic64_read_unchecked(&event->child_total_time_running);
62280
62281 list_for_each_entry(child, &event->child_list, child_list) {
62282 total += perf_event_read(child);
62283@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
62284 userpg->offset -= local64_read(&event->hw.prev_count);
62285
62286 userpg->time_enabled = event->total_time_enabled +
62287- atomic64_read(&event->child_total_time_enabled);
62288+ atomic64_read_unchecked(&event->child_total_time_enabled);
62289
62290 userpg->time_running = event->total_time_running +
62291- atomic64_read(&event->child_total_time_running);
62292+ atomic64_read_unchecked(&event->child_total_time_running);
62293
62294 barrier();
62295 ++userpg->lock;
62296@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
62297 values[n++] = perf_event_count(event);
62298 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62299 values[n++] = enabled +
62300- atomic64_read(&event->child_total_time_enabled);
62301+ atomic64_read_unchecked(&event->child_total_time_enabled);
62302 }
62303 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62304 values[n++] = running +
62305- atomic64_read(&event->child_total_time_running);
62306+ atomic64_read_unchecked(&event->child_total_time_running);
62307 }
62308 if (read_format & PERF_FORMAT_ID)
62309 values[n++] = primary_event_id(event);
62310@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
62311 * need to add enough zero bytes after the string to handle
62312 * the 64bit alignment we do later.
62313 */
62314- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62315+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62316 if (!buf) {
62317 name = strncpy(tmp, "//enomem", sizeof(tmp));
62318 goto got_name;
62319 }
62320- name = d_path(&file->f_path, buf, PATH_MAX);
62321+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62322 if (IS_ERR(name)) {
62323 name = strncpy(tmp, "//toolong", sizeof(tmp));
62324 goto got_name;
62325@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
62326 event->parent = parent_event;
62327
62328 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62329- event->id = atomic64_inc_return(&perf_event_id);
62330+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62331
62332 event->state = PERF_EVENT_STATE_INACTIVE;
62333
62334@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
62335 /*
62336 * Add back the child's count to the parent's count:
62337 */
62338- atomic64_add(child_val, &parent_event->child_count);
62339- atomic64_add(child_event->total_time_enabled,
62340+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62341+ atomic64_add_unchecked(child_event->total_time_enabled,
62342 &parent_event->child_total_time_enabled);
62343- atomic64_add(child_event->total_time_running,
62344+ atomic64_add_unchecked(child_event->total_time_running,
62345 &parent_event->child_total_time_running);
62346
62347 /*
62348diff -urNp linux-3.0.7/kernel/exit.c linux-3.0.7/kernel/exit.c
62349--- linux-3.0.7/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
62350+++ linux-3.0.7/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
62351@@ -57,6 +57,10 @@
62352 #include <asm/pgtable.h>
62353 #include <asm/mmu_context.h>
62354
62355+#ifdef CONFIG_GRKERNSEC
62356+extern rwlock_t grsec_exec_file_lock;
62357+#endif
62358+
62359 static void exit_mm(struct task_struct * tsk);
62360
62361 static void __unhash_process(struct task_struct *p, bool group_dead)
62362@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
62363 struct task_struct *leader;
62364 int zap_leader;
62365 repeat:
62366+#ifdef CONFIG_NET
62367+ gr_del_task_from_ip_table(p);
62368+#endif
62369+
62370 tracehook_prepare_release_task(p);
62371 /* don't need to get the RCU readlock here - the process is dead and
62372 * can't be modifying its own credentials. But shut RCU-lockdep up */
62373@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
62374 {
62375 write_lock_irq(&tasklist_lock);
62376
62377+#ifdef CONFIG_GRKERNSEC
62378+ write_lock(&grsec_exec_file_lock);
62379+ if (current->exec_file) {
62380+ fput(current->exec_file);
62381+ current->exec_file = NULL;
62382+ }
62383+ write_unlock(&grsec_exec_file_lock);
62384+#endif
62385+
62386 ptrace_unlink(current);
62387 /* Reparent to init */
62388 current->real_parent = current->parent = kthreadd_task;
62389 list_move_tail(&current->sibling, &current->real_parent->children);
62390
62391+ gr_set_kernel_label(current);
62392+
62393 /* Set the exit signal to SIGCHLD so we signal init on exit */
62394 current->exit_signal = SIGCHLD;
62395
62396@@ -394,7 +413,7 @@ int allow_signal(int sig)
62397 * know it'll be handled, so that they don't get converted to
62398 * SIGKILL or just silently dropped.
62399 */
62400- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62401+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62402 recalc_sigpending();
62403 spin_unlock_irq(&current->sighand->siglock);
62404 return 0;
62405@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
62406 vsnprintf(current->comm, sizeof(current->comm), name, args);
62407 va_end(args);
62408
62409+#ifdef CONFIG_GRKERNSEC
62410+ write_lock(&grsec_exec_file_lock);
62411+ if (current->exec_file) {
62412+ fput(current->exec_file);
62413+ current->exec_file = NULL;
62414+ }
62415+ write_unlock(&grsec_exec_file_lock);
62416+#endif
62417+
62418+ gr_set_kernel_label(current);
62419+
62420 /*
62421 * If we were started as result of loading a module, close all of the
62422 * user space pages. We don't need them, and if we didn't close them
62423@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
62424 struct task_struct *tsk = current;
62425 int group_dead;
62426
62427- profile_task_exit(tsk);
62428-
62429- WARN_ON(atomic_read(&tsk->fs_excl));
62430- WARN_ON(blk_needs_flush_plug(tsk));
62431-
62432 if (unlikely(in_interrupt()))
62433 panic("Aiee, killing interrupt handler!");
62434- if (unlikely(!tsk->pid))
62435- panic("Attempted to kill the idle task!");
62436
62437 /*
62438 * If do_exit is called because this processes oopsed, it's possible
62439@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
62440 */
62441 set_fs(USER_DS);
62442
62443+ profile_task_exit(tsk);
62444+
62445+ WARN_ON(atomic_read(&tsk->fs_excl));
62446+ WARN_ON(blk_needs_flush_plug(tsk));
62447+
62448+ if (unlikely(!tsk->pid))
62449+ panic("Attempted to kill the idle task!");
62450+
62451 tracehook_report_exit(&code);
62452
62453 validate_creds_for_do_exit(tsk);
62454@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
62455 tsk->exit_code = code;
62456 taskstats_exit(tsk, group_dead);
62457
62458+ gr_acl_handle_psacct(tsk, code);
62459+ gr_acl_handle_exit();
62460+
62461 exit_mm(tsk);
62462
62463 if (group_dead)
62464diff -urNp linux-3.0.7/kernel/fork.c linux-3.0.7/kernel/fork.c
62465--- linux-3.0.7/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
62466+++ linux-3.0.7/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
62467@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
62468 *stackend = STACK_END_MAGIC; /* for overflow detection */
62469
62470 #ifdef CONFIG_CC_STACKPROTECTOR
62471- tsk->stack_canary = get_random_int();
62472+ tsk->stack_canary = pax_get_random_long();
62473 #endif
62474
62475 /* One for us, one for whoever does the "release_task()" (usually parent) */
62476@@ -308,13 +308,77 @@ out:
62477 }
62478
62479 #ifdef CONFIG_MMU
62480+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62481+{
62482+ struct vm_area_struct *tmp;
62483+ unsigned long charge;
62484+ struct mempolicy *pol;
62485+ struct file *file;
62486+
62487+ charge = 0;
62488+ if (mpnt->vm_flags & VM_ACCOUNT) {
62489+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62490+ if (security_vm_enough_memory(len))
62491+ goto fail_nomem;
62492+ charge = len;
62493+ }
62494+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62495+ if (!tmp)
62496+ goto fail_nomem;
62497+ *tmp = *mpnt;
62498+ tmp->vm_mm = mm;
62499+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62500+ pol = mpol_dup(vma_policy(mpnt));
62501+ if (IS_ERR(pol))
62502+ goto fail_nomem_policy;
62503+ vma_set_policy(tmp, pol);
62504+ if (anon_vma_fork(tmp, mpnt))
62505+ goto fail_nomem_anon_vma_fork;
62506+ tmp->vm_flags &= ~VM_LOCKED;
62507+ tmp->vm_next = tmp->vm_prev = NULL;
62508+ tmp->vm_mirror = NULL;
62509+ file = tmp->vm_file;
62510+ if (file) {
62511+ struct inode *inode = file->f_path.dentry->d_inode;
62512+ struct address_space *mapping = file->f_mapping;
62513+
62514+ get_file(file);
62515+ if (tmp->vm_flags & VM_DENYWRITE)
62516+ atomic_dec(&inode->i_writecount);
62517+ mutex_lock(&mapping->i_mmap_mutex);
62518+ if (tmp->vm_flags & VM_SHARED)
62519+ mapping->i_mmap_writable++;
62520+ flush_dcache_mmap_lock(mapping);
62521+ /* insert tmp into the share list, just after mpnt */
62522+ vma_prio_tree_add(tmp, mpnt);
62523+ flush_dcache_mmap_unlock(mapping);
62524+ mutex_unlock(&mapping->i_mmap_mutex);
62525+ }
62526+
62527+ /*
62528+ * Clear hugetlb-related page reserves for children. This only
62529+ * affects MAP_PRIVATE mappings. Faults generated by the child
62530+ * are not guaranteed to succeed, even if read-only
62531+ */
62532+ if (is_vm_hugetlb_page(tmp))
62533+ reset_vma_resv_huge_pages(tmp);
62534+
62535+ return tmp;
62536+
62537+fail_nomem_anon_vma_fork:
62538+ mpol_put(pol);
62539+fail_nomem_policy:
62540+ kmem_cache_free(vm_area_cachep, tmp);
62541+fail_nomem:
62542+ vm_unacct_memory(charge);
62543+ return NULL;
62544+}
62545+
62546 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62547 {
62548 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62549 struct rb_node **rb_link, *rb_parent;
62550 int retval;
62551- unsigned long charge;
62552- struct mempolicy *pol;
62553
62554 down_write(&oldmm->mmap_sem);
62555 flush_cache_dup_mm(oldmm);
62556@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
62557 mm->locked_vm = 0;
62558 mm->mmap = NULL;
62559 mm->mmap_cache = NULL;
62560- mm->free_area_cache = oldmm->mmap_base;
62561- mm->cached_hole_size = ~0UL;
62562+ mm->free_area_cache = oldmm->free_area_cache;
62563+ mm->cached_hole_size = oldmm->cached_hole_size;
62564 mm->map_count = 0;
62565 cpumask_clear(mm_cpumask(mm));
62566 mm->mm_rb = RB_ROOT;
62567@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
62568
62569 prev = NULL;
62570 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
62571- struct file *file;
62572-
62573 if (mpnt->vm_flags & VM_DONTCOPY) {
62574 long pages = vma_pages(mpnt);
62575 mm->total_vm -= pages;
62576@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
62577 -pages);
62578 continue;
62579 }
62580- charge = 0;
62581- if (mpnt->vm_flags & VM_ACCOUNT) {
62582- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62583- if (security_vm_enough_memory(len))
62584- goto fail_nomem;
62585- charge = len;
62586- }
62587- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62588- if (!tmp)
62589- goto fail_nomem;
62590- *tmp = *mpnt;
62591- INIT_LIST_HEAD(&tmp->anon_vma_chain);
62592- pol = mpol_dup(vma_policy(mpnt));
62593- retval = PTR_ERR(pol);
62594- if (IS_ERR(pol))
62595- goto fail_nomem_policy;
62596- vma_set_policy(tmp, pol);
62597- tmp->vm_mm = mm;
62598- if (anon_vma_fork(tmp, mpnt))
62599- goto fail_nomem_anon_vma_fork;
62600- tmp->vm_flags &= ~VM_LOCKED;
62601- tmp->vm_next = tmp->vm_prev = NULL;
62602- file = tmp->vm_file;
62603- if (file) {
62604- struct inode *inode = file->f_path.dentry->d_inode;
62605- struct address_space *mapping = file->f_mapping;
62606-
62607- get_file(file);
62608- if (tmp->vm_flags & VM_DENYWRITE)
62609- atomic_dec(&inode->i_writecount);
62610- mutex_lock(&mapping->i_mmap_mutex);
62611- if (tmp->vm_flags & VM_SHARED)
62612- mapping->i_mmap_writable++;
62613- flush_dcache_mmap_lock(mapping);
62614- /* insert tmp into the share list, just after mpnt */
62615- vma_prio_tree_add(tmp, mpnt);
62616- flush_dcache_mmap_unlock(mapping);
62617- mutex_unlock(&mapping->i_mmap_mutex);
62618+ tmp = dup_vma(mm, mpnt);
62619+ if (!tmp) {
62620+ retval = -ENOMEM;
62621+ goto out;
62622 }
62623
62624 /*
62625- * Clear hugetlb-related page reserves for children. This only
62626- * affects MAP_PRIVATE mappings. Faults generated by the child
62627- * are not guaranteed to succeed, even if read-only
62628- */
62629- if (is_vm_hugetlb_page(tmp))
62630- reset_vma_resv_huge_pages(tmp);
62631-
62632- /*
62633 * Link in the new vma and copy the page table entries.
62634 */
62635 *pprev = tmp;
62636@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
62637 if (retval)
62638 goto out;
62639 }
62640+
62641+#ifdef CONFIG_PAX_SEGMEXEC
62642+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62643+ struct vm_area_struct *mpnt_m;
62644+
62645+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62646+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62647+
62648+ if (!mpnt->vm_mirror)
62649+ continue;
62650+
62651+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62652+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62653+ mpnt->vm_mirror = mpnt_m;
62654+ } else {
62655+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62656+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62657+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62658+ mpnt->vm_mirror->vm_mirror = mpnt;
62659+ }
62660+ }
62661+ BUG_ON(mpnt_m);
62662+ }
62663+#endif
62664+
62665 /* a new mm has just been created */
62666 arch_dup_mmap(oldmm, mm);
62667 retval = 0;
62668@@ -429,14 +474,6 @@ out:
62669 flush_tlb_mm(oldmm);
62670 up_write(&oldmm->mmap_sem);
62671 return retval;
62672-fail_nomem_anon_vma_fork:
62673- mpol_put(pol);
62674-fail_nomem_policy:
62675- kmem_cache_free(vm_area_cachep, tmp);
62676-fail_nomem:
62677- retval = -ENOMEM;
62678- vm_unacct_memory(charge);
62679- goto out;
62680 }
62681
62682 static inline int mm_alloc_pgd(struct mm_struct * mm)
62683@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
62684 spin_unlock(&fs->lock);
62685 return -EAGAIN;
62686 }
62687- fs->users++;
62688+ atomic_inc(&fs->users);
62689 spin_unlock(&fs->lock);
62690 return 0;
62691 }
62692 tsk->fs = copy_fs_struct(fs);
62693 if (!tsk->fs)
62694 return -ENOMEM;
62695+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62696 return 0;
62697 }
62698
62699@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
62700 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62701 #endif
62702 retval = -EAGAIN;
62703+
62704+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62705+
62706 if (atomic_read(&p->real_cred->user->processes) >=
62707 task_rlimit(p, RLIMIT_NPROC)) {
62708- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62709- p->real_cred->user != INIT_USER)
62710+ if (p->real_cred->user != INIT_USER &&
62711+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
62712 goto bad_fork_free;
62713 }
62714+ current->flags &= ~PF_NPROC_EXCEEDED;
62715
62716 retval = copy_creds(p, clone_flags);
62717 if (retval < 0)
62718@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
62719 if (clone_flags & CLONE_THREAD)
62720 p->tgid = current->tgid;
62721
62722+ gr_copy_label(p);
62723+
62724 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62725 /*
62726 * Clear TID on mm_release()?
62727@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
62728 bad_fork_free:
62729 free_task(p);
62730 fork_out:
62731+ gr_log_forkfail(retval);
62732+
62733 return ERR_PTR(retval);
62734 }
62735
62736@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
62737 if (clone_flags & CLONE_PARENT_SETTID)
62738 put_user(nr, parent_tidptr);
62739
62740+ gr_handle_brute_check();
62741+
62742 if (clone_flags & CLONE_VFORK) {
62743 p->vfork_done = &vfork;
62744 init_completion(&vfork);
62745@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
62746 return 0;
62747
62748 /* don't need lock here; in the worst case we'll do useless copy */
62749- if (fs->users == 1)
62750+ if (atomic_read(&fs->users) == 1)
62751 return 0;
62752
62753 *new_fsp = copy_fs_struct(fs);
62754@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62755 fs = current->fs;
62756 spin_lock(&fs->lock);
62757 current->fs = new_fs;
62758- if (--fs->users)
62759+ gr_set_chroot_entries(current, &current->fs->root);
62760+ if (atomic_dec_return(&fs->users))
62761 new_fs = NULL;
62762 else
62763 new_fs = fs;
62764diff -urNp linux-3.0.7/kernel/futex.c linux-3.0.7/kernel/futex.c
62765--- linux-3.0.7/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
62766+++ linux-3.0.7/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
62767@@ -54,6 +54,7 @@
62768 #include <linux/mount.h>
62769 #include <linux/pagemap.h>
62770 #include <linux/syscalls.h>
62771+#include <linux/ptrace.h>
62772 #include <linux/signal.h>
62773 #include <linux/module.h>
62774 #include <linux/magic.h>
62775@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62776 struct page *page, *page_head;
62777 int err, ro = 0;
62778
62779+#ifdef CONFIG_PAX_SEGMEXEC
62780+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62781+ return -EFAULT;
62782+#endif
62783+
62784 /*
62785 * The futex address must be "naturally" aligned.
62786 */
62787@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
62788 struct futex_q q = futex_q_init;
62789 int ret;
62790
62791+ pax_track_stack();
62792+
62793 if (!bitset)
62794 return -EINVAL;
62795 q.bitset = bitset;
62796@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
62797 struct futex_q q = futex_q_init;
62798 int res, ret;
62799
62800+ pax_track_stack();
62801+
62802 if (!bitset)
62803 return -EINVAL;
62804
62805@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62806 {
62807 struct robust_list_head __user *head;
62808 unsigned long ret;
62809+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62810 const struct cred *cred = current_cred(), *pcred;
62811+#endif
62812
62813 if (!futex_cmpxchg_enabled)
62814 return -ENOSYS;
62815@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62816 if (!p)
62817 goto err_unlock;
62818 ret = -EPERM;
62819+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62820+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62821+ goto err_unlock;
62822+#else
62823 pcred = __task_cred(p);
62824 /* If victim is in different user_ns, then uids are not
62825 comparable, so we must have CAP_SYS_PTRACE */
62826@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62827 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62828 goto err_unlock;
62829 ok:
62830+#endif
62831 head = p->robust_list;
62832 rcu_read_unlock();
62833 }
62834@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
62835 {
62836 u32 curval;
62837 int i;
62838+ mm_segment_t oldfs;
62839
62840 /*
62841 * This will fail and we want it. Some arch implementations do
62842@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
62843 * implementation, the non-functional ones will return
62844 * -ENOSYS.
62845 */
62846+ oldfs = get_fs();
62847+ set_fs(USER_DS);
62848 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
62849 futex_cmpxchg_enabled = 1;
62850+ set_fs(oldfs);
62851
62852 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
62853 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
62854diff -urNp linux-3.0.7/kernel/futex_compat.c linux-3.0.7/kernel/futex_compat.c
62855--- linux-3.0.7/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
62856+++ linux-3.0.7/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
62857@@ -10,6 +10,7 @@
62858 #include <linux/compat.h>
62859 #include <linux/nsproxy.h>
62860 #include <linux/futex.h>
62861+#include <linux/ptrace.h>
62862
62863 #include <asm/uaccess.h>
62864
62865@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
62866 {
62867 struct compat_robust_list_head __user *head;
62868 unsigned long ret;
62869- const struct cred *cred = current_cred(), *pcred;
62870+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62871+ const struct cred *cred = current_cred();
62872+ const struct cred *pcred;
62873+#endif
62874
62875 if (!futex_cmpxchg_enabled)
62876 return -ENOSYS;
62877@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
62878 if (!p)
62879 goto err_unlock;
62880 ret = -EPERM;
62881+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62882+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62883+ goto err_unlock;
62884+#else
62885 pcred = __task_cred(p);
62886 /* If victim is in different user_ns, then uids are not
62887 comparable, so we must have CAP_SYS_PTRACE */
62888@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
62889 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62890 goto err_unlock;
62891 ok:
62892+#endif
62893 head = p->compat_robust_list;
62894 rcu_read_unlock();
62895 }
62896diff -urNp linux-3.0.7/kernel/gcov/base.c linux-3.0.7/kernel/gcov/base.c
62897--- linux-3.0.7/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
62898+++ linux-3.0.7/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
62899@@ -102,11 +102,6 @@ void gcov_enable_events(void)
62900 }
62901
62902 #ifdef CONFIG_MODULES
62903-static inline int within(void *addr, void *start, unsigned long size)
62904-{
62905- return ((addr >= start) && (addr < start + size));
62906-}
62907-
62908 /* Update list and generate events when modules are unloaded. */
62909 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62910 void *data)
62911@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62912 prev = NULL;
62913 /* Remove entries located in module from linked list. */
62914 for (info = gcov_info_head; info; info = info->next) {
62915- if (within(info, mod->module_core, mod->core_size)) {
62916+ if (within_module_core_rw((unsigned long)info, mod)) {
62917 if (prev)
62918 prev->next = info->next;
62919 else
62920diff -urNp linux-3.0.7/kernel/hrtimer.c linux-3.0.7/kernel/hrtimer.c
62921--- linux-3.0.7/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
62922+++ linux-3.0.7/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
62923@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62924 local_irq_restore(flags);
62925 }
62926
62927-static void run_hrtimer_softirq(struct softirq_action *h)
62928+static void run_hrtimer_softirq(void)
62929 {
62930 hrtimer_peek_ahead_timers();
62931 }
62932diff -urNp linux-3.0.7/kernel/jump_label.c linux-3.0.7/kernel/jump_label.c
62933--- linux-3.0.7/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
62934+++ linux-3.0.7/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
62935@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
62936
62937 size = (((unsigned long)stop - (unsigned long)start)
62938 / sizeof(struct jump_entry));
62939+ pax_open_kernel();
62940 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
62941+ pax_close_kernel();
62942 }
62943
62944 static void jump_label_update(struct jump_label_key *key, int enable);
62945@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
62946 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
62947 struct jump_entry *iter;
62948
62949+ pax_open_kernel();
62950 for (iter = iter_start; iter < iter_stop; iter++) {
62951 if (within_module_init(iter->code, mod))
62952 iter->code = 0;
62953 }
62954+ pax_close_kernel();
62955 }
62956
62957 static int
62958diff -urNp linux-3.0.7/kernel/kallsyms.c linux-3.0.7/kernel/kallsyms.c
62959--- linux-3.0.7/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
62960+++ linux-3.0.7/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
62961@@ -11,6 +11,9 @@
62962 * Changed the compression method from stem compression to "table lookup"
62963 * compression (see scripts/kallsyms.c for a more complete description)
62964 */
62965+#ifdef CONFIG_GRKERNSEC_HIDESYM
62966+#define __INCLUDED_BY_HIDESYM 1
62967+#endif
62968 #include <linux/kallsyms.h>
62969 #include <linux/module.h>
62970 #include <linux/init.h>
62971@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
62972
62973 static inline int is_kernel_inittext(unsigned long addr)
62974 {
62975+ if (system_state != SYSTEM_BOOTING)
62976+ return 0;
62977+
62978 if (addr >= (unsigned long)_sinittext
62979 && addr <= (unsigned long)_einittext)
62980 return 1;
62981 return 0;
62982 }
62983
62984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62985+#ifdef CONFIG_MODULES
62986+static inline int is_module_text(unsigned long addr)
62987+{
62988+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62989+ return 1;
62990+
62991+ addr = ktla_ktva(addr);
62992+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62993+}
62994+#else
62995+static inline int is_module_text(unsigned long addr)
62996+{
62997+ return 0;
62998+}
62999+#endif
63000+#endif
63001+
63002 static inline int is_kernel_text(unsigned long addr)
63003 {
63004 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63005@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63006
63007 static inline int is_kernel(unsigned long addr)
63008 {
63009+
63010+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63011+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63012+ return 1;
63013+
63014+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63015+#else
63016 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63017+#endif
63018+
63019 return 1;
63020 return in_gate_area_no_mm(addr);
63021 }
63022
63023 static int is_ksym_addr(unsigned long addr)
63024 {
63025+
63026+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63027+ if (is_module_text(addr))
63028+ return 0;
63029+#endif
63030+
63031 if (all_var)
63032 return is_kernel(addr);
63033
63034@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63035
63036 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63037 {
63038- iter->name[0] = '\0';
63039 iter->nameoff = get_symbol_offset(new_pos);
63040 iter->pos = new_pos;
63041 }
63042@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63043 {
63044 struct kallsym_iter *iter = m->private;
63045
63046+#ifdef CONFIG_GRKERNSEC_HIDESYM
63047+ if (current_uid())
63048+ return 0;
63049+#endif
63050+
63051 /* Some debugging symbols have no name. Ignore them. */
63052 if (!iter->name[0])
63053 return 0;
63054@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63055 struct kallsym_iter *iter;
63056 int ret;
63057
63058- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63059+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63060 if (!iter)
63061 return -ENOMEM;
63062 reset_iter(iter, 0);
63063diff -urNp linux-3.0.7/kernel/kexec.c linux-3.0.7/kernel/kexec.c
63064--- linux-3.0.7/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
63065+++ linux-3.0.7/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
63066@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63067 unsigned long flags)
63068 {
63069 struct compat_kexec_segment in;
63070- struct kexec_segment out, __user *ksegments;
63071+ struct kexec_segment out;
63072+ struct kexec_segment __user *ksegments;
63073 unsigned long i, result;
63074
63075 /* Don't allow clients that don't understand the native
63076diff -urNp linux-3.0.7/kernel/kmod.c linux-3.0.7/kernel/kmod.c
63077--- linux-3.0.7/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
63078+++ linux-3.0.7/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
63079@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63080 * If module auto-loading support is disabled then this function
63081 * becomes a no-operation.
63082 */
63083-int __request_module(bool wait, const char *fmt, ...)
63084+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63085 {
63086- va_list args;
63087 char module_name[MODULE_NAME_LEN];
63088 unsigned int max_modprobes;
63089 int ret;
63090- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63091+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63092 static char *envp[] = { "HOME=/",
63093 "TERM=linux",
63094 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63095@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63096 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63097 static int kmod_loop_msg;
63098
63099- va_start(args, fmt);
63100- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63101- va_end(args);
63102+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63103 if (ret >= MODULE_NAME_LEN)
63104 return -ENAMETOOLONG;
63105
63106@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63107 if (ret)
63108 return ret;
63109
63110+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63111+ if (!current_uid()) {
63112+ /* hack to workaround consolekit/udisks stupidity */
63113+ read_lock(&tasklist_lock);
63114+ if (!strcmp(current->comm, "mount") &&
63115+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63116+ read_unlock(&tasklist_lock);
63117+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63118+ return -EPERM;
63119+ }
63120+ read_unlock(&tasklist_lock);
63121+ }
63122+#endif
63123+
63124 /* If modprobe needs a service that is in a module, we get a recursive
63125 * loop. Limit the number of running kmod threads to max_threads/2 or
63126 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63127@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
63128 atomic_dec(&kmod_concurrent);
63129 return ret;
63130 }
63131+
63132+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63133+{
63134+ va_list args;
63135+ int ret;
63136+
63137+ va_start(args, fmt);
63138+ ret = ____request_module(wait, module_param, fmt, args);
63139+ va_end(args);
63140+
63141+ return ret;
63142+}
63143+
63144+int __request_module(bool wait, const char *fmt, ...)
63145+{
63146+ va_list args;
63147+ int ret;
63148+
63149+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63150+ if (current_uid()) {
63151+ char module_param[MODULE_NAME_LEN];
63152+
63153+ memset(module_param, 0, sizeof(module_param));
63154+
63155+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63156+
63157+ va_start(args, fmt);
63158+ ret = ____request_module(wait, module_param, fmt, args);
63159+ va_end(args);
63160+
63161+ return ret;
63162+ }
63163+#endif
63164+
63165+ va_start(args, fmt);
63166+ ret = ____request_module(wait, NULL, fmt, args);
63167+ va_end(args);
63168+
63169+ return ret;
63170+}
63171+
63172 EXPORT_SYMBOL(__request_module);
63173 #endif /* CONFIG_MODULES */
63174
63175@@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
63176 *
63177 * Thus the __user pointer cast is valid here.
63178 */
63179- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63180+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63181
63182 /*
63183 * If ret is 0, either ____call_usermodehelper failed and the
63184diff -urNp linux-3.0.7/kernel/kprobes.c linux-3.0.7/kernel/kprobes.c
63185--- linux-3.0.7/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
63186+++ linux-3.0.7/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
63187@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63188 * kernel image and loaded module images reside. This is required
63189 * so x86_64 can correctly handle the %rip-relative fixups.
63190 */
63191- kip->insns = module_alloc(PAGE_SIZE);
63192+ kip->insns = module_alloc_exec(PAGE_SIZE);
63193 if (!kip->insns) {
63194 kfree(kip);
63195 return NULL;
63196@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63197 */
63198 if (!list_is_singular(&kip->list)) {
63199 list_del(&kip->list);
63200- module_free(NULL, kip->insns);
63201+ module_free_exec(NULL, kip->insns);
63202 kfree(kip);
63203 }
63204 return 1;
63205@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
63206 {
63207 int i, err = 0;
63208 unsigned long offset = 0, size = 0;
63209- char *modname, namebuf[128];
63210+ char *modname, namebuf[KSYM_NAME_LEN];
63211 const char *symbol_name;
63212 void *addr;
63213 struct kprobe_blackpoint *kb;
63214@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
63215 const char *sym = NULL;
63216 unsigned int i = *(loff_t *) v;
63217 unsigned long offset = 0;
63218- char *modname, namebuf[128];
63219+ char *modname, namebuf[KSYM_NAME_LEN];
63220
63221 head = &kprobe_table[i];
63222 preempt_disable();
63223diff -urNp linux-3.0.7/kernel/lockdep.c linux-3.0.7/kernel/lockdep.c
63224--- linux-3.0.7/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
63225+++ linux-3.0.7/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
63226@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63227 end = (unsigned long) &_end,
63228 addr = (unsigned long) obj;
63229
63230+#ifdef CONFIG_PAX_KERNEXEC
63231+ start = ktla_ktva(start);
63232+#endif
63233+
63234 /*
63235 * static variable?
63236 */
63237@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63238 if (!static_obj(lock->key)) {
63239 debug_locks_off();
63240 printk("INFO: trying to register non-static key.\n");
63241+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63242 printk("the code is fine but needs lockdep annotation.\n");
63243 printk("turning off the locking correctness validator.\n");
63244 dump_stack();
63245@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
63246 if (!class)
63247 return 0;
63248 }
63249- atomic_inc((atomic_t *)&class->ops);
63250+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63251 if (very_verbose(class)) {
63252 printk("\nacquire class [%p] %s", class->key, class->name);
63253 if (class->name_version > 1)
63254diff -urNp linux-3.0.7/kernel/lockdep_proc.c linux-3.0.7/kernel/lockdep_proc.c
63255--- linux-3.0.7/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
63256+++ linux-3.0.7/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
63257@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63258
63259 static void print_name(struct seq_file *m, struct lock_class *class)
63260 {
63261- char str[128];
63262+ char str[KSYM_NAME_LEN];
63263 const char *name = class->name;
63264
63265 if (!name) {
63266diff -urNp linux-3.0.7/kernel/module.c linux-3.0.7/kernel/module.c
63267--- linux-3.0.7/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
63268+++ linux-3.0.7/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
63269@@ -58,6 +58,7 @@
63270 #include <linux/jump_label.h>
63271 #include <linux/pfn.h>
63272 #include <linux/bsearch.h>
63273+#include <linux/grsecurity.h>
63274
63275 #define CREATE_TRACE_POINTS
63276 #include <trace/events/module.h>
63277@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63278
63279 /* Bounds of module allocation, for speeding __module_address.
63280 * Protected by module_mutex. */
63281-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63282+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63283+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63284
63285 int register_module_notifier(struct notifier_block * nb)
63286 {
63287@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63288 return true;
63289
63290 list_for_each_entry_rcu(mod, &modules, list) {
63291- struct symsearch arr[] = {
63292+ struct symsearch modarr[] = {
63293 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63294 NOT_GPL_ONLY, false },
63295 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63296@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63297 #endif
63298 };
63299
63300- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63301+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63302 return true;
63303 }
63304 return false;
63305@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63306 static int percpu_modalloc(struct module *mod,
63307 unsigned long size, unsigned long align)
63308 {
63309- if (align > PAGE_SIZE) {
63310+ if (align-1 >= PAGE_SIZE) {
63311 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63312 mod->name, align, PAGE_SIZE);
63313 align = PAGE_SIZE;
63314@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
63315 */
63316 #ifdef CONFIG_SYSFS
63317
63318-#ifdef CONFIG_KALLSYMS
63319+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63320 static inline bool sect_empty(const Elf_Shdr *sect)
63321 {
63322 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63323@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
63324
63325 static void unset_module_core_ro_nx(struct module *mod)
63326 {
63327- set_page_attributes(mod->module_core + mod->core_text_size,
63328- mod->module_core + mod->core_size,
63329+ set_page_attributes(mod->module_core_rw,
63330+ mod->module_core_rw + mod->core_size_rw,
63331 set_memory_x);
63332- set_page_attributes(mod->module_core,
63333- mod->module_core + mod->core_ro_size,
63334+ set_page_attributes(mod->module_core_rx,
63335+ mod->module_core_rx + mod->core_size_rx,
63336 set_memory_rw);
63337 }
63338
63339 static void unset_module_init_ro_nx(struct module *mod)
63340 {
63341- set_page_attributes(mod->module_init + mod->init_text_size,
63342- mod->module_init + mod->init_size,
63343+ set_page_attributes(mod->module_init_rw,
63344+ mod->module_init_rw + mod->init_size_rw,
63345 set_memory_x);
63346- set_page_attributes(mod->module_init,
63347- mod->module_init + mod->init_ro_size,
63348+ set_page_attributes(mod->module_init_rx,
63349+ mod->module_init_rx + mod->init_size_rx,
63350 set_memory_rw);
63351 }
63352
63353@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
63354
63355 mutex_lock(&module_mutex);
63356 list_for_each_entry_rcu(mod, &modules, list) {
63357- if ((mod->module_core) && (mod->core_text_size)) {
63358- set_page_attributes(mod->module_core,
63359- mod->module_core + mod->core_text_size,
63360+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63361+ set_page_attributes(mod->module_core_rx,
63362+ mod->module_core_rx + mod->core_size_rx,
63363 set_memory_rw);
63364 }
63365- if ((mod->module_init) && (mod->init_text_size)) {
63366- set_page_attributes(mod->module_init,
63367- mod->module_init + mod->init_text_size,
63368+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63369+ set_page_attributes(mod->module_init_rx,
63370+ mod->module_init_rx + mod->init_size_rx,
63371 set_memory_rw);
63372 }
63373 }
63374@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
63375
63376 mutex_lock(&module_mutex);
63377 list_for_each_entry_rcu(mod, &modules, list) {
63378- if ((mod->module_core) && (mod->core_text_size)) {
63379- set_page_attributes(mod->module_core,
63380- mod->module_core + mod->core_text_size,
63381+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63382+ set_page_attributes(mod->module_core_rx,
63383+ mod->module_core_rx + mod->core_size_rx,
63384 set_memory_ro);
63385 }
63386- if ((mod->module_init) && (mod->init_text_size)) {
63387- set_page_attributes(mod->module_init,
63388- mod->module_init + mod->init_text_size,
63389+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63390+ set_page_attributes(mod->module_init_rx,
63391+ mod->module_init_rx + mod->init_size_rx,
63392 set_memory_ro);
63393 }
63394 }
63395@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
63396
63397 /* This may be NULL, but that's OK */
63398 unset_module_init_ro_nx(mod);
63399- module_free(mod, mod->module_init);
63400+ module_free(mod, mod->module_init_rw);
63401+ module_free_exec(mod, mod->module_init_rx);
63402 kfree(mod->args);
63403 percpu_modfree(mod);
63404
63405 /* Free lock-classes: */
63406- lockdep_free_key_range(mod->module_core, mod->core_size);
63407+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63408+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63409
63410 /* Finally, free the core (containing the module structure) */
63411 unset_module_core_ro_nx(mod);
63412- module_free(mod, mod->module_core);
63413+ module_free_exec(mod, mod->module_core_rx);
63414+ module_free(mod, mod->module_core_rw);
63415
63416 #ifdef CONFIG_MPU
63417 update_protections(current->mm);
63418@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
63419 unsigned int i;
63420 int ret = 0;
63421 const struct kernel_symbol *ksym;
63422+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63423+ int is_fs_load = 0;
63424+ int register_filesystem_found = 0;
63425+ char *p;
63426+
63427+ p = strstr(mod->args, "grsec_modharden_fs");
63428+ if (p) {
63429+ char *endptr = p + strlen("grsec_modharden_fs");
63430+ /* copy \0 as well */
63431+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63432+ is_fs_load = 1;
63433+ }
63434+#endif
63435
63436 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63437 const char *name = info->strtab + sym[i].st_name;
63438
63439+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63440+ /* it's a real shame this will never get ripped and copied
63441+ upstream! ;(
63442+ */
63443+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63444+ register_filesystem_found = 1;
63445+#endif
63446+
63447 switch (sym[i].st_shndx) {
63448 case SHN_COMMON:
63449 /* We compiled with -fno-common. These are not
63450@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
63451 ksym = resolve_symbol_wait(mod, info, name);
63452 /* Ok if resolved. */
63453 if (ksym && !IS_ERR(ksym)) {
63454+ pax_open_kernel();
63455 sym[i].st_value = ksym->value;
63456+ pax_close_kernel();
63457 break;
63458 }
63459
63460@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
63461 secbase = (unsigned long)mod_percpu(mod);
63462 else
63463 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63464+ pax_open_kernel();
63465 sym[i].st_value += secbase;
63466+ pax_close_kernel();
63467 break;
63468 }
63469 }
63470
63471+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63472+ if (is_fs_load && !register_filesystem_found) {
63473+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63474+ ret = -EPERM;
63475+ }
63476+#endif
63477+
63478 return ret;
63479 }
63480
63481@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
63482 || s->sh_entsize != ~0UL
63483 || strstarts(sname, ".init"))
63484 continue;
63485- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63486+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63487+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63488+ else
63489+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63490 DEBUGP("\t%s\n", name);
63491 }
63492- switch (m) {
63493- case 0: /* executable */
63494- mod->core_size = debug_align(mod->core_size);
63495- mod->core_text_size = mod->core_size;
63496- break;
63497- case 1: /* RO: text and ro-data */
63498- mod->core_size = debug_align(mod->core_size);
63499- mod->core_ro_size = mod->core_size;
63500- break;
63501- case 3: /* whole core */
63502- mod->core_size = debug_align(mod->core_size);
63503- break;
63504- }
63505 }
63506
63507 DEBUGP("Init section allocation order:\n");
63508@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
63509 || s->sh_entsize != ~0UL
63510 || !strstarts(sname, ".init"))
63511 continue;
63512- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63513- | INIT_OFFSET_MASK);
63514+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63515+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63516+ else
63517+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63518+ s->sh_entsize |= INIT_OFFSET_MASK;
63519 DEBUGP("\t%s\n", sname);
63520 }
63521- switch (m) {
63522- case 0: /* executable */
63523- mod->init_size = debug_align(mod->init_size);
63524- mod->init_text_size = mod->init_size;
63525- break;
63526- case 1: /* RO: text and ro-data */
63527- mod->init_size = debug_align(mod->init_size);
63528- mod->init_ro_size = mod->init_size;
63529- break;
63530- case 3: /* whole init */
63531- mod->init_size = debug_align(mod->init_size);
63532- break;
63533- }
63534 }
63535 }
63536
63537@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
63538
63539 /* Put symbol section at end of init part of module. */
63540 symsect->sh_flags |= SHF_ALLOC;
63541- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63542+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63543 info->index.sym) | INIT_OFFSET_MASK;
63544 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63545
63546@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
63547 }
63548
63549 /* Append room for core symbols at end of core part. */
63550- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63551- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63552+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63553+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63554
63555 /* Put string table section at end of init part of module. */
63556 strsect->sh_flags |= SHF_ALLOC;
63557- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63558+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63559 info->index.str) | INIT_OFFSET_MASK;
63560 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63561
63562 /* Append room for core symbols' strings at end of core part. */
63563- info->stroffs = mod->core_size;
63564+ info->stroffs = mod->core_size_rx;
63565 __set_bit(0, info->strmap);
63566- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63567+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63568 }
63569
63570 static void add_kallsyms(struct module *mod, const struct load_info *info)
63571@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
63572 /* Make sure we get permanent strtab: don't use info->strtab. */
63573 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63574
63575+ pax_open_kernel();
63576+
63577 /* Set types up while we still have access to sections. */
63578 for (i = 0; i < mod->num_symtab; i++)
63579 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63580
63581- mod->core_symtab = dst = mod->module_core + info->symoffs;
63582+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63583 src = mod->symtab;
63584 *dst = *src;
63585 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63586@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
63587 }
63588 mod->core_num_syms = ndst;
63589
63590- mod->core_strtab = s = mod->module_core + info->stroffs;
63591+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63592 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63593 if (test_bit(i, info->strmap))
63594 *++s = mod->strtab[i];
63595+
63596+ pax_close_kernel();
63597 }
63598 #else
63599 static inline void layout_symtab(struct module *mod, struct load_info *info)
63600@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
63601 ddebug_remove_module(debug->modname);
63602 }
63603
63604-static void *module_alloc_update_bounds(unsigned long size)
63605+static void *module_alloc_update_bounds_rw(unsigned long size)
63606 {
63607 void *ret = module_alloc(size);
63608
63609 if (ret) {
63610 mutex_lock(&module_mutex);
63611 /* Update module bounds. */
63612- if ((unsigned long)ret < module_addr_min)
63613- module_addr_min = (unsigned long)ret;
63614- if ((unsigned long)ret + size > module_addr_max)
63615- module_addr_max = (unsigned long)ret + size;
63616+ if ((unsigned long)ret < module_addr_min_rw)
63617+ module_addr_min_rw = (unsigned long)ret;
63618+ if ((unsigned long)ret + size > module_addr_max_rw)
63619+ module_addr_max_rw = (unsigned long)ret + size;
63620+ mutex_unlock(&module_mutex);
63621+ }
63622+ return ret;
63623+}
63624+
63625+static void *module_alloc_update_bounds_rx(unsigned long size)
63626+{
63627+ void *ret = module_alloc_exec(size);
63628+
63629+ if (ret) {
63630+ mutex_lock(&module_mutex);
63631+ /* Update module bounds. */
63632+ if ((unsigned long)ret < module_addr_min_rx)
63633+ module_addr_min_rx = (unsigned long)ret;
63634+ if ((unsigned long)ret + size > module_addr_max_rx)
63635+ module_addr_max_rx = (unsigned long)ret + size;
63636 mutex_unlock(&module_mutex);
63637 }
63638 return ret;
63639@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
63640 void *ptr;
63641
63642 /* Do the allocs. */
63643- ptr = module_alloc_update_bounds(mod->core_size);
63644+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63645 /*
63646 * The pointer to this block is stored in the module structure
63647 * which is inside the block. Just mark it as not being a
63648@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
63649 if (!ptr)
63650 return -ENOMEM;
63651
63652- memset(ptr, 0, mod->core_size);
63653- mod->module_core = ptr;
63654+ memset(ptr, 0, mod->core_size_rw);
63655+ mod->module_core_rw = ptr;
63656
63657- ptr = module_alloc_update_bounds(mod->init_size);
63658+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63659 /*
63660 * The pointer to this block is stored in the module structure
63661 * which is inside the block. This block doesn't need to be
63662 * scanned as it contains data and code that will be freed
63663 * after the module is initialized.
63664 */
63665- kmemleak_ignore(ptr);
63666- if (!ptr && mod->init_size) {
63667- module_free(mod, mod->module_core);
63668+ kmemleak_not_leak(ptr);
63669+ if (!ptr && mod->init_size_rw) {
63670+ module_free(mod, mod->module_core_rw);
63671 return -ENOMEM;
63672 }
63673- memset(ptr, 0, mod->init_size);
63674- mod->module_init = ptr;
63675+ memset(ptr, 0, mod->init_size_rw);
63676+ mod->module_init_rw = ptr;
63677+
63678+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63679+ kmemleak_not_leak(ptr);
63680+ if (!ptr) {
63681+ module_free(mod, mod->module_init_rw);
63682+ module_free(mod, mod->module_core_rw);
63683+ return -ENOMEM;
63684+ }
63685+
63686+ pax_open_kernel();
63687+ memset(ptr, 0, mod->core_size_rx);
63688+ pax_close_kernel();
63689+ mod->module_core_rx = ptr;
63690+
63691+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63692+ kmemleak_not_leak(ptr);
63693+ if (!ptr && mod->init_size_rx) {
63694+ module_free_exec(mod, mod->module_core_rx);
63695+ module_free(mod, mod->module_init_rw);
63696+ module_free(mod, mod->module_core_rw);
63697+ return -ENOMEM;
63698+ }
63699+
63700+ pax_open_kernel();
63701+ memset(ptr, 0, mod->init_size_rx);
63702+ pax_close_kernel();
63703+ mod->module_init_rx = ptr;
63704
63705 /* Transfer each section which specifies SHF_ALLOC */
63706 DEBUGP("final section addresses:\n");
63707@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
63708 if (!(shdr->sh_flags & SHF_ALLOC))
63709 continue;
63710
63711- if (shdr->sh_entsize & INIT_OFFSET_MASK)
63712- dest = mod->module_init
63713- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63714- else
63715- dest = mod->module_core + shdr->sh_entsize;
63716+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
63717+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63718+ dest = mod->module_init_rw
63719+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63720+ else
63721+ dest = mod->module_init_rx
63722+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63723+ } else {
63724+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63725+ dest = mod->module_core_rw + shdr->sh_entsize;
63726+ else
63727+ dest = mod->module_core_rx + shdr->sh_entsize;
63728+ }
63729+
63730+ if (shdr->sh_type != SHT_NOBITS) {
63731+
63732+#ifdef CONFIG_PAX_KERNEXEC
63733+#ifdef CONFIG_X86_64
63734+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
63735+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63736+#endif
63737+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
63738+ pax_open_kernel();
63739+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63740+ pax_close_kernel();
63741+ } else
63742+#endif
63743
63744- if (shdr->sh_type != SHT_NOBITS)
63745 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63746+ }
63747 /* Update sh_addr to point to copy in image. */
63748- shdr->sh_addr = (unsigned long)dest;
63749+
63750+#ifdef CONFIG_PAX_KERNEXEC
63751+ if (shdr->sh_flags & SHF_EXECINSTR)
63752+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
63753+ else
63754+#endif
63755+
63756+ shdr->sh_addr = (unsigned long)dest;
63757 DEBUGP("\t0x%lx %s\n",
63758 shdr->sh_addr, info->secstrings + shdr->sh_name);
63759 }
63760@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
63761 * Do it before processing of module parameters, so the module
63762 * can provide parameter accessor functions of its own.
63763 */
63764- if (mod->module_init)
63765- flush_icache_range((unsigned long)mod->module_init,
63766- (unsigned long)mod->module_init
63767- + mod->init_size);
63768- flush_icache_range((unsigned long)mod->module_core,
63769- (unsigned long)mod->module_core + mod->core_size);
63770+ if (mod->module_init_rx)
63771+ flush_icache_range((unsigned long)mod->module_init_rx,
63772+ (unsigned long)mod->module_init_rx
63773+ + mod->init_size_rx);
63774+ flush_icache_range((unsigned long)mod->module_core_rx,
63775+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
63776
63777 set_fs(old_fs);
63778 }
63779@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
63780 {
63781 kfree(info->strmap);
63782 percpu_modfree(mod);
63783- module_free(mod, mod->module_init);
63784- module_free(mod, mod->module_core);
63785+ module_free_exec(mod, mod->module_init_rx);
63786+ module_free_exec(mod, mod->module_core_rx);
63787+ module_free(mod, mod->module_init_rw);
63788+ module_free(mod, mod->module_core_rw);
63789 }
63790
63791 static int post_relocation(struct module *mod, const struct load_info *info)
63792@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
63793 if (err)
63794 goto free_unload;
63795
63796+ /* Now copy in args */
63797+ mod->args = strndup_user(uargs, ~0UL >> 1);
63798+ if (IS_ERR(mod->args)) {
63799+ err = PTR_ERR(mod->args);
63800+ goto free_unload;
63801+ }
63802+
63803 /* Set up MODINFO_ATTR fields */
63804 setup_modinfo(mod, &info);
63805
63806+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63807+ {
63808+ char *p, *p2;
63809+
63810+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63811+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63812+ err = -EPERM;
63813+ goto free_modinfo;
63814+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63815+ p += strlen("grsec_modharden_normal");
63816+ p2 = strstr(p, "_");
63817+ if (p2) {
63818+ *p2 = '\0';
63819+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63820+ *p2 = '_';
63821+ }
63822+ err = -EPERM;
63823+ goto free_modinfo;
63824+ }
63825+ }
63826+#endif
63827+
63828 /* Fix up syms, so that st_value is a pointer to location. */
63829 err = simplify_symbols(mod, &info);
63830 if (err < 0)
63831@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
63832
63833 flush_module_icache(mod);
63834
63835- /* Now copy in args */
63836- mod->args = strndup_user(uargs, ~0UL >> 1);
63837- if (IS_ERR(mod->args)) {
63838- err = PTR_ERR(mod->args);
63839- goto free_arch_cleanup;
63840- }
63841-
63842 /* Mark state as coming so strong_try_module_get() ignores us. */
63843 mod->state = MODULE_STATE_COMING;
63844
63845@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
63846 unlock:
63847 mutex_unlock(&module_mutex);
63848 synchronize_sched();
63849- kfree(mod->args);
63850- free_arch_cleanup:
63851 module_arch_cleanup(mod);
63852 free_modinfo:
63853 free_modinfo(mod);
63854+ kfree(mod->args);
63855 free_unload:
63856 module_unload_free(mod);
63857 free_module:
63858@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
63859 MODULE_STATE_COMING, mod);
63860
63861 /* Set RO and NX regions for core */
63862- set_section_ro_nx(mod->module_core,
63863- mod->core_text_size,
63864- mod->core_ro_size,
63865- mod->core_size);
63866+ set_section_ro_nx(mod->module_core_rx,
63867+ mod->core_size_rx,
63868+ mod->core_size_rx,
63869+ mod->core_size_rx);
63870
63871 /* Set RO and NX regions for init */
63872- set_section_ro_nx(mod->module_init,
63873- mod->init_text_size,
63874- mod->init_ro_size,
63875- mod->init_size);
63876+ set_section_ro_nx(mod->module_init_rx,
63877+ mod->init_size_rx,
63878+ mod->init_size_rx,
63879+ mod->init_size_rx);
63880
63881 do_mod_ctors(mod);
63882 /* Start the module */
63883@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
63884 mod->strtab = mod->core_strtab;
63885 #endif
63886 unset_module_init_ro_nx(mod);
63887- module_free(mod, mod->module_init);
63888- mod->module_init = NULL;
63889- mod->init_size = 0;
63890- mod->init_ro_size = 0;
63891- mod->init_text_size = 0;
63892+ module_free(mod, mod->module_init_rw);
63893+ module_free_exec(mod, mod->module_init_rx);
63894+ mod->module_init_rw = NULL;
63895+ mod->module_init_rx = NULL;
63896+ mod->init_size_rw = 0;
63897+ mod->init_size_rx = 0;
63898 mutex_unlock(&module_mutex);
63899
63900 return 0;
63901@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
63902 unsigned long nextval;
63903
63904 /* At worse, next value is at end of module */
63905- if (within_module_init(addr, mod))
63906- nextval = (unsigned long)mod->module_init+mod->init_text_size;
63907+ if (within_module_init_rx(addr, mod))
63908+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63909+ else if (within_module_init_rw(addr, mod))
63910+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63911+ else if (within_module_core_rx(addr, mod))
63912+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63913+ else if (within_module_core_rw(addr, mod))
63914+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63915 else
63916- nextval = (unsigned long)mod->module_core+mod->core_text_size;
63917+ return NULL;
63918
63919 /* Scan for closest preceding symbol, and next symbol. (ELF
63920 starts real symbols at 1). */
63921@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
63922 char buf[8];
63923
63924 seq_printf(m, "%s %u",
63925- mod->name, mod->init_size + mod->core_size);
63926+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63927 print_unload_info(m, mod);
63928
63929 /* Informative for users. */
63930@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
63931 mod->state == MODULE_STATE_COMING ? "Loading":
63932 "Live");
63933 /* Used by oprofile and other similar tools. */
63934- seq_printf(m, " 0x%pK", mod->module_core);
63935+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
63936
63937 /* Taints info */
63938 if (mod->taints)
63939@@ -3283,7 +3406,17 @@ static const struct file_operations proc
63940
63941 static int __init proc_modules_init(void)
63942 {
63943+#ifndef CONFIG_GRKERNSEC_HIDESYM
63944+#ifdef CONFIG_GRKERNSEC_PROC_USER
63945+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63946+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63947+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63948+#else
63949 proc_create("modules", 0, NULL, &proc_modules_operations);
63950+#endif
63951+#else
63952+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63953+#endif
63954 return 0;
63955 }
63956 module_init(proc_modules_init);
63957@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
63958 {
63959 struct module *mod;
63960
63961- if (addr < module_addr_min || addr > module_addr_max)
63962+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63963+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
63964 return NULL;
63965
63966 list_for_each_entry_rcu(mod, &modules, list)
63967- if (within_module_core(addr, mod)
63968- || within_module_init(addr, mod))
63969+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
63970 return mod;
63971 return NULL;
63972 }
63973@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
63974 */
63975 struct module *__module_text_address(unsigned long addr)
63976 {
63977- struct module *mod = __module_address(addr);
63978+ struct module *mod;
63979+
63980+#ifdef CONFIG_X86_32
63981+ addr = ktla_ktva(addr);
63982+#endif
63983+
63984+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
63985+ return NULL;
63986+
63987+ mod = __module_address(addr);
63988+
63989 if (mod) {
63990 /* Make sure it's within the text section. */
63991- if (!within(addr, mod->module_init, mod->init_text_size)
63992- && !within(addr, mod->module_core, mod->core_text_size))
63993+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
63994 mod = NULL;
63995 }
63996 return mod;
63997diff -urNp linux-3.0.7/kernel/mutex.c linux-3.0.7/kernel/mutex.c
63998--- linux-3.0.7/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
63999+++ linux-3.0.7/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
64000@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64001 spin_lock_mutex(&lock->wait_lock, flags);
64002
64003 debug_mutex_lock_common(lock, &waiter);
64004- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64005+ debug_mutex_add_waiter(lock, &waiter, task);
64006
64007 /* add waiting tasks to the end of the waitqueue (FIFO): */
64008 list_add_tail(&waiter.list, &lock->wait_list);
64009@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64010 * TASK_UNINTERRUPTIBLE case.)
64011 */
64012 if (unlikely(signal_pending_state(state, task))) {
64013- mutex_remove_waiter(lock, &waiter,
64014- task_thread_info(task));
64015+ mutex_remove_waiter(lock, &waiter, task);
64016 mutex_release(&lock->dep_map, 1, ip);
64017 spin_unlock_mutex(&lock->wait_lock, flags);
64018
64019@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64020 done:
64021 lock_acquired(&lock->dep_map, ip);
64022 /* got the lock - rejoice! */
64023- mutex_remove_waiter(lock, &waiter, current_thread_info());
64024+ mutex_remove_waiter(lock, &waiter, task);
64025 mutex_set_owner(lock);
64026
64027 /* set it to 0 if there are no waiters left: */
64028diff -urNp linux-3.0.7/kernel/mutex-debug.c linux-3.0.7/kernel/mutex-debug.c
64029--- linux-3.0.7/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
64030+++ linux-3.0.7/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
64031@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64032 }
64033
64034 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64035- struct thread_info *ti)
64036+ struct task_struct *task)
64037 {
64038 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64039
64040 /* Mark the current thread as blocked on the lock: */
64041- ti->task->blocked_on = waiter;
64042+ task->blocked_on = waiter;
64043 }
64044
64045 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64046- struct thread_info *ti)
64047+ struct task_struct *task)
64048 {
64049 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64050- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64051- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64052- ti->task->blocked_on = NULL;
64053+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64054+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64055+ task->blocked_on = NULL;
64056
64057 list_del_init(&waiter->list);
64058 waiter->task = NULL;
64059diff -urNp linux-3.0.7/kernel/mutex-debug.h linux-3.0.7/kernel/mutex-debug.h
64060--- linux-3.0.7/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
64061+++ linux-3.0.7/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
64062@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64063 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64064 extern void debug_mutex_add_waiter(struct mutex *lock,
64065 struct mutex_waiter *waiter,
64066- struct thread_info *ti);
64067+ struct task_struct *task);
64068 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64069- struct thread_info *ti);
64070+ struct task_struct *task);
64071 extern void debug_mutex_unlock(struct mutex *lock);
64072 extern void debug_mutex_init(struct mutex *lock, const char *name,
64073 struct lock_class_key *key);
64074diff -urNp linux-3.0.7/kernel/padata.c linux-3.0.7/kernel/padata.c
64075--- linux-3.0.7/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
64076+++ linux-3.0.7/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
64077@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64078 padata->pd = pd;
64079 padata->cb_cpu = cb_cpu;
64080
64081- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64082- atomic_set(&pd->seq_nr, -1);
64083+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64084+ atomic_set_unchecked(&pd->seq_nr, -1);
64085
64086- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64087+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64088
64089 target_cpu = padata_cpu_hash(padata);
64090 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64091@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64092 padata_init_pqueues(pd);
64093 padata_init_squeues(pd);
64094 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64095- atomic_set(&pd->seq_nr, -1);
64096+ atomic_set_unchecked(&pd->seq_nr, -1);
64097 atomic_set(&pd->reorder_objects, 0);
64098 atomic_set(&pd->refcnt, 0);
64099 pd->pinst = pinst;
64100diff -urNp linux-3.0.7/kernel/panic.c linux-3.0.7/kernel/panic.c
64101--- linux-3.0.7/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
64102+++ linux-3.0.7/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
64103@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
64104 const char *board;
64105
64106 printk(KERN_WARNING "------------[ cut here ]------------\n");
64107- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64108+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64109 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64110 if (board)
64111 printk(KERN_WARNING "Hardware name: %s\n", board);
64112@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64113 */
64114 void __stack_chk_fail(void)
64115 {
64116- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64117+ dump_stack();
64118+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64119 __builtin_return_address(0));
64120 }
64121 EXPORT_SYMBOL(__stack_chk_fail);
64122diff -urNp linux-3.0.7/kernel/pid.c linux-3.0.7/kernel/pid.c
64123--- linux-3.0.7/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
64124+++ linux-3.0.7/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
64125@@ -33,6 +33,7 @@
64126 #include <linux/rculist.h>
64127 #include <linux/bootmem.h>
64128 #include <linux/hash.h>
64129+#include <linux/security.h>
64130 #include <linux/pid_namespace.h>
64131 #include <linux/init_task.h>
64132 #include <linux/syscalls.h>
64133@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64134
64135 int pid_max = PID_MAX_DEFAULT;
64136
64137-#define RESERVED_PIDS 300
64138+#define RESERVED_PIDS 500
64139
64140 int pid_max_min = RESERVED_PIDS + 1;
64141 int pid_max_max = PID_MAX_LIMIT;
64142@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
64143 */
64144 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64145 {
64146+ struct task_struct *task;
64147+
64148 rcu_lockdep_assert(rcu_read_lock_held());
64149- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64150+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64151+
64152+ if (gr_pid_is_chrooted(task))
64153+ return NULL;
64154+
64155+ return task;
64156 }
64157
64158 struct task_struct *find_task_by_vpid(pid_t vnr)
64159@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
64160 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64161 }
64162
64163+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64164+{
64165+ rcu_lockdep_assert(rcu_read_lock_held());
64166+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64167+}
64168+
64169 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64170 {
64171 struct pid *pid;
64172diff -urNp linux-3.0.7/kernel/posix-cpu-timers.c linux-3.0.7/kernel/posix-cpu-timers.c
64173--- linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:09.000000000 -0400
64174+++ linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:19.000000000 -0400
64175@@ -6,6 +6,7 @@
64176 #include <linux/posix-timers.h>
64177 #include <linux/errno.h>
64178 #include <linux/math64.h>
64179+#include <linux/security.h>
64180 #include <asm/uaccess.h>
64181 #include <linux/kernel_stat.h>
64182 #include <trace/events/timer.h>
64183@@ -1605,14 +1606,14 @@ struct k_clock clock_posix_cpu = {
64184
64185 static __init int init_posix_cpu_timers(void)
64186 {
64187- struct k_clock process = {
64188+ static struct k_clock process = {
64189 .clock_getres = process_cpu_clock_getres,
64190 .clock_get = process_cpu_clock_get,
64191 .timer_create = process_cpu_timer_create,
64192 .nsleep = process_cpu_nsleep,
64193 .nsleep_restart = process_cpu_nsleep_restart,
64194 };
64195- struct k_clock thread = {
64196+ static struct k_clock thread = {
64197 .clock_getres = thread_cpu_clock_getres,
64198 .clock_get = thread_cpu_clock_get,
64199 .timer_create = thread_cpu_timer_create,
64200diff -urNp linux-3.0.7/kernel/posix-timers.c linux-3.0.7/kernel/posix-timers.c
64201--- linux-3.0.7/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
64202+++ linux-3.0.7/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
64203@@ -43,6 +43,7 @@
64204 #include <linux/idr.h>
64205 #include <linux/posix-clock.h>
64206 #include <linux/posix-timers.h>
64207+#include <linux/grsecurity.h>
64208 #include <linux/syscalls.h>
64209 #include <linux/wait.h>
64210 #include <linux/workqueue.h>
64211@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64212 * which we beg off on and pass to do_sys_settimeofday().
64213 */
64214
64215-static struct k_clock posix_clocks[MAX_CLOCKS];
64216+static struct k_clock *posix_clocks[MAX_CLOCKS];
64217
64218 /*
64219 * These ones are defined below.
64220@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64221 */
64222 static __init int init_posix_timers(void)
64223 {
64224- struct k_clock clock_realtime = {
64225+ static struct k_clock clock_realtime = {
64226 .clock_getres = hrtimer_get_res,
64227 .clock_get = posix_clock_realtime_get,
64228 .clock_set = posix_clock_realtime_set,
64229@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64230 .timer_get = common_timer_get,
64231 .timer_del = common_timer_del,
64232 };
64233- struct k_clock clock_monotonic = {
64234+ static struct k_clock clock_monotonic = {
64235 .clock_getres = hrtimer_get_res,
64236 .clock_get = posix_ktime_get_ts,
64237 .nsleep = common_nsleep,
64238@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64239 .timer_get = common_timer_get,
64240 .timer_del = common_timer_del,
64241 };
64242- struct k_clock clock_monotonic_raw = {
64243+ static struct k_clock clock_monotonic_raw = {
64244 .clock_getres = hrtimer_get_res,
64245 .clock_get = posix_get_monotonic_raw,
64246 };
64247- struct k_clock clock_realtime_coarse = {
64248+ static struct k_clock clock_realtime_coarse = {
64249 .clock_getres = posix_get_coarse_res,
64250 .clock_get = posix_get_realtime_coarse,
64251 };
64252- struct k_clock clock_monotonic_coarse = {
64253+ static struct k_clock clock_monotonic_coarse = {
64254 .clock_getres = posix_get_coarse_res,
64255 .clock_get = posix_get_monotonic_coarse,
64256 };
64257- struct k_clock clock_boottime = {
64258+ static struct k_clock clock_boottime = {
64259 .clock_getres = hrtimer_get_res,
64260 .clock_get = posix_get_boottime,
64261 .nsleep = common_nsleep,
64262@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64263 .timer_del = common_timer_del,
64264 };
64265
64266+ pax_track_stack();
64267+
64268 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64269 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64270 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64271@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64272 return;
64273 }
64274
64275- posix_clocks[clock_id] = *new_clock;
64276+ posix_clocks[clock_id] = new_clock;
64277 }
64278 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64279
64280@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64281 return (id & CLOCKFD_MASK) == CLOCKFD ?
64282 &clock_posix_dynamic : &clock_posix_cpu;
64283
64284- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64285+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64286 return NULL;
64287- return &posix_clocks[id];
64288+ return posix_clocks[id];
64289 }
64290
64291 static int common_timer_create(struct k_itimer *new_timer)
64292@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64293 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64294 return -EFAULT;
64295
64296+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64297+ have their clock_set fptr set to a nosettime dummy function
64298+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64299+ call common_clock_set, which calls do_sys_settimeofday, which
64300+ we hook
64301+ */
64302+
64303 return kc->clock_set(which_clock, &new_tp);
64304 }
64305
64306diff -urNp linux-3.0.7/kernel/power/poweroff.c linux-3.0.7/kernel/power/poweroff.c
64307--- linux-3.0.7/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
64308+++ linux-3.0.7/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
64309@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64310 .enable_mask = SYSRQ_ENABLE_BOOT,
64311 };
64312
64313-static int pm_sysrq_init(void)
64314+static int __init pm_sysrq_init(void)
64315 {
64316 register_sysrq_key('o', &sysrq_poweroff_op);
64317 return 0;
64318diff -urNp linux-3.0.7/kernel/power/process.c linux-3.0.7/kernel/power/process.c
64319--- linux-3.0.7/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
64320+++ linux-3.0.7/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
64321@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64322 u64 elapsed_csecs64;
64323 unsigned int elapsed_csecs;
64324 bool wakeup = false;
64325+ bool timedout = false;
64326
64327 do_gettimeofday(&start);
64328
64329@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64330
64331 while (true) {
64332 todo = 0;
64333+ if (time_after(jiffies, end_time))
64334+ timedout = true;
64335 read_lock(&tasklist_lock);
64336 do_each_thread(g, p) {
64337 if (frozen(p) || !freezable(p))
64338@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64339 * try_to_stop() after schedule() in ptrace/signal
64340 * stop sees TIF_FREEZE.
64341 */
64342- if (!task_is_stopped_or_traced(p) &&
64343- !freezer_should_skip(p))
64344+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64345 todo++;
64346+ if (timedout) {
64347+ printk(KERN_ERR "Task refusing to freeze:\n");
64348+ sched_show_task(p);
64349+ }
64350+ }
64351 } while_each_thread(g, p);
64352 read_unlock(&tasklist_lock);
64353
64354@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64355 todo += wq_busy;
64356 }
64357
64358- if (!todo || time_after(jiffies, end_time))
64359+ if (!todo || timedout)
64360 break;
64361
64362 if (pm_wakeup_pending()) {
64363diff -urNp linux-3.0.7/kernel/printk.c linux-3.0.7/kernel/printk.c
64364--- linux-3.0.7/kernel/printk.c 2011-10-16 21:54:54.000000000 -0400
64365+++ linux-3.0.7/kernel/printk.c 2011-10-16 21:55:28.000000000 -0400
64366@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
64367 if (from_file && type != SYSLOG_ACTION_OPEN)
64368 return 0;
64369
64370+#ifdef CONFIG_GRKERNSEC_DMESG
64371+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64372+ return -EPERM;
64373+#endif
64374+
64375 if (syslog_action_restricted(type)) {
64376 if (capable(CAP_SYSLOG))
64377 return 0;
64378 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
64379 if (capable(CAP_SYS_ADMIN)) {
64380- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
64381+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
64382 "but no CAP_SYSLOG (deprecated).\n");
64383 return 0;
64384 }
64385diff -urNp linux-3.0.7/kernel/profile.c linux-3.0.7/kernel/profile.c
64386--- linux-3.0.7/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
64387+++ linux-3.0.7/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
64388@@ -39,7 +39,7 @@ struct profile_hit {
64389 /* Oprofile timer tick hook */
64390 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64391
64392-static atomic_t *prof_buffer;
64393+static atomic_unchecked_t *prof_buffer;
64394 static unsigned long prof_len, prof_shift;
64395
64396 int prof_on __read_mostly;
64397@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64398 hits[i].pc = 0;
64399 continue;
64400 }
64401- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64402+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64403 hits[i].hits = hits[i].pc = 0;
64404 }
64405 }
64406@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64407 * Add the current hit(s) and flush the write-queue out
64408 * to the global buffer:
64409 */
64410- atomic_add(nr_hits, &prof_buffer[pc]);
64411+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64412 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64413- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64414+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64415 hits[i].pc = hits[i].hits = 0;
64416 }
64417 out:
64418@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64419 {
64420 unsigned long pc;
64421 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64422- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64423+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64424 }
64425 #endif /* !CONFIG_SMP */
64426
64427@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64428 return -EFAULT;
64429 buf++; p++; count--; read++;
64430 }
64431- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64432+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64433 if (copy_to_user(buf, (void *)pnt, count))
64434 return -EFAULT;
64435 read += count;
64436@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64437 }
64438 #endif
64439 profile_discard_flip_buffers();
64440- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64441+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64442 return count;
64443 }
64444
64445diff -urNp linux-3.0.7/kernel/ptrace.c linux-3.0.7/kernel/ptrace.c
64446--- linux-3.0.7/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
64447+++ linux-3.0.7/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
64448@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
64449 return ret;
64450 }
64451
64452-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64453+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64454+ unsigned int log)
64455 {
64456 const struct cred *cred = current_cred(), *tcred;
64457
64458@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
64459 cred->gid == tcred->sgid &&
64460 cred->gid == tcred->gid))
64461 goto ok;
64462- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64463+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64464+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64465 goto ok;
64466 rcu_read_unlock();
64467 return -EPERM;
64468@@ -167,7 +169,9 @@ ok:
64469 smp_rmb();
64470 if (task->mm)
64471 dumpable = get_dumpable(task->mm);
64472- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64473+ if (!dumpable &&
64474+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64475+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64476 return -EPERM;
64477
64478 return security_ptrace_access_check(task, mode);
64479@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
64480 {
64481 int err;
64482 task_lock(task);
64483- err = __ptrace_may_access(task, mode);
64484+ err = __ptrace_may_access(task, mode, 0);
64485+ task_unlock(task);
64486+ return !err;
64487+}
64488+
64489+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64490+{
64491+ int err;
64492+ task_lock(task);
64493+ err = __ptrace_may_access(task, mode, 1);
64494 task_unlock(task);
64495 return !err;
64496 }
64497@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
64498 goto out;
64499
64500 task_lock(task);
64501- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64502+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64503 task_unlock(task);
64504 if (retval)
64505 goto unlock_creds;
64506@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
64507 goto unlock_tasklist;
64508
64509 task->ptrace = PT_PTRACED;
64510- if (task_ns_capable(task, CAP_SYS_PTRACE))
64511+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64512 task->ptrace |= PT_PTRACE_CAP;
64513
64514 __ptrace_link(task, current);
64515@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
64516 {
64517 int copied = 0;
64518
64519+ pax_track_stack();
64520+
64521 while (len > 0) {
64522 char buf[128];
64523 int this_len, retval;
64524@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
64525 break;
64526 return -EIO;
64527 }
64528- if (copy_to_user(dst, buf, retval))
64529+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64530 return -EFAULT;
64531 copied += retval;
64532 src += retval;
64533@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
64534 {
64535 int copied = 0;
64536
64537+ pax_track_stack();
64538+
64539 while (len > 0) {
64540 char buf[128];
64541 int this_len, retval;
64542@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
64543 {
64544 int ret = -EIO;
64545 siginfo_t siginfo;
64546- void __user *datavp = (void __user *) data;
64547+ void __user *datavp = (__force void __user *) data;
64548 unsigned long __user *datalp = datavp;
64549
64550+ pax_track_stack();
64551+
64552 switch (request) {
64553 case PTRACE_PEEKTEXT:
64554 case PTRACE_PEEKDATA:
64555@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64556 goto out;
64557 }
64558
64559+ if (gr_handle_ptrace(child, request)) {
64560+ ret = -EPERM;
64561+ goto out_put_task_struct;
64562+ }
64563+
64564 if (request == PTRACE_ATTACH) {
64565 ret = ptrace_attach(child);
64566 /*
64567 * Some architectures need to do book-keeping after
64568 * a ptrace attach.
64569 */
64570- if (!ret)
64571+ if (!ret) {
64572 arch_ptrace_attach(child);
64573+ gr_audit_ptrace(child);
64574+ }
64575 goto out_put_task_struct;
64576 }
64577
64578@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
64579 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64580 if (copied != sizeof(tmp))
64581 return -EIO;
64582- return put_user(tmp, (unsigned long __user *)data);
64583+ return put_user(tmp, (__force unsigned long __user *)data);
64584 }
64585
64586 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
64587@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
64588 siginfo_t siginfo;
64589 int ret;
64590
64591+ pax_track_stack();
64592+
64593 switch (request) {
64594 case PTRACE_PEEKTEXT:
64595 case PTRACE_PEEKDATA:
64596@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
64597 goto out;
64598 }
64599
64600+ if (gr_handle_ptrace(child, request)) {
64601+ ret = -EPERM;
64602+ goto out_put_task_struct;
64603+ }
64604+
64605 if (request == PTRACE_ATTACH) {
64606 ret = ptrace_attach(child);
64607 /*
64608 * Some architectures need to do book-keeping after
64609 * a ptrace attach.
64610 */
64611- if (!ret)
64612+ if (!ret) {
64613 arch_ptrace_attach(child);
64614+ gr_audit_ptrace(child);
64615+ }
64616 goto out_put_task_struct;
64617 }
64618
64619diff -urNp linux-3.0.7/kernel/rcutorture.c linux-3.0.7/kernel/rcutorture.c
64620--- linux-3.0.7/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
64621+++ linux-3.0.7/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
64622@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64623 { 0 };
64624 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64625 { 0 };
64626-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64627-static atomic_t n_rcu_torture_alloc;
64628-static atomic_t n_rcu_torture_alloc_fail;
64629-static atomic_t n_rcu_torture_free;
64630-static atomic_t n_rcu_torture_mberror;
64631-static atomic_t n_rcu_torture_error;
64632+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64633+static atomic_unchecked_t n_rcu_torture_alloc;
64634+static atomic_unchecked_t n_rcu_torture_alloc_fail;
64635+static atomic_unchecked_t n_rcu_torture_free;
64636+static atomic_unchecked_t n_rcu_torture_mberror;
64637+static atomic_unchecked_t n_rcu_torture_error;
64638 static long n_rcu_torture_boost_ktrerror;
64639 static long n_rcu_torture_boost_rterror;
64640 static long n_rcu_torture_boost_failure;
64641@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
64642
64643 spin_lock_bh(&rcu_torture_lock);
64644 if (list_empty(&rcu_torture_freelist)) {
64645- atomic_inc(&n_rcu_torture_alloc_fail);
64646+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64647 spin_unlock_bh(&rcu_torture_lock);
64648 return NULL;
64649 }
64650- atomic_inc(&n_rcu_torture_alloc);
64651+ atomic_inc_unchecked(&n_rcu_torture_alloc);
64652 p = rcu_torture_freelist.next;
64653 list_del_init(p);
64654 spin_unlock_bh(&rcu_torture_lock);
64655@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
64656 static void
64657 rcu_torture_free(struct rcu_torture *p)
64658 {
64659- atomic_inc(&n_rcu_torture_free);
64660+ atomic_inc_unchecked(&n_rcu_torture_free);
64661 spin_lock_bh(&rcu_torture_lock);
64662 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64663 spin_unlock_bh(&rcu_torture_lock);
64664@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
64665 i = rp->rtort_pipe_count;
64666 if (i > RCU_TORTURE_PIPE_LEN)
64667 i = RCU_TORTURE_PIPE_LEN;
64668- atomic_inc(&rcu_torture_wcount[i]);
64669+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64670 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64671 rp->rtort_mbtest = 0;
64672 rcu_torture_free(rp);
64673@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
64674 i = rp->rtort_pipe_count;
64675 if (i > RCU_TORTURE_PIPE_LEN)
64676 i = RCU_TORTURE_PIPE_LEN;
64677- atomic_inc(&rcu_torture_wcount[i]);
64678+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64679 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64680 rp->rtort_mbtest = 0;
64681 list_del(&rp->rtort_free);
64682@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
64683 i = old_rp->rtort_pipe_count;
64684 if (i > RCU_TORTURE_PIPE_LEN)
64685 i = RCU_TORTURE_PIPE_LEN;
64686- atomic_inc(&rcu_torture_wcount[i]);
64687+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64688 old_rp->rtort_pipe_count++;
64689 cur_ops->deferred_free(old_rp);
64690 }
64691@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
64692 return;
64693 }
64694 if (p->rtort_mbtest == 0)
64695- atomic_inc(&n_rcu_torture_mberror);
64696+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64697 spin_lock(&rand_lock);
64698 cur_ops->read_delay(&rand);
64699 n_rcu_torture_timers++;
64700@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
64701 continue;
64702 }
64703 if (p->rtort_mbtest == 0)
64704- atomic_inc(&n_rcu_torture_mberror);
64705+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64706 cur_ops->read_delay(&rand);
64707 preempt_disable();
64708 pipe_count = p->rtort_pipe_count;
64709@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
64710 rcu_torture_current,
64711 rcu_torture_current_version,
64712 list_empty(&rcu_torture_freelist),
64713- atomic_read(&n_rcu_torture_alloc),
64714- atomic_read(&n_rcu_torture_alloc_fail),
64715- atomic_read(&n_rcu_torture_free),
64716- atomic_read(&n_rcu_torture_mberror),
64717+ atomic_read_unchecked(&n_rcu_torture_alloc),
64718+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64719+ atomic_read_unchecked(&n_rcu_torture_free),
64720+ atomic_read_unchecked(&n_rcu_torture_mberror),
64721 n_rcu_torture_boost_ktrerror,
64722 n_rcu_torture_boost_rterror,
64723 n_rcu_torture_boost_failure,
64724 n_rcu_torture_boosts,
64725 n_rcu_torture_timers);
64726- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
64727+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
64728 n_rcu_torture_boost_ktrerror != 0 ||
64729 n_rcu_torture_boost_rterror != 0 ||
64730 n_rcu_torture_boost_failure != 0)
64731@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
64732 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64733 if (i > 1) {
64734 cnt += sprintf(&page[cnt], "!!! ");
64735- atomic_inc(&n_rcu_torture_error);
64736+ atomic_inc_unchecked(&n_rcu_torture_error);
64737 WARN_ON_ONCE(1);
64738 }
64739 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64740@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
64741 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
64742 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64743 cnt += sprintf(&page[cnt], " %d",
64744- atomic_read(&rcu_torture_wcount[i]));
64745+ atomic_read_unchecked(&rcu_torture_wcount[i]));
64746 }
64747 cnt += sprintf(&page[cnt], "\n");
64748 if (cur_ops->stats)
64749@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
64750
64751 if (cur_ops->cleanup)
64752 cur_ops->cleanup();
64753- if (atomic_read(&n_rcu_torture_error))
64754+ if (atomic_read_unchecked(&n_rcu_torture_error))
64755 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
64756 else
64757 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
64758@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
64759
64760 rcu_torture_current = NULL;
64761 rcu_torture_current_version = 0;
64762- atomic_set(&n_rcu_torture_alloc, 0);
64763- atomic_set(&n_rcu_torture_alloc_fail, 0);
64764- atomic_set(&n_rcu_torture_free, 0);
64765- atomic_set(&n_rcu_torture_mberror, 0);
64766- atomic_set(&n_rcu_torture_error, 0);
64767+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
64768+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
64769+ atomic_set_unchecked(&n_rcu_torture_free, 0);
64770+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
64771+ atomic_set_unchecked(&n_rcu_torture_error, 0);
64772 n_rcu_torture_boost_ktrerror = 0;
64773 n_rcu_torture_boost_rterror = 0;
64774 n_rcu_torture_boost_failure = 0;
64775 n_rcu_torture_boosts = 0;
64776 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
64777- atomic_set(&rcu_torture_wcount[i], 0);
64778+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
64779 for_each_possible_cpu(cpu) {
64780 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64781 per_cpu(rcu_torture_count, cpu)[i] = 0;
64782diff -urNp linux-3.0.7/kernel/rcutree.c linux-3.0.7/kernel/rcutree.c
64783--- linux-3.0.7/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
64784+++ linux-3.0.7/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
64785@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
64786 }
64787 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64788 smp_mb__before_atomic_inc(); /* See above. */
64789- atomic_inc(&rdtp->dynticks);
64790+ atomic_inc_unchecked(&rdtp->dynticks);
64791 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
64792- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64793+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64794 local_irq_restore(flags);
64795
64796 /* If the interrupt queued a callback, get out of dyntick mode. */
64797@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
64798 return;
64799 }
64800 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
64801- atomic_inc(&rdtp->dynticks);
64802+ atomic_inc_unchecked(&rdtp->dynticks);
64803 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64804 smp_mb__after_atomic_inc(); /* See above. */
64805- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64806+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64807 local_irq_restore(flags);
64808 }
64809
64810@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
64811 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
64812
64813 if (rdtp->dynticks_nmi_nesting == 0 &&
64814- (atomic_read(&rdtp->dynticks) & 0x1))
64815+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
64816 return;
64817 rdtp->dynticks_nmi_nesting++;
64818 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
64819- atomic_inc(&rdtp->dynticks);
64820+ atomic_inc_unchecked(&rdtp->dynticks);
64821 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64822 smp_mb__after_atomic_inc(); /* See above. */
64823- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64824+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64825 }
64826
64827 /**
64828@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
64829 return;
64830 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64831 smp_mb__before_atomic_inc(); /* See above. */
64832- atomic_inc(&rdtp->dynticks);
64833+ atomic_inc_unchecked(&rdtp->dynticks);
64834 smp_mb__after_atomic_inc(); /* Force delay to next write. */
64835- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64836+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64837 }
64838
64839 /**
64840@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
64841 */
64842 static int dyntick_save_progress_counter(struct rcu_data *rdp)
64843 {
64844- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
64845+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64846 return 0;
64847 }
64848
64849@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
64850 unsigned long curr;
64851 unsigned long snap;
64852
64853- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
64854+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64855 snap = (unsigned long)rdp->dynticks_snap;
64856
64857 /*
64858@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
64859 /*
64860 * Do softirq processing for the current CPU.
64861 */
64862-static void rcu_process_callbacks(struct softirq_action *unused)
64863+static void rcu_process_callbacks(void)
64864 {
64865 __rcu_process_callbacks(&rcu_sched_state,
64866 &__get_cpu_var(rcu_sched_data));
64867diff -urNp linux-3.0.7/kernel/rcutree.h linux-3.0.7/kernel/rcutree.h
64868--- linux-3.0.7/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
64869+++ linux-3.0.7/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
64870@@ -86,7 +86,7 @@
64871 struct rcu_dynticks {
64872 int dynticks_nesting; /* Track irq/process nesting level. */
64873 int dynticks_nmi_nesting; /* Track NMI nesting level. */
64874- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
64875+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
64876 };
64877
64878 /* RCU's kthread states for tracing. */
64879diff -urNp linux-3.0.7/kernel/rcutree_plugin.h linux-3.0.7/kernel/rcutree_plugin.h
64880--- linux-3.0.7/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
64881+++ linux-3.0.7/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
64882@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
64883
64884 /* Clean up and exit. */
64885 smp_mb(); /* ensure expedited GP seen before counter increment. */
64886- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
64887+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
64888 unlock_mb_ret:
64889 mutex_unlock(&sync_rcu_preempt_exp_mutex);
64890 mb_ret:
64891@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
64892
64893 #else /* #ifndef CONFIG_SMP */
64894
64895-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
64896-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
64897+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
64898+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
64899
64900 static int synchronize_sched_expedited_cpu_stop(void *data)
64901 {
64902@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
64903 int firstsnap, s, snap, trycount = 0;
64904
64905 /* Note that atomic_inc_return() implies full memory barrier. */
64906- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
64907+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
64908 get_online_cpus();
64909
64910 /*
64911@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
64912 }
64913
64914 /* Check to see if someone else did our work for us. */
64915- s = atomic_read(&sync_sched_expedited_done);
64916+ s = atomic_read_unchecked(&sync_sched_expedited_done);
64917 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
64918 smp_mb(); /* ensure test happens before caller kfree */
64919 return;
64920@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
64921 * grace period works for us.
64922 */
64923 get_online_cpus();
64924- snap = atomic_read(&sync_sched_expedited_started) - 1;
64925+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
64926 smp_mb(); /* ensure read is before try_stop_cpus(). */
64927 }
64928
64929@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
64930 * than we did beat us to the punch.
64931 */
64932 do {
64933- s = atomic_read(&sync_sched_expedited_done);
64934+ s = atomic_read_unchecked(&sync_sched_expedited_done);
64935 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
64936 smp_mb(); /* ensure test happens before caller kfree */
64937 break;
64938 }
64939- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
64940+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
64941
64942 put_online_cpus();
64943 }
64944diff -urNp linux-3.0.7/kernel/relay.c linux-3.0.7/kernel/relay.c
64945--- linux-3.0.7/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
64946+++ linux-3.0.7/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
64947@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
64948 };
64949 ssize_t ret;
64950
64951+ pax_track_stack();
64952+
64953 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
64954 return 0;
64955 if (splice_grow_spd(pipe, &spd))
64956diff -urNp linux-3.0.7/kernel/resource.c linux-3.0.7/kernel/resource.c
64957--- linux-3.0.7/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
64958+++ linux-3.0.7/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
64959@@ -141,8 +141,18 @@ static const struct file_operations proc
64960
64961 static int __init ioresources_init(void)
64962 {
64963+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64964+#ifdef CONFIG_GRKERNSEC_PROC_USER
64965+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
64966+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
64967+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64968+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
64969+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
64970+#endif
64971+#else
64972 proc_create("ioports", 0, NULL, &proc_ioports_operations);
64973 proc_create("iomem", 0, NULL, &proc_iomem_operations);
64974+#endif
64975 return 0;
64976 }
64977 __initcall(ioresources_init);
64978diff -urNp linux-3.0.7/kernel/rtmutex-tester.c linux-3.0.7/kernel/rtmutex-tester.c
64979--- linux-3.0.7/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
64980+++ linux-3.0.7/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
64981@@ -20,7 +20,7 @@
64982 #define MAX_RT_TEST_MUTEXES 8
64983
64984 static spinlock_t rttest_lock;
64985-static atomic_t rttest_event;
64986+static atomic_unchecked_t rttest_event;
64987
64988 struct test_thread_data {
64989 int opcode;
64990@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
64991
64992 case RTTEST_LOCKCONT:
64993 td->mutexes[td->opdata] = 1;
64994- td->event = atomic_add_return(1, &rttest_event);
64995+ td->event = atomic_add_return_unchecked(1, &rttest_event);
64996 return 0;
64997
64998 case RTTEST_RESET:
64999@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65000 return 0;
65001
65002 case RTTEST_RESETEVENT:
65003- atomic_set(&rttest_event, 0);
65004+ atomic_set_unchecked(&rttest_event, 0);
65005 return 0;
65006
65007 default:
65008@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65009 return ret;
65010
65011 td->mutexes[id] = 1;
65012- td->event = atomic_add_return(1, &rttest_event);
65013+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65014 rt_mutex_lock(&mutexes[id]);
65015- td->event = atomic_add_return(1, &rttest_event);
65016+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65017 td->mutexes[id] = 4;
65018 return 0;
65019
65020@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65021 return ret;
65022
65023 td->mutexes[id] = 1;
65024- td->event = atomic_add_return(1, &rttest_event);
65025+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65026 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65027- td->event = atomic_add_return(1, &rttest_event);
65028+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65029 td->mutexes[id] = ret ? 0 : 4;
65030 return ret ? -EINTR : 0;
65031
65032@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65033 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65034 return ret;
65035
65036- td->event = atomic_add_return(1, &rttest_event);
65037+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65038 rt_mutex_unlock(&mutexes[id]);
65039- td->event = atomic_add_return(1, &rttest_event);
65040+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65041 td->mutexes[id] = 0;
65042 return 0;
65043
65044@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65045 break;
65046
65047 td->mutexes[dat] = 2;
65048- td->event = atomic_add_return(1, &rttest_event);
65049+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65050 break;
65051
65052 default:
65053@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65054 return;
65055
65056 td->mutexes[dat] = 3;
65057- td->event = atomic_add_return(1, &rttest_event);
65058+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65059 break;
65060
65061 case RTTEST_LOCKNOWAIT:
65062@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65063 return;
65064
65065 td->mutexes[dat] = 1;
65066- td->event = atomic_add_return(1, &rttest_event);
65067+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65068 return;
65069
65070 default:
65071diff -urNp linux-3.0.7/kernel/sched_autogroup.c linux-3.0.7/kernel/sched_autogroup.c
65072--- linux-3.0.7/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
65073+++ linux-3.0.7/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
65074@@ -7,7 +7,7 @@
65075
65076 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65077 static struct autogroup autogroup_default;
65078-static atomic_t autogroup_seq_nr;
65079+static atomic_unchecked_t autogroup_seq_nr;
65080
65081 static void __init autogroup_init(struct task_struct *init_task)
65082 {
65083@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65084
65085 kref_init(&ag->kref);
65086 init_rwsem(&ag->lock);
65087- ag->id = atomic_inc_return(&autogroup_seq_nr);
65088+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65089 ag->tg = tg;
65090 #ifdef CONFIG_RT_GROUP_SCHED
65091 /*
65092diff -urNp linux-3.0.7/kernel/sched.c linux-3.0.7/kernel/sched.c
65093--- linux-3.0.7/kernel/sched.c 2011-10-17 23:17:09.000000000 -0400
65094+++ linux-3.0.7/kernel/sched.c 2011-10-17 23:17:19.000000000 -0400
65095@@ -4227,6 +4227,8 @@ static void __sched __schedule(void)
65096 struct rq *rq;
65097 int cpu;
65098
65099+ pax_track_stack();
65100+
65101 need_resched:
65102 preempt_disable();
65103 cpu = smp_processor_id();
65104@@ -4920,6 +4922,8 @@ int can_nice(const struct task_struct *p
65105 /* convert nice value [19,-20] to rlimit style value [1,40] */
65106 int nice_rlim = 20 - nice;
65107
65108+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65109+
65110 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65111 capable(CAP_SYS_NICE));
65112 }
65113@@ -4953,7 +4957,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65114 if (nice > 19)
65115 nice = 19;
65116
65117- if (increment < 0 && !can_nice(current, nice))
65118+ if (increment < 0 && (!can_nice(current, nice) ||
65119+ gr_handle_chroot_nice()))
65120 return -EPERM;
65121
65122 retval = security_task_setnice(current, nice);
65123@@ -5097,6 +5102,7 @@ recheck:
65124 unsigned long rlim_rtprio =
65125 task_rlimit(p, RLIMIT_RTPRIO);
65126
65127+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65128 /* can't set/change the rt policy */
65129 if (policy != p->policy && !rlim_rtprio)
65130 return -EPERM;
65131diff -urNp linux-3.0.7/kernel/sched_fair.c linux-3.0.7/kernel/sched_fair.c
65132--- linux-3.0.7/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
65133+++ linux-3.0.7/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
65134@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
65135 * run_rebalance_domains is triggered when needed from the scheduler tick.
65136 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65137 */
65138-static void run_rebalance_domains(struct softirq_action *h)
65139+static void run_rebalance_domains(void)
65140 {
65141 int this_cpu = smp_processor_id();
65142 struct rq *this_rq = cpu_rq(this_cpu);
65143diff -urNp linux-3.0.7/kernel/signal.c linux-3.0.7/kernel/signal.c
65144--- linux-3.0.7/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
65145+++ linux-3.0.7/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
65146@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65147
65148 int print_fatal_signals __read_mostly;
65149
65150-static void __user *sig_handler(struct task_struct *t, int sig)
65151+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65152 {
65153 return t->sighand->action[sig - 1].sa.sa_handler;
65154 }
65155
65156-static int sig_handler_ignored(void __user *handler, int sig)
65157+static int sig_handler_ignored(__sighandler_t handler, int sig)
65158 {
65159 /* Is it explicitly or implicitly ignored? */
65160 return handler == SIG_IGN ||
65161@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65162 static int sig_task_ignored(struct task_struct *t, int sig,
65163 int from_ancestor_ns)
65164 {
65165- void __user *handler;
65166+ __sighandler_t handler;
65167
65168 handler = sig_handler(t, sig);
65169
65170@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
65171 atomic_inc(&user->sigpending);
65172 rcu_read_unlock();
65173
65174+ if (!override_rlimit)
65175+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65176+
65177 if (override_rlimit ||
65178 atomic_read(&user->sigpending) <=
65179 task_rlimit(t, RLIMIT_SIGPENDING)) {
65180@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
65181
65182 int unhandled_signal(struct task_struct *tsk, int sig)
65183 {
65184- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65185+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65186 if (is_global_init(tsk))
65187 return 1;
65188 if (handler != SIG_IGN && handler != SIG_DFL)
65189@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
65190 }
65191 }
65192
65193+ /* allow glibc communication via tgkill to other threads in our
65194+ thread group */
65195+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65196+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65197+ && gr_handle_signal(t, sig))
65198+ return -EPERM;
65199+
65200 return security_task_kill(t, info, sig, 0);
65201 }
65202
65203@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
65204 return send_signal(sig, info, p, 1);
65205 }
65206
65207-static int
65208+int
65209 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65210 {
65211 return send_signal(sig, info, t, 0);
65212@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
65213 unsigned long int flags;
65214 int ret, blocked, ignored;
65215 struct k_sigaction *action;
65216+ int is_unhandled = 0;
65217
65218 spin_lock_irqsave(&t->sighand->siglock, flags);
65219 action = &t->sighand->action[sig-1];
65220@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
65221 }
65222 if (action->sa.sa_handler == SIG_DFL)
65223 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65224+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65225+ is_unhandled = 1;
65226 ret = specific_send_sig_info(sig, info, t);
65227 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65228
65229+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65230+ normal operation */
65231+ if (is_unhandled) {
65232+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65233+ gr_handle_crash(t, sig);
65234+ }
65235+
65236 return ret;
65237 }
65238
65239@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
65240 ret = check_kill_permission(sig, info, p);
65241 rcu_read_unlock();
65242
65243- if (!ret && sig)
65244+ if (!ret && sig) {
65245 ret = do_send_sig_info(sig, info, p, true);
65246+ if (!ret)
65247+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65248+ }
65249
65250 return ret;
65251 }
65252@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
65253 {
65254 siginfo_t info;
65255
65256+ pax_track_stack();
65257+
65258 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65259
65260 memset(&info, 0, sizeof info);
65261@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65262 int error = -ESRCH;
65263
65264 rcu_read_lock();
65265- p = find_task_by_vpid(pid);
65266+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65267+ /* allow glibc communication via tgkill to other threads in our
65268+ thread group */
65269+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65270+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65271+ p = find_task_by_vpid_unrestricted(pid);
65272+ else
65273+#endif
65274+ p = find_task_by_vpid(pid);
65275 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65276 error = check_kill_permission(sig, info, p);
65277 /*
65278diff -urNp linux-3.0.7/kernel/smp.c linux-3.0.7/kernel/smp.c
65279--- linux-3.0.7/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
65280+++ linux-3.0.7/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
65281@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65282 }
65283 EXPORT_SYMBOL(smp_call_function);
65284
65285-void ipi_call_lock(void)
65286+void ipi_call_lock(void) __acquires(call_function.lock)
65287 {
65288 raw_spin_lock(&call_function.lock);
65289 }
65290
65291-void ipi_call_unlock(void)
65292+void ipi_call_unlock(void) __releases(call_function.lock)
65293 {
65294 raw_spin_unlock(&call_function.lock);
65295 }
65296
65297-void ipi_call_lock_irq(void)
65298+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65299 {
65300 raw_spin_lock_irq(&call_function.lock);
65301 }
65302
65303-void ipi_call_unlock_irq(void)
65304+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65305 {
65306 raw_spin_unlock_irq(&call_function.lock);
65307 }
65308diff -urNp linux-3.0.7/kernel/softirq.c linux-3.0.7/kernel/softirq.c
65309--- linux-3.0.7/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
65310+++ linux-3.0.7/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
65311@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65312
65313 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65314
65315-char *softirq_to_name[NR_SOFTIRQS] = {
65316+const char * const softirq_to_name[NR_SOFTIRQS] = {
65317 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65318 "TASKLET", "SCHED", "HRTIMER", "RCU"
65319 };
65320@@ -235,7 +235,7 @@ restart:
65321 kstat_incr_softirqs_this_cpu(vec_nr);
65322
65323 trace_softirq_entry(vec_nr);
65324- h->action(h);
65325+ h->action();
65326 trace_softirq_exit(vec_nr);
65327 if (unlikely(prev_count != preempt_count())) {
65328 printk(KERN_ERR "huh, entered softirq %u %s %p"
65329@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65330 local_irq_restore(flags);
65331 }
65332
65333-void open_softirq(int nr, void (*action)(struct softirq_action *))
65334+void open_softirq(int nr, void (*action)(void))
65335 {
65336- softirq_vec[nr].action = action;
65337+ pax_open_kernel();
65338+ *(void **)&softirq_vec[nr].action = action;
65339+ pax_close_kernel();
65340 }
65341
65342 /*
65343@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65344
65345 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65346
65347-static void tasklet_action(struct softirq_action *a)
65348+static void tasklet_action(void)
65349 {
65350 struct tasklet_struct *list;
65351
65352@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65353 }
65354 }
65355
65356-static void tasklet_hi_action(struct softirq_action *a)
65357+static void tasklet_hi_action(void)
65358 {
65359 struct tasklet_struct *list;
65360
65361diff -urNp linux-3.0.7/kernel/sys.c linux-3.0.7/kernel/sys.c
65362--- linux-3.0.7/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
65363+++ linux-3.0.7/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
65364@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
65365 error = -EACCES;
65366 goto out;
65367 }
65368+
65369+ if (gr_handle_chroot_setpriority(p, niceval)) {
65370+ error = -EACCES;
65371+ goto out;
65372+ }
65373+
65374 no_nice = security_task_setnice(p, niceval);
65375 if (no_nice) {
65376 error = no_nice;
65377@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65378 goto error;
65379 }
65380
65381+ if (gr_check_group_change(new->gid, new->egid, -1))
65382+ goto error;
65383+
65384 if (rgid != (gid_t) -1 ||
65385 (egid != (gid_t) -1 && egid != old->gid))
65386 new->sgid = new->egid;
65387@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65388 old = current_cred();
65389
65390 retval = -EPERM;
65391+
65392+ if (gr_check_group_change(gid, gid, gid))
65393+ goto error;
65394+
65395 if (nsown_capable(CAP_SETGID))
65396 new->gid = new->egid = new->sgid = new->fsgid = gid;
65397 else if (gid == old->gid || gid == old->sgid)
65398@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
65399 if (!new_user)
65400 return -EAGAIN;
65401
65402+ /*
65403+ * We don't fail in case of NPROC limit excess here because too many
65404+ * poorly written programs don't check set*uid() return code, assuming
65405+ * it never fails if called by root. We may still enforce NPROC limit
65406+ * for programs doing set*uid()+execve() by harmlessly deferring the
65407+ * failure to the execve() stage.
65408+ */
65409 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
65410- new_user != INIT_USER) {
65411- free_uid(new_user);
65412- return -EAGAIN;
65413- }
65414+ new_user != INIT_USER)
65415+ current->flags |= PF_NPROC_EXCEEDED;
65416+ else
65417+ current->flags &= ~PF_NPROC_EXCEEDED;
65418
65419 free_uid(new->user);
65420 new->user = new_user;
65421@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65422 goto error;
65423 }
65424
65425+ if (gr_check_user_change(new->uid, new->euid, -1))
65426+ goto error;
65427+
65428 if (new->uid != old->uid) {
65429 retval = set_user(new);
65430 if (retval < 0)
65431@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65432 old = current_cred();
65433
65434 retval = -EPERM;
65435+
65436+ if (gr_check_crash_uid(uid))
65437+ goto error;
65438+ if (gr_check_user_change(uid, uid, uid))
65439+ goto error;
65440+
65441 if (nsown_capable(CAP_SETUID)) {
65442 new->suid = new->uid = uid;
65443 if (uid != old->uid) {
65444@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65445 goto error;
65446 }
65447
65448+ if (gr_check_user_change(ruid, euid, -1))
65449+ goto error;
65450+
65451 if (ruid != (uid_t) -1) {
65452 new->uid = ruid;
65453 if (ruid != old->uid) {
65454@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65455 goto error;
65456 }
65457
65458+ if (gr_check_group_change(rgid, egid, -1))
65459+ goto error;
65460+
65461 if (rgid != (gid_t) -1)
65462 new->gid = rgid;
65463 if (egid != (gid_t) -1)
65464@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65465 old = current_cred();
65466 old_fsuid = old->fsuid;
65467
65468+ if (gr_check_user_change(-1, -1, uid))
65469+ goto error;
65470+
65471 if (uid == old->uid || uid == old->euid ||
65472 uid == old->suid || uid == old->fsuid ||
65473 nsown_capable(CAP_SETUID)) {
65474@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65475 }
65476 }
65477
65478+error:
65479 abort_creds(new);
65480 return old_fsuid;
65481
65482@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65483 if (gid == old->gid || gid == old->egid ||
65484 gid == old->sgid || gid == old->fsgid ||
65485 nsown_capable(CAP_SETGID)) {
65486+ if (gr_check_group_change(-1, -1, gid))
65487+ goto error;
65488+
65489 if (gid != old_fsgid) {
65490 new->fsgid = gid;
65491 goto change_okay;
65492 }
65493 }
65494
65495+error:
65496 abort_creds(new);
65497 return old_fsgid;
65498
65499@@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65500 return -EFAULT;
65501
65502 down_read(&uts_sem);
65503- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65504+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65505 __OLD_UTS_LEN);
65506 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65507- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65508+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65509 __OLD_UTS_LEN);
65510 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65511- error |= __copy_to_user(&name->release, &utsname()->release,
65512+ error |= __copy_to_user(name->release, &utsname()->release,
65513 __OLD_UTS_LEN);
65514 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65515- error |= __copy_to_user(&name->version, &utsname()->version,
65516+ error |= __copy_to_user(name->version, &utsname()->version,
65517 __OLD_UTS_LEN);
65518 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65519- error |= __copy_to_user(&name->machine, &utsname()->machine,
65520+ error |= __copy_to_user(name->machine, &utsname()->machine,
65521 __OLD_UTS_LEN);
65522 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65523 up_read(&uts_sem);
65524@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65525 error = get_dumpable(me->mm);
65526 break;
65527 case PR_SET_DUMPABLE:
65528- if (arg2 < 0 || arg2 > 1) {
65529+ if (arg2 > 1) {
65530 error = -EINVAL;
65531 break;
65532 }
65533diff -urNp linux-3.0.7/kernel/sysctl_binary.c linux-3.0.7/kernel/sysctl_binary.c
65534--- linux-3.0.7/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
65535+++ linux-3.0.7/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
65536@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65537 int i;
65538
65539 set_fs(KERNEL_DS);
65540- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65541+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65542 set_fs(old_fs);
65543 if (result < 0)
65544 goto out_kfree;
65545@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65546 }
65547
65548 set_fs(KERNEL_DS);
65549- result = vfs_write(file, buffer, str - buffer, &pos);
65550+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65551 set_fs(old_fs);
65552 if (result < 0)
65553 goto out_kfree;
65554@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65555 int i;
65556
65557 set_fs(KERNEL_DS);
65558- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65559+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65560 set_fs(old_fs);
65561 if (result < 0)
65562 goto out_kfree;
65563@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65564 }
65565
65566 set_fs(KERNEL_DS);
65567- result = vfs_write(file, buffer, str - buffer, &pos);
65568+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65569 set_fs(old_fs);
65570 if (result < 0)
65571 goto out_kfree;
65572@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
65573 int i;
65574
65575 set_fs(KERNEL_DS);
65576- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65577+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65578 set_fs(old_fs);
65579 if (result < 0)
65580 goto out;
65581@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
65582 __le16 dnaddr;
65583
65584 set_fs(KERNEL_DS);
65585- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65586+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65587 set_fs(old_fs);
65588 if (result < 0)
65589 goto out;
65590@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
65591 le16_to_cpu(dnaddr) & 0x3ff);
65592
65593 set_fs(KERNEL_DS);
65594- result = vfs_write(file, buf, len, &pos);
65595+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
65596 set_fs(old_fs);
65597 if (result < 0)
65598 goto out;
65599diff -urNp linux-3.0.7/kernel/sysctl.c linux-3.0.7/kernel/sysctl.c
65600--- linux-3.0.7/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
65601+++ linux-3.0.7/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
65602@@ -85,6 +85,13 @@
65603
65604
65605 #if defined(CONFIG_SYSCTL)
65606+#include <linux/grsecurity.h>
65607+#include <linux/grinternal.h>
65608+
65609+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65610+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65611+ const int op);
65612+extern int gr_handle_chroot_sysctl(const int op);
65613
65614 /* External variables not in a header file. */
65615 extern int sysctl_overcommit_memory;
65616@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
65617 }
65618
65619 #endif
65620+extern struct ctl_table grsecurity_table[];
65621
65622 static struct ctl_table root_table[];
65623 static struct ctl_table_root sysctl_table_root;
65624@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
65625 int sysctl_legacy_va_layout;
65626 #endif
65627
65628+#ifdef CONFIG_PAX_SOFTMODE
65629+static ctl_table pax_table[] = {
65630+ {
65631+ .procname = "softmode",
65632+ .data = &pax_softmode,
65633+ .maxlen = sizeof(unsigned int),
65634+ .mode = 0600,
65635+ .proc_handler = &proc_dointvec,
65636+ },
65637+
65638+ { }
65639+};
65640+#endif
65641+
65642 /* The default sysctl tables: */
65643
65644 static struct ctl_table root_table[] = {
65645@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
65646 #endif
65647
65648 static struct ctl_table kern_table[] = {
65649+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65650+ {
65651+ .procname = "grsecurity",
65652+ .mode = 0500,
65653+ .child = grsecurity_table,
65654+ },
65655+#endif
65656+
65657+#ifdef CONFIG_PAX_SOFTMODE
65658+ {
65659+ .procname = "pax",
65660+ .mode = 0500,
65661+ .child = pax_table,
65662+ },
65663+#endif
65664+
65665 {
65666 .procname = "sched_child_runs_first",
65667 .data = &sysctl_sched_child_runs_first,
65668@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
65669 .data = &modprobe_path,
65670 .maxlen = KMOD_PATH_LEN,
65671 .mode = 0644,
65672- .proc_handler = proc_dostring,
65673+ .proc_handler = proc_dostring_modpriv,
65674 },
65675 {
65676 .procname = "modules_disabled",
65677@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
65678 .extra1 = &zero,
65679 .extra2 = &one,
65680 },
65681+#endif
65682 {
65683 .procname = "kptr_restrict",
65684 .data = &kptr_restrict,
65685 .maxlen = sizeof(int),
65686 .mode = 0644,
65687 .proc_handler = proc_dmesg_restrict,
65688+#ifdef CONFIG_GRKERNSEC_HIDESYM
65689+ .extra1 = &two,
65690+#else
65691 .extra1 = &zero,
65692+#endif
65693 .extra2 = &two,
65694 },
65695-#endif
65696 {
65697 .procname = "ngroups_max",
65698 .data = &ngroups_max,
65699@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
65700 .proc_handler = proc_dointvec_minmax,
65701 .extra1 = &zero,
65702 },
65703+ {
65704+ .procname = "heap_stack_gap",
65705+ .data = &sysctl_heap_stack_gap,
65706+ .maxlen = sizeof(sysctl_heap_stack_gap),
65707+ .mode = 0644,
65708+ .proc_handler = proc_doulongvec_minmax,
65709+ },
65710 #else
65711 {
65712 .procname = "nr_trim_pages",
65713@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
65714 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
65715 {
65716 int mode;
65717+ int error;
65718+
65719+ if (table->parent != NULL && table->parent->procname != NULL &&
65720+ table->procname != NULL &&
65721+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65722+ return -EACCES;
65723+ if (gr_handle_chroot_sysctl(op))
65724+ return -EACCES;
65725+ error = gr_handle_sysctl(table, op);
65726+ if (error)
65727+ return error;
65728
65729 if (root->permissions)
65730 mode = root->permissions(root, current->nsproxy, table);
65731@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
65732 buffer, lenp, ppos);
65733 }
65734
65735+int proc_dostring_modpriv(struct ctl_table *table, int write,
65736+ void __user *buffer, size_t *lenp, loff_t *ppos)
65737+{
65738+ if (write && !capable(CAP_SYS_MODULE))
65739+ return -EPERM;
65740+
65741+ return _proc_do_string(table->data, table->maxlen, write,
65742+ buffer, lenp, ppos);
65743+}
65744+
65745 static size_t proc_skip_spaces(char **buf)
65746 {
65747 size_t ret;
65748@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
65749 len = strlen(tmp);
65750 if (len > *size)
65751 len = *size;
65752+ if (len > sizeof(tmp))
65753+ len = sizeof(tmp);
65754 if (copy_to_user(*buf, tmp, len))
65755 return -EFAULT;
65756 *size -= len;
65757@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
65758 *i = val;
65759 } else {
65760 val = convdiv * (*i) / convmul;
65761- if (!first)
65762+ if (!first) {
65763 err = proc_put_char(&buffer, &left, '\t');
65764+ if (err)
65765+ break;
65766+ }
65767 err = proc_put_long(&buffer, &left, val, false);
65768 if (err)
65769 break;
65770@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
65771 return -ENOSYS;
65772 }
65773
65774+int proc_dostring_modpriv(struct ctl_table *table, int write,
65775+ void __user *buffer, size_t *lenp, loff_t *ppos)
65776+{
65777+ return -ENOSYS;
65778+}
65779+
65780 int proc_dointvec(struct ctl_table *table, int write,
65781 void __user *buffer, size_t *lenp, loff_t *ppos)
65782 {
65783@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65784 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65785 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65786 EXPORT_SYMBOL(proc_dostring);
65787+EXPORT_SYMBOL(proc_dostring_modpriv);
65788 EXPORT_SYMBOL(proc_doulongvec_minmax);
65789 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65790 EXPORT_SYMBOL(register_sysctl_table);
65791diff -urNp linux-3.0.7/kernel/sysctl_check.c linux-3.0.7/kernel/sysctl_check.c
65792--- linux-3.0.7/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
65793+++ linux-3.0.7/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
65794@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
65795 set_fail(&fail, table, "Directory with extra2");
65796 } else {
65797 if ((table->proc_handler == proc_dostring) ||
65798+ (table->proc_handler == proc_dostring_modpriv) ||
65799 (table->proc_handler == proc_dointvec) ||
65800 (table->proc_handler == proc_dointvec_minmax) ||
65801 (table->proc_handler == proc_dointvec_jiffies) ||
65802diff -urNp linux-3.0.7/kernel/taskstats.c linux-3.0.7/kernel/taskstats.c
65803--- linux-3.0.7/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
65804+++ linux-3.0.7/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
65805@@ -27,9 +27,12 @@
65806 #include <linux/cgroup.h>
65807 #include <linux/fs.h>
65808 #include <linux/file.h>
65809+#include <linux/grsecurity.h>
65810 #include <net/genetlink.h>
65811 #include <asm/atomic.h>
65812
65813+extern int gr_is_taskstats_denied(int pid);
65814+
65815 /*
65816 * Maximum length of a cpumask that can be specified in
65817 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65818@@ -558,6 +561,9 @@ err:
65819
65820 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
65821 {
65822+ if (gr_is_taskstats_denied(current->pid))
65823+ return -EACCES;
65824+
65825 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
65826 return cmd_attr_register_cpumask(info);
65827 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
65828diff -urNp linux-3.0.7/kernel/time/alarmtimer.c linux-3.0.7/kernel/time/alarmtimer.c
65829--- linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:54:54.000000000 -0400
65830+++ linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:55:28.000000000 -0400
65831@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
65832 {
65833 int error = 0;
65834 int i;
65835- struct k_clock alarm_clock = {
65836+ static struct k_clock alarm_clock = {
65837 .clock_getres = alarm_clock_getres,
65838 .clock_get = alarm_clock_get,
65839 .timer_create = alarm_timer_create,
65840diff -urNp linux-3.0.7/kernel/time/tick-broadcast.c linux-3.0.7/kernel/time/tick-broadcast.c
65841--- linux-3.0.7/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
65842+++ linux-3.0.7/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
65843@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
65844 * then clear the broadcast bit.
65845 */
65846 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65847- int cpu = smp_processor_id();
65848+ cpu = smp_processor_id();
65849
65850 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
65851 tick_broadcast_clear_oneshot(cpu);
65852diff -urNp linux-3.0.7/kernel/time/timekeeping.c linux-3.0.7/kernel/time/timekeeping.c
65853--- linux-3.0.7/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
65854+++ linux-3.0.7/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
65855@@ -14,6 +14,7 @@
65856 #include <linux/init.h>
65857 #include <linux/mm.h>
65858 #include <linux/sched.h>
65859+#include <linux/grsecurity.h>
65860 #include <linux/syscore_ops.h>
65861 #include <linux/clocksource.h>
65862 #include <linux/jiffies.h>
65863@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
65864 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
65865 return -EINVAL;
65866
65867+ gr_log_timechange();
65868+
65869 write_seqlock_irqsave(&xtime_lock, flags);
65870
65871 timekeeping_forward_now();
65872diff -urNp linux-3.0.7/kernel/time/timer_list.c linux-3.0.7/kernel/time/timer_list.c
65873--- linux-3.0.7/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
65874+++ linux-3.0.7/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
65875@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
65876
65877 static void print_name_offset(struct seq_file *m, void *sym)
65878 {
65879+#ifdef CONFIG_GRKERNSEC_HIDESYM
65880+ SEQ_printf(m, "<%p>", NULL);
65881+#else
65882 char symname[KSYM_NAME_LEN];
65883
65884 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
65885 SEQ_printf(m, "<%pK>", sym);
65886 else
65887 SEQ_printf(m, "%s", symname);
65888+#endif
65889 }
65890
65891 static void
65892@@ -112,7 +116,11 @@ next_one:
65893 static void
65894 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
65895 {
65896+#ifdef CONFIG_GRKERNSEC_HIDESYM
65897+ SEQ_printf(m, " .base: %p\n", NULL);
65898+#else
65899 SEQ_printf(m, " .base: %pK\n", base);
65900+#endif
65901 SEQ_printf(m, " .index: %d\n",
65902 base->index);
65903 SEQ_printf(m, " .resolution: %Lu nsecs\n",
65904@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
65905 {
65906 struct proc_dir_entry *pe;
65907
65908+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65909+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
65910+#else
65911 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
65912+#endif
65913 if (!pe)
65914 return -ENOMEM;
65915 return 0;
65916diff -urNp linux-3.0.7/kernel/time/timer_stats.c linux-3.0.7/kernel/time/timer_stats.c
65917--- linux-3.0.7/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
65918+++ linux-3.0.7/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
65919@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
65920 static unsigned long nr_entries;
65921 static struct entry entries[MAX_ENTRIES];
65922
65923-static atomic_t overflow_count;
65924+static atomic_unchecked_t overflow_count;
65925
65926 /*
65927 * The entries are in a hash-table, for fast lookup:
65928@@ -140,7 +140,7 @@ static void reset_entries(void)
65929 nr_entries = 0;
65930 memset(entries, 0, sizeof(entries));
65931 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
65932- atomic_set(&overflow_count, 0);
65933+ atomic_set_unchecked(&overflow_count, 0);
65934 }
65935
65936 static struct entry *alloc_entry(void)
65937@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
65938 if (likely(entry))
65939 entry->count++;
65940 else
65941- atomic_inc(&overflow_count);
65942+ atomic_inc_unchecked(&overflow_count);
65943
65944 out_unlock:
65945 raw_spin_unlock_irqrestore(lock, flags);
65946@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
65947
65948 static void print_name_offset(struct seq_file *m, unsigned long addr)
65949 {
65950+#ifdef CONFIG_GRKERNSEC_HIDESYM
65951+ seq_printf(m, "<%p>", NULL);
65952+#else
65953 char symname[KSYM_NAME_LEN];
65954
65955 if (lookup_symbol_name(addr, symname) < 0)
65956 seq_printf(m, "<%p>", (void *)addr);
65957 else
65958 seq_printf(m, "%s", symname);
65959+#endif
65960 }
65961
65962 static int tstats_show(struct seq_file *m, void *v)
65963@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
65964
65965 seq_puts(m, "Timer Stats Version: v0.2\n");
65966 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
65967- if (atomic_read(&overflow_count))
65968+ if (atomic_read_unchecked(&overflow_count))
65969 seq_printf(m, "Overflow: %d entries\n",
65970- atomic_read(&overflow_count));
65971+ atomic_read_unchecked(&overflow_count));
65972
65973 for (i = 0; i < nr_entries; i++) {
65974 entry = entries + i;
65975@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
65976 {
65977 struct proc_dir_entry *pe;
65978
65979+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65980+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
65981+#else
65982 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
65983+#endif
65984 if (!pe)
65985 return -ENOMEM;
65986 return 0;
65987diff -urNp linux-3.0.7/kernel/time.c linux-3.0.7/kernel/time.c
65988--- linux-3.0.7/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
65989+++ linux-3.0.7/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
65990@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
65991 return error;
65992
65993 if (tz) {
65994+ /* we log in do_settimeofday called below, so don't log twice
65995+ */
65996+ if (!tv)
65997+ gr_log_timechange();
65998+
65999 /* SMP safe, global irq locking makes it work. */
66000 sys_tz = *tz;
66001 update_vsyscall_tz();
66002diff -urNp linux-3.0.7/kernel/timer.c linux-3.0.7/kernel/timer.c
66003--- linux-3.0.7/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
66004+++ linux-3.0.7/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
66005@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66006 /*
66007 * This function runs timers and the timer-tq in bottom half context.
66008 */
66009-static void run_timer_softirq(struct softirq_action *h)
66010+static void run_timer_softirq(void)
66011 {
66012 struct tvec_base *base = __this_cpu_read(tvec_bases);
66013
66014diff -urNp linux-3.0.7/kernel/trace/blktrace.c linux-3.0.7/kernel/trace/blktrace.c
66015--- linux-3.0.7/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
66016+++ linux-3.0.7/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
66017@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
66018 struct blk_trace *bt = filp->private_data;
66019 char buf[16];
66020
66021- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66022+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66023
66024 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66025 }
66026@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
66027 return 1;
66028
66029 bt = buf->chan->private_data;
66030- atomic_inc(&bt->dropped);
66031+ atomic_inc_unchecked(&bt->dropped);
66032 return 0;
66033 }
66034
66035@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
66036
66037 bt->dir = dir;
66038 bt->dev = dev;
66039- atomic_set(&bt->dropped, 0);
66040+ atomic_set_unchecked(&bt->dropped, 0);
66041
66042 ret = -EIO;
66043 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66044diff -urNp linux-3.0.7/kernel/trace/ftrace.c linux-3.0.7/kernel/trace/ftrace.c
66045--- linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:09.000000000 -0400
66046+++ linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:19.000000000 -0400
66047@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
66048 if (unlikely(ftrace_disabled))
66049 return 0;
66050
66051+ ret = ftrace_arch_code_modify_prepare();
66052+ FTRACE_WARN_ON(ret);
66053+ if (ret)
66054+ return 0;
66055+
66056 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66057+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66058 if (ret) {
66059 ftrace_bug(ret, ip);
66060- return 0;
66061 }
66062- return 1;
66063+ return ret ? 0 : 1;
66064 }
66065
66066 /*
66067@@ -2570,7 +2575,7 @@ static void ftrace_free_entry_rcu(struct
66068
66069 int
66070 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66071- void *data)
66072+ void *data)
66073 {
66074 struct ftrace_func_probe *entry;
66075 struct ftrace_page *pg;
66076diff -urNp linux-3.0.7/kernel/trace/trace.c linux-3.0.7/kernel/trace/trace.c
66077--- linux-3.0.7/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
66078+++ linux-3.0.7/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
66079@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
66080 size_t rem;
66081 unsigned int i;
66082
66083+ pax_track_stack();
66084+
66085 if (splice_grow_spd(pipe, &spd))
66086 return -ENOMEM;
66087
66088@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
66089 int entries, size, i;
66090 size_t ret;
66091
66092+ pax_track_stack();
66093+
66094 if (splice_grow_spd(pipe, &spd))
66095 return -ENOMEM;
66096
66097@@ -3990,10 +3994,9 @@ static const struct file_operations trac
66098 };
66099 #endif
66100
66101-static struct dentry *d_tracer;
66102-
66103 struct dentry *tracing_init_dentry(void)
66104 {
66105+ static struct dentry *d_tracer;
66106 static int once;
66107
66108 if (d_tracer)
66109@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
66110 return d_tracer;
66111 }
66112
66113-static struct dentry *d_percpu;
66114-
66115 struct dentry *tracing_dentry_percpu(void)
66116 {
66117+ static struct dentry *d_percpu;
66118 static int once;
66119 struct dentry *d_tracer;
66120
66121diff -urNp linux-3.0.7/kernel/trace/trace_events.c linux-3.0.7/kernel/trace/trace_events.c
66122--- linux-3.0.7/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
66123+++ linux-3.0.7/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
66124@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
66125 struct ftrace_module_file_ops {
66126 struct list_head list;
66127 struct module *mod;
66128- struct file_operations id;
66129- struct file_operations enable;
66130- struct file_operations format;
66131- struct file_operations filter;
66132 };
66133
66134 static struct ftrace_module_file_ops *
66135@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
66136
66137 file_ops->mod = mod;
66138
66139- file_ops->id = ftrace_event_id_fops;
66140- file_ops->id.owner = mod;
66141-
66142- file_ops->enable = ftrace_enable_fops;
66143- file_ops->enable.owner = mod;
66144-
66145- file_ops->filter = ftrace_event_filter_fops;
66146- file_ops->filter.owner = mod;
66147-
66148- file_ops->format = ftrace_event_format_fops;
66149- file_ops->format.owner = mod;
66150+ pax_open_kernel();
66151+ *(void **)&mod->trace_id.owner = mod;
66152+ *(void **)&mod->trace_enable.owner = mod;
66153+ *(void **)&mod->trace_filter.owner = mod;
66154+ *(void **)&mod->trace_format.owner = mod;
66155+ pax_close_kernel();
66156
66157 list_add(&file_ops->list, &ftrace_module_file_list);
66158
66159@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
66160
66161 for_each_event(call, start, end) {
66162 __trace_add_event_call(*call, mod,
66163- &file_ops->id, &file_ops->enable,
66164- &file_ops->filter, &file_ops->format);
66165+ &mod->trace_id, &mod->trace_enable,
66166+ &mod->trace_filter, &mod->trace_format);
66167 }
66168 }
66169
66170diff -urNp linux-3.0.7/kernel/trace/trace_kprobe.c linux-3.0.7/kernel/trace/trace_kprobe.c
66171--- linux-3.0.7/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
66172+++ linux-3.0.7/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
66173@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66174 long ret;
66175 int maxlen = get_rloc_len(*(u32 *)dest);
66176 u8 *dst = get_rloc_data(dest);
66177- u8 *src = addr;
66178+ const u8 __user *src = (const u8 __force_user *)addr;
66179 mm_segment_t old_fs = get_fs();
66180 if (!maxlen)
66181 return;
66182@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66183 pagefault_disable();
66184 do
66185 ret = __copy_from_user_inatomic(dst++, src++, 1);
66186- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66187+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66188 dst[-1] = '\0';
66189 pagefault_enable();
66190 set_fs(old_fs);
66191@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66192 ((u8 *)get_rloc_data(dest))[0] = '\0';
66193 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66194 } else
66195- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66196+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66197 get_rloc_offs(*(u32 *)dest));
66198 }
66199 /* Return the length of string -- including null terminal byte */
66200@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66201 set_fs(KERNEL_DS);
66202 pagefault_disable();
66203 do {
66204- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66205+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66206 len++;
66207 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66208 pagefault_enable();
66209diff -urNp linux-3.0.7/kernel/trace/trace_mmiotrace.c linux-3.0.7/kernel/trace/trace_mmiotrace.c
66210--- linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
66211+++ linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
66212@@ -24,7 +24,7 @@ struct header_iter {
66213 static struct trace_array *mmio_trace_array;
66214 static bool overrun_detected;
66215 static unsigned long prev_overruns;
66216-static atomic_t dropped_count;
66217+static atomic_unchecked_t dropped_count;
66218
66219 static void mmio_reset_data(struct trace_array *tr)
66220 {
66221@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66222
66223 static unsigned long count_overruns(struct trace_iterator *iter)
66224 {
66225- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66226+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66227 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66228
66229 if (over > prev_overruns)
66230@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66231 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66232 sizeof(*entry), 0, pc);
66233 if (!event) {
66234- atomic_inc(&dropped_count);
66235+ atomic_inc_unchecked(&dropped_count);
66236 return;
66237 }
66238 entry = ring_buffer_event_data(event);
66239@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66240 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66241 sizeof(*entry), 0, pc);
66242 if (!event) {
66243- atomic_inc(&dropped_count);
66244+ atomic_inc_unchecked(&dropped_count);
66245 return;
66246 }
66247 entry = ring_buffer_event_data(event);
66248diff -urNp linux-3.0.7/kernel/trace/trace_output.c linux-3.0.7/kernel/trace/trace_output.c
66249--- linux-3.0.7/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
66250+++ linux-3.0.7/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
66251@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66252
66253 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66254 if (!IS_ERR(p)) {
66255- p = mangle_path(s->buffer + s->len, p, "\n");
66256+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66257 if (p) {
66258 s->len = p - s->buffer;
66259 return 1;
66260diff -urNp linux-3.0.7/kernel/trace/trace_stack.c linux-3.0.7/kernel/trace/trace_stack.c
66261--- linux-3.0.7/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
66262+++ linux-3.0.7/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
66263@@ -50,7 +50,7 @@ static inline void check_stack(void)
66264 return;
66265
66266 /* we do not handle interrupt stacks yet */
66267- if (!object_is_on_stack(&this_size))
66268+ if (!object_starts_on_stack(&this_size))
66269 return;
66270
66271 local_irq_save(flags);
66272diff -urNp linux-3.0.7/kernel/trace/trace_workqueue.c linux-3.0.7/kernel/trace/trace_workqueue.c
66273--- linux-3.0.7/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
66274+++ linux-3.0.7/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
66275@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66276 int cpu;
66277 pid_t pid;
66278 /* Can be inserted from interrupt or user context, need to be atomic */
66279- atomic_t inserted;
66280+ atomic_unchecked_t inserted;
66281 /*
66282 * Don't need to be atomic, works are serialized in a single workqueue thread
66283 * on a single CPU.
66284@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66285 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66286 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66287 if (node->pid == wq_thread->pid) {
66288- atomic_inc(&node->inserted);
66289+ atomic_inc_unchecked(&node->inserted);
66290 goto found;
66291 }
66292 }
66293@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66294 tsk = get_pid_task(pid, PIDTYPE_PID);
66295 if (tsk) {
66296 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66297- atomic_read(&cws->inserted), cws->executed,
66298+ atomic_read_unchecked(&cws->inserted), cws->executed,
66299 tsk->comm);
66300 put_task_struct(tsk);
66301 }
66302diff -urNp linux-3.0.7/lib/bitmap.c linux-3.0.7/lib/bitmap.c
66303--- linux-3.0.7/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
66304+++ linux-3.0.7/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
66305@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
66306 {
66307 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66308 u32 chunk;
66309- const char __user *ubuf = buf;
66310+ const char __user *ubuf = (const char __force_user *)buf;
66311
66312 bitmap_zero(maskp, nmaskbits);
66313
66314@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
66315 {
66316 if (!access_ok(VERIFY_READ, ubuf, ulen))
66317 return -EFAULT;
66318- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66319+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66320 }
66321 EXPORT_SYMBOL(bitmap_parse_user);
66322
66323@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
66324 {
66325 unsigned a, b;
66326 int c, old_c, totaldigits;
66327- const char __user *ubuf = buf;
66328+ const char __user *ubuf = (const char __force_user *)buf;
66329 int exp_digit, in_range;
66330
66331 totaldigits = c = 0;
66332@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
66333 {
66334 if (!access_ok(VERIFY_READ, ubuf, ulen))
66335 return -EFAULT;
66336- return __bitmap_parselist((const char *)ubuf,
66337+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66338 ulen, 1, maskp, nmaskbits);
66339 }
66340 EXPORT_SYMBOL(bitmap_parselist_user);
66341diff -urNp linux-3.0.7/lib/bug.c linux-3.0.7/lib/bug.c
66342--- linux-3.0.7/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
66343+++ linux-3.0.7/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
66344@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66345 return BUG_TRAP_TYPE_NONE;
66346
66347 bug = find_bug(bugaddr);
66348+ if (!bug)
66349+ return BUG_TRAP_TYPE_NONE;
66350
66351 file = NULL;
66352 line = 0;
66353diff -urNp linux-3.0.7/lib/debugobjects.c linux-3.0.7/lib/debugobjects.c
66354--- linux-3.0.7/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
66355+++ linux-3.0.7/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
66356@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66357 if (limit > 4)
66358 return;
66359
66360- is_on_stack = object_is_on_stack(addr);
66361+ is_on_stack = object_starts_on_stack(addr);
66362 if (is_on_stack == onstack)
66363 return;
66364
66365diff -urNp linux-3.0.7/lib/devres.c linux-3.0.7/lib/devres.c
66366--- linux-3.0.7/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
66367+++ linux-3.0.7/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
66368@@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
66369 {
66370 iounmap(addr);
66371 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66372- (void *)addr));
66373+ (void __force *)addr));
66374 }
66375 EXPORT_SYMBOL(devm_iounmap);
66376
66377@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66378 {
66379 ioport_unmap(addr);
66380 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66381- devm_ioport_map_match, (void *)addr));
66382+ devm_ioport_map_match, (void __force *)addr));
66383 }
66384 EXPORT_SYMBOL(devm_ioport_unmap);
66385
66386diff -urNp linux-3.0.7/lib/dma-debug.c linux-3.0.7/lib/dma-debug.c
66387--- linux-3.0.7/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
66388+++ linux-3.0.7/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
66389@@ -870,7 +870,7 @@ out:
66390
66391 static void check_for_stack(struct device *dev, void *addr)
66392 {
66393- if (object_is_on_stack(addr))
66394+ if (object_starts_on_stack(addr))
66395 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66396 "stack [addr=%p]\n", addr);
66397 }
66398diff -urNp linux-3.0.7/lib/extable.c linux-3.0.7/lib/extable.c
66399--- linux-3.0.7/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
66400+++ linux-3.0.7/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
66401@@ -13,6 +13,7 @@
66402 #include <linux/init.h>
66403 #include <linux/sort.h>
66404 #include <asm/uaccess.h>
66405+#include <asm/pgtable.h>
66406
66407 #ifndef ARCH_HAS_SORT_EXTABLE
66408 /*
66409@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66410 void sort_extable(struct exception_table_entry *start,
66411 struct exception_table_entry *finish)
66412 {
66413+ pax_open_kernel();
66414 sort(start, finish - start, sizeof(struct exception_table_entry),
66415 cmp_ex, NULL);
66416+ pax_close_kernel();
66417 }
66418
66419 #ifdef CONFIG_MODULES
66420diff -urNp linux-3.0.7/lib/inflate.c linux-3.0.7/lib/inflate.c
66421--- linux-3.0.7/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
66422+++ linux-3.0.7/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
66423@@ -269,7 +269,7 @@ static void free(void *where)
66424 malloc_ptr = free_mem_ptr;
66425 }
66426 #else
66427-#define malloc(a) kmalloc(a, GFP_KERNEL)
66428+#define malloc(a) kmalloc((a), GFP_KERNEL)
66429 #define free(a) kfree(a)
66430 #endif
66431
66432diff -urNp linux-3.0.7/lib/Kconfig.debug linux-3.0.7/lib/Kconfig.debug
66433--- linux-3.0.7/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
66434+++ linux-3.0.7/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
66435@@ -1088,6 +1088,7 @@ config LATENCYTOP
66436 depends on DEBUG_KERNEL
66437 depends on STACKTRACE_SUPPORT
66438 depends on PROC_FS
66439+ depends on !GRKERNSEC_HIDESYM
66440 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66441 select KALLSYMS
66442 select KALLSYMS_ALL
66443diff -urNp linux-3.0.7/lib/kref.c linux-3.0.7/lib/kref.c
66444--- linux-3.0.7/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
66445+++ linux-3.0.7/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
66446@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66447 */
66448 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66449 {
66450- WARN_ON(release == NULL);
66451+ BUG_ON(release == NULL);
66452 WARN_ON(release == (void (*)(struct kref *))kfree);
66453
66454 if (atomic_dec_and_test(&kref->refcount)) {
66455diff -urNp linux-3.0.7/lib/radix-tree.c linux-3.0.7/lib/radix-tree.c
66456--- linux-3.0.7/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
66457+++ linux-3.0.7/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
66458@@ -80,7 +80,7 @@ struct radix_tree_preload {
66459 int nr;
66460 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66461 };
66462-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66463+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66464
66465 static inline void *ptr_to_indirect(void *ptr)
66466 {
66467diff -urNp linux-3.0.7/lib/vsprintf.c linux-3.0.7/lib/vsprintf.c
66468--- linux-3.0.7/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
66469+++ linux-3.0.7/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
66470@@ -16,6 +16,9 @@
66471 * - scnprintf and vscnprintf
66472 */
66473
66474+#ifdef CONFIG_GRKERNSEC_HIDESYM
66475+#define __INCLUDED_BY_HIDESYM 1
66476+#endif
66477 #include <stdarg.h>
66478 #include <linux/module.h>
66479 #include <linux/types.h>
66480@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
66481 char sym[KSYM_SYMBOL_LEN];
66482 if (ext == 'B')
66483 sprint_backtrace(sym, value);
66484- else if (ext != 'f' && ext != 's')
66485+ else if (ext != 'f' && ext != 's' && ext != 'a')
66486 sprint_symbol(sym, value);
66487 else
66488 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66489@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
66490 return string(buf, end, uuid, spec);
66491 }
66492
66493+#ifdef CONFIG_GRKERNSEC_HIDESYM
66494+int kptr_restrict __read_mostly = 2;
66495+#else
66496 int kptr_restrict __read_mostly;
66497+#endif
66498
66499 /*
66500 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66501@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
66502 * - 'S' For symbolic direct pointers with offset
66503 * - 's' For symbolic direct pointers without offset
66504 * - 'B' For backtraced symbolic direct pointers with offset
66505+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66506+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66507 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66508 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66509 * - 'M' For a 6-byte MAC address, it prints the address in the
66510@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
66511 {
66512 if (!ptr && *fmt != 'K') {
66513 /*
66514- * Print (null) with the same width as a pointer so it makes
66515+ * Print (nil) with the same width as a pointer so it makes
66516 * tabular output look nice.
66517 */
66518 if (spec.field_width == -1)
66519 spec.field_width = 2 * sizeof(void *);
66520- return string(buf, end, "(null)", spec);
66521+ return string(buf, end, "(nil)", spec);
66522 }
66523
66524 switch (*fmt) {
66525@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
66526 /* Fallthrough */
66527 case 'S':
66528 case 's':
66529+#ifdef CONFIG_GRKERNSEC_HIDESYM
66530+ break;
66531+#else
66532+ return symbol_string(buf, end, ptr, spec, *fmt);
66533+#endif
66534+ case 'A':
66535+ case 'a':
66536 case 'B':
66537 return symbol_string(buf, end, ptr, spec, *fmt);
66538 case 'R':
66539@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
66540 typeof(type) value; \
66541 if (sizeof(type) == 8) { \
66542 args = PTR_ALIGN(args, sizeof(u32)); \
66543- *(u32 *)&value = *(u32 *)args; \
66544- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66545+ *(u32 *)&value = *(const u32 *)args; \
66546+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66547 } else { \
66548 args = PTR_ALIGN(args, sizeof(type)); \
66549- value = *(typeof(type) *)args; \
66550+ value = *(const typeof(type) *)args; \
66551 } \
66552 args += sizeof(type); \
66553 value; \
66554@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
66555 case FORMAT_TYPE_STR: {
66556 const char *str_arg = args;
66557 args += strlen(str_arg) + 1;
66558- str = string(str, end, (char *)str_arg, spec);
66559+ str = string(str, end, str_arg, spec);
66560 break;
66561 }
66562
66563diff -urNp linux-3.0.7/localversion-grsec linux-3.0.7/localversion-grsec
66564--- linux-3.0.7/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66565+++ linux-3.0.7/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
66566@@ -0,0 +1 @@
66567+-grsec
66568diff -urNp linux-3.0.7/Makefile linux-3.0.7/Makefile
66569--- linux-3.0.7/Makefile 2011-10-17 23:17:08.000000000 -0400
66570+++ linux-3.0.7/Makefile 2011-10-17 23:17:19.000000000 -0400
66571@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66572
66573 HOSTCC = gcc
66574 HOSTCXX = g++
66575-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66576-HOSTCXXFLAGS = -O2
66577+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66578+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66579+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66580
66581 # Decide whether to build built-in, modular, or both.
66582 # Normally, just do built-in.
66583@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
66584 KBUILD_CPPFLAGS := -D__KERNEL__
66585
66586 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66587+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66588 -fno-strict-aliasing -fno-common \
66589 -Werror-implicit-function-declaration \
66590 -Wno-format-security \
66591 -fno-delete-null-pointer-checks
66592+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66593 KBUILD_AFLAGS_KERNEL :=
66594 KBUILD_CFLAGS_KERNEL :=
66595 KBUILD_AFLAGS := -D__ASSEMBLY__
66596@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66597 # Rules shared between *config targets and build targets
66598
66599 # Basic helpers built in scripts/
66600-PHONY += scripts_basic
66601-scripts_basic:
66602+PHONY += scripts_basic gcc-plugins
66603+scripts_basic: gcc-plugins
66604 $(Q)$(MAKE) $(build)=scripts/basic
66605 $(Q)rm -f .tmp_quiet_recordmcount
66606
66607@@ -564,6 +567,36 @@ else
66608 KBUILD_CFLAGS += -O2
66609 endif
66610
66611+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
66612+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66613+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66614+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66615+endif
66616+ifdef CONFIG_KALLOCSTAT_PLUGIN
66617+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
66618+endif
66619+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
66620+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
66621+endif
66622+ifdef CONFIG_CHECKER_PLUGIN
66623+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
66624+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
66625+endif
66626+endif
66627+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
66628+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
66629+gcc-plugins:
66630+ $(Q)$(MAKE) $(build)=tools/gcc
66631+else
66632+gcc-plugins:
66633+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66634+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66635+else
66636+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66637+endif
66638+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66639+endif
66640+
66641 include $(srctree)/arch/$(SRCARCH)/Makefile
66642
66643 ifneq ($(CONFIG_FRAME_WARN),0)
66644@@ -708,7 +741,7 @@ export mod_strip_cmd
66645
66646
66647 ifeq ($(KBUILD_EXTMOD),)
66648-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66649+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66650
66651 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66652 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66653@@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
66654
66655 # The actual objects are generated when descending,
66656 # make sure no implicit rule kicks in
66657+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
66658 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
66659
66660 # Handle descending into subdirectories listed in $(vmlinux-dirs)
66661@@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
66662 # Error messages still appears in the original language
66663
66664 PHONY += $(vmlinux-dirs)
66665-$(vmlinux-dirs): prepare scripts
66666+$(vmlinux-dirs): gcc-plugins prepare scripts
66667 $(Q)$(MAKE) $(build)=$@
66668
66669 # Store (new) KERNELRELASE string in include/config/kernel.release
66670@@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
66671 $(Q)$(MAKE) $(build)=. missing-syscalls
66672
66673 # All the preparing..
66674+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
66675 prepare: prepare0
66676
66677 # Generate some files
66678@@ -1087,6 +1122,7 @@ all: modules
66679 # using awk while concatenating to the final file.
66680
66681 PHONY += modules
66682+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66683 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
66684 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66685 @$(kecho) ' Building modules, stage 2.';
66686@@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
66687
66688 # Target to prepare building external modules
66689 PHONY += modules_prepare
66690-modules_prepare: prepare scripts
66691+modules_prepare: gcc-plugins prepare scripts
66692
66693 # Target to install modules
66694 PHONY += modules_install
66695@@ -1198,7 +1234,7 @@ distclean: mrproper
66696 @find $(srctree) $(RCS_FIND_IGNORE) \
66697 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66698 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66699- -o -name '.*.rej' -o -size 0 \
66700+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
66701 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66702 -type f -print | xargs rm -f
66703
66704@@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
66705 $(module-dirs): crmodverdir $(objtree)/Module.symvers
66706 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
66707
66708+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66709 modules: $(module-dirs)
66710 @$(kecho) ' Building modules, stage 2.';
66711 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
66712@@ -1485,17 +1522,19 @@ else
66713 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
66714 endif
66715
66716-%.s: %.c prepare scripts FORCE
66717+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
66718+%.s: %.c gcc-plugins prepare scripts FORCE
66719 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66720 %.i: %.c prepare scripts FORCE
66721 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66722-%.o: %.c prepare scripts FORCE
66723+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
66724+%.o: %.c gcc-plugins prepare scripts FORCE
66725 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66726 %.lst: %.c prepare scripts FORCE
66727 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66728-%.s: %.S prepare scripts FORCE
66729+%.s: %.S gcc-plugins prepare scripts FORCE
66730 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66731-%.o: %.S prepare scripts FORCE
66732+%.o: %.S gcc-plugins prepare scripts FORCE
66733 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66734 %.symtypes: %.c prepare scripts FORCE
66735 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66736@@ -1505,11 +1544,13 @@ endif
66737 $(cmd_crmodverdir)
66738 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66739 $(build)=$(build-dir)
66740-%/: prepare scripts FORCE
66741+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
66742+%/: gcc-plugins prepare scripts FORCE
66743 $(cmd_crmodverdir)
66744 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66745 $(build)=$(build-dir)
66746-%.ko: prepare scripts FORCE
66747+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
66748+%.ko: gcc-plugins prepare scripts FORCE
66749 $(cmd_crmodverdir)
66750 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66751 $(build)=$(build-dir) $(@:.ko=.o)
66752diff -urNp linux-3.0.7/mm/filemap.c linux-3.0.7/mm/filemap.c
66753--- linux-3.0.7/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
66754+++ linux-3.0.7/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
66755@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
66756 struct address_space *mapping = file->f_mapping;
66757
66758 if (!mapping->a_ops->readpage)
66759- return -ENOEXEC;
66760+ return -ENODEV;
66761 file_accessed(file);
66762 vma->vm_ops = &generic_file_vm_ops;
66763 vma->vm_flags |= VM_CAN_NONLINEAR;
66764@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
66765 *pos = i_size_read(inode);
66766
66767 if (limit != RLIM_INFINITY) {
66768+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66769 if (*pos >= limit) {
66770 send_sig(SIGXFSZ, current, 0);
66771 return -EFBIG;
66772diff -urNp linux-3.0.7/mm/fremap.c linux-3.0.7/mm/fremap.c
66773--- linux-3.0.7/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
66774+++ linux-3.0.7/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
66775@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66776 retry:
66777 vma = find_vma(mm, start);
66778
66779+#ifdef CONFIG_PAX_SEGMEXEC
66780+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66781+ goto out;
66782+#endif
66783+
66784 /*
66785 * Make sure the vma is shared, that it supports prefaulting,
66786 * and that the remapped range is valid and fully within
66787diff -urNp linux-3.0.7/mm/highmem.c linux-3.0.7/mm/highmem.c
66788--- linux-3.0.7/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
66789+++ linux-3.0.7/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
66790@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
66791 * So no dangers, even with speculative execution.
66792 */
66793 page = pte_page(pkmap_page_table[i]);
66794+ pax_open_kernel();
66795 pte_clear(&init_mm, (unsigned long)page_address(page),
66796 &pkmap_page_table[i]);
66797-
66798+ pax_close_kernel();
66799 set_page_address(page, NULL);
66800 need_flush = 1;
66801 }
66802@@ -186,9 +187,11 @@ start:
66803 }
66804 }
66805 vaddr = PKMAP_ADDR(last_pkmap_nr);
66806+
66807+ pax_open_kernel();
66808 set_pte_at(&init_mm, vaddr,
66809 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66810-
66811+ pax_close_kernel();
66812 pkmap_count[last_pkmap_nr] = 1;
66813 set_page_address(page, (void *)vaddr);
66814
66815diff -urNp linux-3.0.7/mm/huge_memory.c linux-3.0.7/mm/huge_memory.c
66816--- linux-3.0.7/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
66817+++ linux-3.0.7/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
66818@@ -702,7 +702,7 @@ out:
66819 * run pte_offset_map on the pmd, if an huge pmd could
66820 * materialize from under us from a different thread.
66821 */
66822- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
66823+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66824 return VM_FAULT_OOM;
66825 /* if an huge pmd materialized from under us just retry later */
66826 if (unlikely(pmd_trans_huge(*pmd)))
66827diff -urNp linux-3.0.7/mm/hugetlb.c linux-3.0.7/mm/hugetlb.c
66828--- linux-3.0.7/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
66829+++ linux-3.0.7/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
66830@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
66831 return 1;
66832 }
66833
66834+#ifdef CONFIG_PAX_SEGMEXEC
66835+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66836+{
66837+ struct mm_struct *mm = vma->vm_mm;
66838+ struct vm_area_struct *vma_m;
66839+ unsigned long address_m;
66840+ pte_t *ptep_m;
66841+
66842+ vma_m = pax_find_mirror_vma(vma);
66843+ if (!vma_m)
66844+ return;
66845+
66846+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66847+ address_m = address + SEGMEXEC_TASK_SIZE;
66848+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66849+ get_page(page_m);
66850+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
66851+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66852+}
66853+#endif
66854+
66855 /*
66856 * Hugetlb_cow() should be called with page lock of the original hugepage held.
66857 */
66858@@ -2440,6 +2461,11 @@ retry_avoidcopy:
66859 make_huge_pte(vma, new_page, 1));
66860 page_remove_rmap(old_page);
66861 hugepage_add_new_anon_rmap(new_page, vma, address);
66862+
66863+#ifdef CONFIG_PAX_SEGMEXEC
66864+ pax_mirror_huge_pte(vma, address, new_page);
66865+#endif
66866+
66867 /* Make the old page be freed below */
66868 new_page = old_page;
66869 mmu_notifier_invalidate_range_end(mm,
66870@@ -2591,6 +2617,10 @@ retry:
66871 && (vma->vm_flags & VM_SHARED)));
66872 set_huge_pte_at(mm, address, ptep, new_pte);
66873
66874+#ifdef CONFIG_PAX_SEGMEXEC
66875+ pax_mirror_huge_pte(vma, address, page);
66876+#endif
66877+
66878 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
66879 /* Optimization, do the COW without a second fault */
66880 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
66881@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
66882 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
66883 struct hstate *h = hstate_vma(vma);
66884
66885+#ifdef CONFIG_PAX_SEGMEXEC
66886+ struct vm_area_struct *vma_m;
66887+#endif
66888+
66889 ptep = huge_pte_offset(mm, address);
66890 if (ptep) {
66891 entry = huge_ptep_get(ptep);
66892@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
66893 VM_FAULT_SET_HINDEX(h - hstates);
66894 }
66895
66896+#ifdef CONFIG_PAX_SEGMEXEC
66897+ vma_m = pax_find_mirror_vma(vma);
66898+ if (vma_m) {
66899+ unsigned long address_m;
66900+
66901+ if (vma->vm_start > vma_m->vm_start) {
66902+ address_m = address;
66903+ address -= SEGMEXEC_TASK_SIZE;
66904+ vma = vma_m;
66905+ h = hstate_vma(vma);
66906+ } else
66907+ address_m = address + SEGMEXEC_TASK_SIZE;
66908+
66909+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
66910+ return VM_FAULT_OOM;
66911+ address_m &= HPAGE_MASK;
66912+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
66913+ }
66914+#endif
66915+
66916 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
66917 if (!ptep)
66918 return VM_FAULT_OOM;
66919diff -urNp linux-3.0.7/mm/internal.h linux-3.0.7/mm/internal.h
66920--- linux-3.0.7/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
66921+++ linux-3.0.7/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
66922@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
66923 * in mm/page_alloc.c
66924 */
66925 extern void __free_pages_bootmem(struct page *page, unsigned int order);
66926+extern void free_compound_page(struct page *page);
66927 extern void prep_compound_page(struct page *page, unsigned long order);
66928 #ifdef CONFIG_MEMORY_FAILURE
66929 extern bool is_free_buddy_page(struct page *page);
66930diff -urNp linux-3.0.7/mm/Kconfig linux-3.0.7/mm/Kconfig
66931--- linux-3.0.7/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
66932+++ linux-3.0.7/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
66933@@ -240,7 +240,7 @@ config KSM
66934 config DEFAULT_MMAP_MIN_ADDR
66935 int "Low address space to protect from user allocation"
66936 depends on MMU
66937- default 4096
66938+ default 65536
66939 help
66940 This is the portion of low virtual memory which should be protected
66941 from userspace allocation. Keeping a user from writing to low pages
66942diff -urNp linux-3.0.7/mm/kmemleak.c linux-3.0.7/mm/kmemleak.c
66943--- linux-3.0.7/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
66944+++ linux-3.0.7/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
66945@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
66946
66947 for (i = 0; i < object->trace_len; i++) {
66948 void *ptr = (void *)object->trace[i];
66949- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
66950+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
66951 }
66952 }
66953
66954diff -urNp linux-3.0.7/mm/maccess.c linux-3.0.7/mm/maccess.c
66955--- linux-3.0.7/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
66956+++ linux-3.0.7/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
66957@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
66958 set_fs(KERNEL_DS);
66959 pagefault_disable();
66960 ret = __copy_from_user_inatomic(dst,
66961- (__force const void __user *)src, size);
66962+ (const void __force_user *)src, size);
66963 pagefault_enable();
66964 set_fs(old_fs);
66965
66966@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
66967
66968 set_fs(KERNEL_DS);
66969 pagefault_disable();
66970- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
66971+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
66972 pagefault_enable();
66973 set_fs(old_fs);
66974
66975diff -urNp linux-3.0.7/mm/madvise.c linux-3.0.7/mm/madvise.c
66976--- linux-3.0.7/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
66977+++ linux-3.0.7/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
66978@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
66979 pgoff_t pgoff;
66980 unsigned long new_flags = vma->vm_flags;
66981
66982+#ifdef CONFIG_PAX_SEGMEXEC
66983+ struct vm_area_struct *vma_m;
66984+#endif
66985+
66986 switch (behavior) {
66987 case MADV_NORMAL:
66988 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
66989@@ -110,6 +114,13 @@ success:
66990 /*
66991 * vm_flags is protected by the mmap_sem held in write mode.
66992 */
66993+
66994+#ifdef CONFIG_PAX_SEGMEXEC
66995+ vma_m = pax_find_mirror_vma(vma);
66996+ if (vma_m)
66997+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
66998+#endif
66999+
67000 vma->vm_flags = new_flags;
67001
67002 out:
67003@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67004 struct vm_area_struct ** prev,
67005 unsigned long start, unsigned long end)
67006 {
67007+
67008+#ifdef CONFIG_PAX_SEGMEXEC
67009+ struct vm_area_struct *vma_m;
67010+#endif
67011+
67012 *prev = vma;
67013 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67014 return -EINVAL;
67015@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67016 zap_page_range(vma, start, end - start, &details);
67017 } else
67018 zap_page_range(vma, start, end - start, NULL);
67019+
67020+#ifdef CONFIG_PAX_SEGMEXEC
67021+ vma_m = pax_find_mirror_vma(vma);
67022+ if (vma_m) {
67023+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67024+ struct zap_details details = {
67025+ .nonlinear_vma = vma_m,
67026+ .last_index = ULONG_MAX,
67027+ };
67028+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67029+ } else
67030+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67031+ }
67032+#endif
67033+
67034 return 0;
67035 }
67036
67037@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67038 if (end < start)
67039 goto out;
67040
67041+#ifdef CONFIG_PAX_SEGMEXEC
67042+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67043+ if (end > SEGMEXEC_TASK_SIZE)
67044+ goto out;
67045+ } else
67046+#endif
67047+
67048+ if (end > TASK_SIZE)
67049+ goto out;
67050+
67051 error = 0;
67052 if (end == start)
67053 goto out;
67054diff -urNp linux-3.0.7/mm/memory.c linux-3.0.7/mm/memory.c
67055--- linux-3.0.7/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
67056+++ linux-3.0.7/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
67057@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67058 return;
67059
67060 pmd = pmd_offset(pud, start);
67061+
67062+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67063 pud_clear(pud);
67064 pmd_free_tlb(tlb, pmd, start);
67065+#endif
67066+
67067 }
67068
67069 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67070@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67071 if (end - 1 > ceiling - 1)
67072 return;
67073
67074+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67075 pud = pud_offset(pgd, start);
67076 pgd_clear(pgd);
67077 pud_free_tlb(tlb, pud, start);
67078+#endif
67079+
67080 }
67081
67082 /*
67083@@ -1577,12 +1584,6 @@ no_page_table:
67084 return page;
67085 }
67086
67087-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67088-{
67089- return stack_guard_page_start(vma, addr) ||
67090- stack_guard_page_end(vma, addr+PAGE_SIZE);
67091-}
67092-
67093 /**
67094 * __get_user_pages() - pin user pages in memory
67095 * @tsk: task_struct of target task
67096@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
67097 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67098 i = 0;
67099
67100- do {
67101+ while (nr_pages) {
67102 struct vm_area_struct *vma;
67103
67104- vma = find_extend_vma(mm, start);
67105+ vma = find_vma(mm, start);
67106 if (!vma && in_gate_area(mm, start)) {
67107 unsigned long pg = start & PAGE_MASK;
67108 pgd_t *pgd;
67109@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
67110 goto next_page;
67111 }
67112
67113- if (!vma ||
67114+ if (!vma || start < vma->vm_start ||
67115 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67116 !(vm_flags & vma->vm_flags))
67117 return i ? : -EFAULT;
67118@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
67119 int ret;
67120 unsigned int fault_flags = 0;
67121
67122- /* For mlock, just skip the stack guard page. */
67123- if (foll_flags & FOLL_MLOCK) {
67124- if (stack_guard_page(vma, start))
67125- goto next_page;
67126- }
67127 if (foll_flags & FOLL_WRITE)
67128 fault_flags |= FAULT_FLAG_WRITE;
67129 if (nonblocking)
67130@@ -1811,7 +1807,7 @@ next_page:
67131 start += PAGE_SIZE;
67132 nr_pages--;
67133 } while (nr_pages && start < vma->vm_end);
67134- } while (nr_pages);
67135+ }
67136 return i;
67137 }
67138 EXPORT_SYMBOL(__get_user_pages);
67139@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
67140 page_add_file_rmap(page);
67141 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67142
67143+#ifdef CONFIG_PAX_SEGMEXEC
67144+ pax_mirror_file_pte(vma, addr, page, ptl);
67145+#endif
67146+
67147 retval = 0;
67148 pte_unmap_unlock(pte, ptl);
67149 return retval;
67150@@ -2052,10 +2052,22 @@ out:
67151 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67152 struct page *page)
67153 {
67154+
67155+#ifdef CONFIG_PAX_SEGMEXEC
67156+ struct vm_area_struct *vma_m;
67157+#endif
67158+
67159 if (addr < vma->vm_start || addr >= vma->vm_end)
67160 return -EFAULT;
67161 if (!page_count(page))
67162 return -EINVAL;
67163+
67164+#ifdef CONFIG_PAX_SEGMEXEC
67165+ vma_m = pax_find_mirror_vma(vma);
67166+ if (vma_m)
67167+ vma_m->vm_flags |= VM_INSERTPAGE;
67168+#endif
67169+
67170 vma->vm_flags |= VM_INSERTPAGE;
67171 return insert_page(vma, addr, page, vma->vm_page_prot);
67172 }
67173@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
67174 unsigned long pfn)
67175 {
67176 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67177+ BUG_ON(vma->vm_mirror);
67178
67179 if (addr < vma->vm_start || addr >= vma->vm_end)
67180 return -EFAULT;
67181@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
67182 copy_user_highpage(dst, src, va, vma);
67183 }
67184
67185+#ifdef CONFIG_PAX_SEGMEXEC
67186+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67187+{
67188+ struct mm_struct *mm = vma->vm_mm;
67189+ spinlock_t *ptl;
67190+ pte_t *pte, entry;
67191+
67192+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67193+ entry = *pte;
67194+ if (!pte_present(entry)) {
67195+ if (!pte_none(entry)) {
67196+ BUG_ON(pte_file(entry));
67197+ free_swap_and_cache(pte_to_swp_entry(entry));
67198+ pte_clear_not_present_full(mm, address, pte, 0);
67199+ }
67200+ } else {
67201+ struct page *page;
67202+
67203+ flush_cache_page(vma, address, pte_pfn(entry));
67204+ entry = ptep_clear_flush(vma, address, pte);
67205+ BUG_ON(pte_dirty(entry));
67206+ page = vm_normal_page(vma, address, entry);
67207+ if (page) {
67208+ update_hiwater_rss(mm);
67209+ if (PageAnon(page))
67210+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67211+ else
67212+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67213+ page_remove_rmap(page);
67214+ page_cache_release(page);
67215+ }
67216+ }
67217+ pte_unmap_unlock(pte, ptl);
67218+}
67219+
67220+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67221+ *
67222+ * the ptl of the lower mapped page is held on entry and is not released on exit
67223+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67224+ */
67225+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67226+{
67227+ struct mm_struct *mm = vma->vm_mm;
67228+ unsigned long address_m;
67229+ spinlock_t *ptl_m;
67230+ struct vm_area_struct *vma_m;
67231+ pmd_t *pmd_m;
67232+ pte_t *pte_m, entry_m;
67233+
67234+ BUG_ON(!page_m || !PageAnon(page_m));
67235+
67236+ vma_m = pax_find_mirror_vma(vma);
67237+ if (!vma_m)
67238+ return;
67239+
67240+ BUG_ON(!PageLocked(page_m));
67241+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67242+ address_m = address + SEGMEXEC_TASK_SIZE;
67243+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67244+ pte_m = pte_offset_map(pmd_m, address_m);
67245+ ptl_m = pte_lockptr(mm, pmd_m);
67246+ if (ptl != ptl_m) {
67247+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67248+ if (!pte_none(*pte_m))
67249+ goto out;
67250+ }
67251+
67252+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67253+ page_cache_get(page_m);
67254+ page_add_anon_rmap(page_m, vma_m, address_m);
67255+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67256+ set_pte_at(mm, address_m, pte_m, entry_m);
67257+ update_mmu_cache(vma_m, address_m, entry_m);
67258+out:
67259+ if (ptl != ptl_m)
67260+ spin_unlock(ptl_m);
67261+ pte_unmap(pte_m);
67262+ unlock_page(page_m);
67263+}
67264+
67265+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67266+{
67267+ struct mm_struct *mm = vma->vm_mm;
67268+ unsigned long address_m;
67269+ spinlock_t *ptl_m;
67270+ struct vm_area_struct *vma_m;
67271+ pmd_t *pmd_m;
67272+ pte_t *pte_m, entry_m;
67273+
67274+ BUG_ON(!page_m || PageAnon(page_m));
67275+
67276+ vma_m = pax_find_mirror_vma(vma);
67277+ if (!vma_m)
67278+ return;
67279+
67280+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67281+ address_m = address + SEGMEXEC_TASK_SIZE;
67282+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67283+ pte_m = pte_offset_map(pmd_m, address_m);
67284+ ptl_m = pte_lockptr(mm, pmd_m);
67285+ if (ptl != ptl_m) {
67286+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67287+ if (!pte_none(*pte_m))
67288+ goto out;
67289+ }
67290+
67291+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67292+ page_cache_get(page_m);
67293+ page_add_file_rmap(page_m);
67294+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67295+ set_pte_at(mm, address_m, pte_m, entry_m);
67296+ update_mmu_cache(vma_m, address_m, entry_m);
67297+out:
67298+ if (ptl != ptl_m)
67299+ spin_unlock(ptl_m);
67300+ pte_unmap(pte_m);
67301+}
67302+
67303+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67304+{
67305+ struct mm_struct *mm = vma->vm_mm;
67306+ unsigned long address_m;
67307+ spinlock_t *ptl_m;
67308+ struct vm_area_struct *vma_m;
67309+ pmd_t *pmd_m;
67310+ pte_t *pte_m, entry_m;
67311+
67312+ vma_m = pax_find_mirror_vma(vma);
67313+ if (!vma_m)
67314+ return;
67315+
67316+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67317+ address_m = address + SEGMEXEC_TASK_SIZE;
67318+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67319+ pte_m = pte_offset_map(pmd_m, address_m);
67320+ ptl_m = pte_lockptr(mm, pmd_m);
67321+ if (ptl != ptl_m) {
67322+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67323+ if (!pte_none(*pte_m))
67324+ goto out;
67325+ }
67326+
67327+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67328+ set_pte_at(mm, address_m, pte_m, entry_m);
67329+out:
67330+ if (ptl != ptl_m)
67331+ spin_unlock(ptl_m);
67332+ pte_unmap(pte_m);
67333+}
67334+
67335+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67336+{
67337+ struct page *page_m;
67338+ pte_t entry;
67339+
67340+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67341+ goto out;
67342+
67343+ entry = *pte;
67344+ page_m = vm_normal_page(vma, address, entry);
67345+ if (!page_m)
67346+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67347+ else if (PageAnon(page_m)) {
67348+ if (pax_find_mirror_vma(vma)) {
67349+ pte_unmap_unlock(pte, ptl);
67350+ lock_page(page_m);
67351+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67352+ if (pte_same(entry, *pte))
67353+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67354+ else
67355+ unlock_page(page_m);
67356+ }
67357+ } else
67358+ pax_mirror_file_pte(vma, address, page_m, ptl);
67359+
67360+out:
67361+ pte_unmap_unlock(pte, ptl);
67362+}
67363+#endif
67364+
67365 /*
67366 * This routine handles present pages, when users try to write
67367 * to a shared page. It is done by copying the page to a new address
67368@@ -2667,6 +2860,12 @@ gotten:
67369 */
67370 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67371 if (likely(pte_same(*page_table, orig_pte))) {
67372+
67373+#ifdef CONFIG_PAX_SEGMEXEC
67374+ if (pax_find_mirror_vma(vma))
67375+ BUG_ON(!trylock_page(new_page));
67376+#endif
67377+
67378 if (old_page) {
67379 if (!PageAnon(old_page)) {
67380 dec_mm_counter_fast(mm, MM_FILEPAGES);
67381@@ -2718,6 +2917,10 @@ gotten:
67382 page_remove_rmap(old_page);
67383 }
67384
67385+#ifdef CONFIG_PAX_SEGMEXEC
67386+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67387+#endif
67388+
67389 /* Free the old page.. */
67390 new_page = old_page;
67391 ret |= VM_FAULT_WRITE;
67392@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
67393 swap_free(entry);
67394 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67395 try_to_free_swap(page);
67396+
67397+#ifdef CONFIG_PAX_SEGMEXEC
67398+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67399+#endif
67400+
67401 unlock_page(page);
67402 if (swapcache) {
67403 /*
67404@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
67405
67406 /* No need to invalidate - it was non-present before */
67407 update_mmu_cache(vma, address, page_table);
67408+
67409+#ifdef CONFIG_PAX_SEGMEXEC
67410+ pax_mirror_anon_pte(vma, address, page, ptl);
67411+#endif
67412+
67413 unlock:
67414 pte_unmap_unlock(page_table, ptl);
67415 out:
67416@@ -3039,40 +3252,6 @@ out_release:
67417 }
67418
67419 /*
67420- * This is like a special single-page "expand_{down|up}wards()",
67421- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67422- * doesn't hit another vma.
67423- */
67424-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67425-{
67426- address &= PAGE_MASK;
67427- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67428- struct vm_area_struct *prev = vma->vm_prev;
67429-
67430- /*
67431- * Is there a mapping abutting this one below?
67432- *
67433- * That's only ok if it's the same stack mapping
67434- * that has gotten split..
67435- */
67436- if (prev && prev->vm_end == address)
67437- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67438-
67439- expand_downwards(vma, address - PAGE_SIZE);
67440- }
67441- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67442- struct vm_area_struct *next = vma->vm_next;
67443-
67444- /* As VM_GROWSDOWN but s/below/above/ */
67445- if (next && next->vm_start == address + PAGE_SIZE)
67446- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67447-
67448- expand_upwards(vma, address + PAGE_SIZE);
67449- }
67450- return 0;
67451-}
67452-
67453-/*
67454 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67455 * but allow concurrent faults), and pte mapped but not yet locked.
67456 * We return with mmap_sem still held, but pte unmapped and unlocked.
67457@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
67458 unsigned long address, pte_t *page_table, pmd_t *pmd,
67459 unsigned int flags)
67460 {
67461- struct page *page;
67462+ struct page *page = NULL;
67463 spinlock_t *ptl;
67464 pte_t entry;
67465
67466- pte_unmap(page_table);
67467-
67468- /* Check if we need to add a guard page to the stack */
67469- if (check_stack_guard_page(vma, address) < 0)
67470- return VM_FAULT_SIGBUS;
67471-
67472- /* Use the zero-page for reads */
67473 if (!(flags & FAULT_FLAG_WRITE)) {
67474 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67475 vma->vm_page_prot));
67476- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67477+ ptl = pte_lockptr(mm, pmd);
67478+ spin_lock(ptl);
67479 if (!pte_none(*page_table))
67480 goto unlock;
67481 goto setpte;
67482 }
67483
67484 /* Allocate our own private page. */
67485+ pte_unmap(page_table);
67486+
67487 if (unlikely(anon_vma_prepare(vma)))
67488 goto oom;
67489 page = alloc_zeroed_user_highpage_movable(vma, address);
67490@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
67491 if (!pte_none(*page_table))
67492 goto release;
67493
67494+#ifdef CONFIG_PAX_SEGMEXEC
67495+ if (pax_find_mirror_vma(vma))
67496+ BUG_ON(!trylock_page(page));
67497+#endif
67498+
67499 inc_mm_counter_fast(mm, MM_ANONPAGES);
67500 page_add_new_anon_rmap(page, vma, address);
67501 setpte:
67502@@ -3127,6 +3307,12 @@ setpte:
67503
67504 /* No need to invalidate - it was non-present before */
67505 update_mmu_cache(vma, address, page_table);
67506+
67507+#ifdef CONFIG_PAX_SEGMEXEC
67508+ if (page)
67509+ pax_mirror_anon_pte(vma, address, page, ptl);
67510+#endif
67511+
67512 unlock:
67513 pte_unmap_unlock(page_table, ptl);
67514 return 0;
67515@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
67516 */
67517 /* Only go through if we didn't race with anybody else... */
67518 if (likely(pte_same(*page_table, orig_pte))) {
67519+
67520+#ifdef CONFIG_PAX_SEGMEXEC
67521+ if (anon && pax_find_mirror_vma(vma))
67522+ BUG_ON(!trylock_page(page));
67523+#endif
67524+
67525 flush_icache_page(vma, page);
67526 entry = mk_pte(page, vma->vm_page_prot);
67527 if (flags & FAULT_FLAG_WRITE)
67528@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
67529
67530 /* no need to invalidate: a not-present page won't be cached */
67531 update_mmu_cache(vma, address, page_table);
67532+
67533+#ifdef CONFIG_PAX_SEGMEXEC
67534+ if (anon)
67535+ pax_mirror_anon_pte(vma, address, page, ptl);
67536+ else
67537+ pax_mirror_file_pte(vma, address, page, ptl);
67538+#endif
67539+
67540 } else {
67541 if (charged)
67542 mem_cgroup_uncharge_page(page);
67543@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
67544 if (flags & FAULT_FLAG_WRITE)
67545 flush_tlb_fix_spurious_fault(vma, address);
67546 }
67547+
67548+#ifdef CONFIG_PAX_SEGMEXEC
67549+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67550+ return 0;
67551+#endif
67552+
67553 unlock:
67554 pte_unmap_unlock(pte, ptl);
67555 return 0;
67556@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
67557 pmd_t *pmd;
67558 pte_t *pte;
67559
67560+#ifdef CONFIG_PAX_SEGMEXEC
67561+ struct vm_area_struct *vma_m;
67562+#endif
67563+
67564 __set_current_state(TASK_RUNNING);
67565
67566 count_vm_event(PGFAULT);
67567@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
67568 if (unlikely(is_vm_hugetlb_page(vma)))
67569 return hugetlb_fault(mm, vma, address, flags);
67570
67571+#ifdef CONFIG_PAX_SEGMEXEC
67572+ vma_m = pax_find_mirror_vma(vma);
67573+ if (vma_m) {
67574+ unsigned long address_m;
67575+ pgd_t *pgd_m;
67576+ pud_t *pud_m;
67577+ pmd_t *pmd_m;
67578+
67579+ if (vma->vm_start > vma_m->vm_start) {
67580+ address_m = address;
67581+ address -= SEGMEXEC_TASK_SIZE;
67582+ vma = vma_m;
67583+ } else
67584+ address_m = address + SEGMEXEC_TASK_SIZE;
67585+
67586+ pgd_m = pgd_offset(mm, address_m);
67587+ pud_m = pud_alloc(mm, pgd_m, address_m);
67588+ if (!pud_m)
67589+ return VM_FAULT_OOM;
67590+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67591+ if (!pmd_m)
67592+ return VM_FAULT_OOM;
67593+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67594+ return VM_FAULT_OOM;
67595+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67596+ }
67597+#endif
67598+
67599 pgd = pgd_offset(mm, address);
67600 pud = pud_alloc(mm, pgd, address);
67601 if (!pud)
67602@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
67603 * run pte_offset_map on the pmd, if an huge pmd could
67604 * materialize from under us from a different thread.
67605 */
67606- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
67607+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67608 return VM_FAULT_OOM;
67609 /* if an huge pmd materialized from under us just retry later */
67610 if (unlikely(pmd_trans_huge(*pmd)))
67611@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
67612 gate_vma.vm_start = FIXADDR_USER_START;
67613 gate_vma.vm_end = FIXADDR_USER_END;
67614 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67615- gate_vma.vm_page_prot = __P101;
67616+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67617 /*
67618 * Make sure the vDSO gets into every core dump.
67619 * Dumping its contents makes post-mortem fully interpretable later
67620diff -urNp linux-3.0.7/mm/memory-failure.c linux-3.0.7/mm/memory-failure.c
67621--- linux-3.0.7/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
67622+++ linux-3.0.7/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
67623@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
67624
67625 int sysctl_memory_failure_recovery __read_mostly = 1;
67626
67627-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67628+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67629
67630 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67631
67632@@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
67633 si.si_signo = SIGBUS;
67634 si.si_errno = 0;
67635 si.si_code = BUS_MCEERR_AO;
67636- si.si_addr = (void *)addr;
67637+ si.si_addr = (void __user *)addr;
67638 #ifdef __ARCH_SI_TRAPNO
67639 si.si_trapno = trapno;
67640 #endif
67641@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
67642 }
67643
67644 nr_pages = 1 << compound_trans_order(hpage);
67645- atomic_long_add(nr_pages, &mce_bad_pages);
67646+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67647
67648 /*
67649 * We need/can do nothing about count=0 pages.
67650@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
67651 if (!PageHWPoison(hpage)
67652 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67653 || (p != hpage && TestSetPageHWPoison(hpage))) {
67654- atomic_long_sub(nr_pages, &mce_bad_pages);
67655+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67656 return 0;
67657 }
67658 set_page_hwpoison_huge_page(hpage);
67659@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
67660 }
67661 if (hwpoison_filter(p)) {
67662 if (TestClearPageHWPoison(p))
67663- atomic_long_sub(nr_pages, &mce_bad_pages);
67664+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67665 unlock_page(hpage);
67666 put_page(hpage);
67667 return 0;
67668@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
67669 return 0;
67670 }
67671 if (TestClearPageHWPoison(p))
67672- atomic_long_sub(nr_pages, &mce_bad_pages);
67673+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67674 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67675 return 0;
67676 }
67677@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
67678 */
67679 if (TestClearPageHWPoison(page)) {
67680 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67681- atomic_long_sub(nr_pages, &mce_bad_pages);
67682+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67683 freeit = 1;
67684 if (PageHuge(page))
67685 clear_page_hwpoison_huge_page(page);
67686@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
67687 }
67688 done:
67689 if (!PageHWPoison(hpage))
67690- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67691+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67692 set_page_hwpoison_huge_page(hpage);
67693 dequeue_hwpoisoned_huge_page(hpage);
67694 /* keep elevated page count for bad page */
67695@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
67696 return ret;
67697
67698 done:
67699- atomic_long_add(1, &mce_bad_pages);
67700+ atomic_long_add_unchecked(1, &mce_bad_pages);
67701 SetPageHWPoison(page);
67702 /* keep elevated page count for bad page */
67703 return ret;
67704diff -urNp linux-3.0.7/mm/mempolicy.c linux-3.0.7/mm/mempolicy.c
67705--- linux-3.0.7/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
67706+++ linux-3.0.7/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
67707@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
67708 unsigned long vmstart;
67709 unsigned long vmend;
67710
67711+#ifdef CONFIG_PAX_SEGMEXEC
67712+ struct vm_area_struct *vma_m;
67713+#endif
67714+
67715 vma = find_vma_prev(mm, start, &prev);
67716 if (!vma || vma->vm_start > start)
67717 return -EFAULT;
67718@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
67719 err = policy_vma(vma, new_pol);
67720 if (err)
67721 goto out;
67722+
67723+#ifdef CONFIG_PAX_SEGMEXEC
67724+ vma_m = pax_find_mirror_vma(vma);
67725+ if (vma_m) {
67726+ err = policy_vma(vma_m, new_pol);
67727+ if (err)
67728+ goto out;
67729+ }
67730+#endif
67731+
67732 }
67733
67734 out:
67735@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
67736
67737 if (end < start)
67738 return -EINVAL;
67739+
67740+#ifdef CONFIG_PAX_SEGMEXEC
67741+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67742+ if (end > SEGMEXEC_TASK_SIZE)
67743+ return -EINVAL;
67744+ } else
67745+#endif
67746+
67747+ if (end > TASK_SIZE)
67748+ return -EINVAL;
67749+
67750 if (end == start)
67751 return 0;
67752
67753@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67754 if (!mm)
67755 goto out;
67756
67757+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67758+ if (mm != current->mm &&
67759+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67760+ err = -EPERM;
67761+ goto out;
67762+ }
67763+#endif
67764+
67765 /*
67766 * Check if this process has the right to modify the specified
67767 * process. The right exists if the process has administrative
67768@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67769 rcu_read_lock();
67770 tcred = __task_cred(task);
67771 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67772- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67773- !capable(CAP_SYS_NICE)) {
67774+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67775 rcu_read_unlock();
67776 err = -EPERM;
67777 goto out;
67778diff -urNp linux-3.0.7/mm/migrate.c linux-3.0.7/mm/migrate.c
67779--- linux-3.0.7/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
67780+++ linux-3.0.7/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
67781@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
67782 unsigned long chunk_start;
67783 int err;
67784
67785+ pax_track_stack();
67786+
67787 task_nodes = cpuset_mems_allowed(task);
67788
67789 err = -ENOMEM;
67790@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67791 if (!mm)
67792 return -EINVAL;
67793
67794+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67795+ if (mm != current->mm &&
67796+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67797+ err = -EPERM;
67798+ goto out;
67799+ }
67800+#endif
67801+
67802 /*
67803 * Check if this process has the right to modify the specified
67804 * process. The right exists if the process has administrative
67805@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67806 rcu_read_lock();
67807 tcred = __task_cred(task);
67808 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67809- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67810- !capable(CAP_SYS_NICE)) {
67811+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67812 rcu_read_unlock();
67813 err = -EPERM;
67814 goto out;
67815diff -urNp linux-3.0.7/mm/mlock.c linux-3.0.7/mm/mlock.c
67816--- linux-3.0.7/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
67817+++ linux-3.0.7/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
67818@@ -13,6 +13,7 @@
67819 #include <linux/pagemap.h>
67820 #include <linux/mempolicy.h>
67821 #include <linux/syscalls.h>
67822+#include <linux/security.h>
67823 #include <linux/sched.h>
67824 #include <linux/module.h>
67825 #include <linux/rmap.h>
67826@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
67827 return -EINVAL;
67828 if (end == start)
67829 return 0;
67830+ if (end > TASK_SIZE)
67831+ return -EINVAL;
67832+
67833 vma = find_vma_prev(current->mm, start, &prev);
67834 if (!vma || vma->vm_start > start)
67835 return -ENOMEM;
67836@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
67837 for (nstart = start ; ; ) {
67838 vm_flags_t newflags;
67839
67840+#ifdef CONFIG_PAX_SEGMEXEC
67841+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67842+ break;
67843+#endif
67844+
67845 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67846
67847 newflags = vma->vm_flags | VM_LOCKED;
67848@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67849 lock_limit >>= PAGE_SHIFT;
67850
67851 /* check against resource limits */
67852+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67853 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67854 error = do_mlock(start, len, 1);
67855 up_write(&current->mm->mmap_sem);
67856@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67857 static int do_mlockall(int flags)
67858 {
67859 struct vm_area_struct * vma, * prev = NULL;
67860- unsigned int def_flags = 0;
67861
67862 if (flags & MCL_FUTURE)
67863- def_flags = VM_LOCKED;
67864- current->mm->def_flags = def_flags;
67865+ current->mm->def_flags |= VM_LOCKED;
67866+ else
67867+ current->mm->def_flags &= ~VM_LOCKED;
67868 if (flags == MCL_FUTURE)
67869 goto out;
67870
67871 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67872 vm_flags_t newflags;
67873
67874+#ifdef CONFIG_PAX_SEGMEXEC
67875+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67876+ break;
67877+#endif
67878+
67879+ BUG_ON(vma->vm_end > TASK_SIZE);
67880 newflags = vma->vm_flags | VM_LOCKED;
67881 if (!(flags & MCL_CURRENT))
67882 newflags &= ~VM_LOCKED;
67883@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67884 lock_limit >>= PAGE_SHIFT;
67885
67886 ret = -ENOMEM;
67887+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67888 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67889 capable(CAP_IPC_LOCK))
67890 ret = do_mlockall(flags);
67891diff -urNp linux-3.0.7/mm/mmap.c linux-3.0.7/mm/mmap.c
67892--- linux-3.0.7/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
67893+++ linux-3.0.7/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
67894@@ -46,6 +46,16 @@
67895 #define arch_rebalance_pgtables(addr, len) (addr)
67896 #endif
67897
67898+static inline void verify_mm_writelocked(struct mm_struct *mm)
67899+{
67900+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67901+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67902+ up_read(&mm->mmap_sem);
67903+ BUG();
67904+ }
67905+#endif
67906+}
67907+
67908 static void unmap_region(struct mm_struct *mm,
67909 struct vm_area_struct *vma, struct vm_area_struct *prev,
67910 unsigned long start, unsigned long end);
67911@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
67912 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67913 *
67914 */
67915-pgprot_t protection_map[16] = {
67916+pgprot_t protection_map[16] __read_only = {
67917 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67918 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67919 };
67920
67921-pgprot_t vm_get_page_prot(unsigned long vm_flags)
67922+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
67923 {
67924- return __pgprot(pgprot_val(protection_map[vm_flags &
67925+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67926 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67927 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67928+
67929+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67930+ if (!(__supported_pte_mask & _PAGE_NX) &&
67931+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
67932+ (vm_flags & (VM_READ | VM_WRITE)))
67933+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
67934+#endif
67935+
67936+ return prot;
67937 }
67938 EXPORT_SYMBOL(vm_get_page_prot);
67939
67940 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
67941 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
67942 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
67943+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
67944 /*
67945 * Make sure vm_committed_as in one cacheline and not cacheline shared with
67946 * other variables. It can be updated by several CPUs frequently.
67947@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
67948 struct vm_area_struct *next = vma->vm_next;
67949
67950 might_sleep();
67951+ BUG_ON(vma->vm_mirror);
67952 if (vma->vm_ops && vma->vm_ops->close)
67953 vma->vm_ops->close(vma);
67954 if (vma->vm_file) {
67955@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
67956 * not page aligned -Ram Gupta
67957 */
67958 rlim = rlimit(RLIMIT_DATA);
67959+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
67960 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
67961 (mm->end_data - mm->start_data) > rlim)
67962 goto out;
67963@@ -697,6 +719,12 @@ static int
67964 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
67965 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
67966 {
67967+
67968+#ifdef CONFIG_PAX_SEGMEXEC
67969+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
67970+ return 0;
67971+#endif
67972+
67973 if (is_mergeable_vma(vma, file, vm_flags) &&
67974 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
67975 if (vma->vm_pgoff == vm_pgoff)
67976@@ -716,6 +744,12 @@ static int
67977 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
67978 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
67979 {
67980+
67981+#ifdef CONFIG_PAX_SEGMEXEC
67982+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
67983+ return 0;
67984+#endif
67985+
67986 if (is_mergeable_vma(vma, file, vm_flags) &&
67987 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
67988 pgoff_t vm_pglen;
67989@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
67990 struct vm_area_struct *vma_merge(struct mm_struct *mm,
67991 struct vm_area_struct *prev, unsigned long addr,
67992 unsigned long end, unsigned long vm_flags,
67993- struct anon_vma *anon_vma, struct file *file,
67994+ struct anon_vma *anon_vma, struct file *file,
67995 pgoff_t pgoff, struct mempolicy *policy)
67996 {
67997 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
67998 struct vm_area_struct *area, *next;
67999 int err;
68000
68001+#ifdef CONFIG_PAX_SEGMEXEC
68002+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68003+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68004+
68005+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68006+#endif
68007+
68008 /*
68009 * We later require that vma->vm_flags == vm_flags,
68010 * so this tests vma->vm_flags & VM_SPECIAL, too.
68011@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
68012 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68013 next = next->vm_next;
68014
68015+#ifdef CONFIG_PAX_SEGMEXEC
68016+ if (prev)
68017+ prev_m = pax_find_mirror_vma(prev);
68018+ if (area)
68019+ area_m = pax_find_mirror_vma(area);
68020+ if (next)
68021+ next_m = pax_find_mirror_vma(next);
68022+#endif
68023+
68024 /*
68025 * Can it merge with the predecessor?
68026 */
68027@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
68028 /* cases 1, 6 */
68029 err = vma_adjust(prev, prev->vm_start,
68030 next->vm_end, prev->vm_pgoff, NULL);
68031- } else /* cases 2, 5, 7 */
68032+
68033+#ifdef CONFIG_PAX_SEGMEXEC
68034+ if (!err && prev_m)
68035+ err = vma_adjust(prev_m, prev_m->vm_start,
68036+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68037+#endif
68038+
68039+ } else { /* cases 2, 5, 7 */
68040 err = vma_adjust(prev, prev->vm_start,
68041 end, prev->vm_pgoff, NULL);
68042+
68043+#ifdef CONFIG_PAX_SEGMEXEC
68044+ if (!err && prev_m)
68045+ err = vma_adjust(prev_m, prev_m->vm_start,
68046+ end_m, prev_m->vm_pgoff, NULL);
68047+#endif
68048+
68049+ }
68050 if (err)
68051 return NULL;
68052 khugepaged_enter_vma_merge(prev);
68053@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
68054 mpol_equal(policy, vma_policy(next)) &&
68055 can_vma_merge_before(next, vm_flags,
68056 anon_vma, file, pgoff+pglen)) {
68057- if (prev && addr < prev->vm_end) /* case 4 */
68058+ if (prev && addr < prev->vm_end) { /* case 4 */
68059 err = vma_adjust(prev, prev->vm_start,
68060 addr, prev->vm_pgoff, NULL);
68061- else /* cases 3, 8 */
68062+
68063+#ifdef CONFIG_PAX_SEGMEXEC
68064+ if (!err && prev_m)
68065+ err = vma_adjust(prev_m, prev_m->vm_start,
68066+ addr_m, prev_m->vm_pgoff, NULL);
68067+#endif
68068+
68069+ } else { /* cases 3, 8 */
68070 err = vma_adjust(area, addr, next->vm_end,
68071 next->vm_pgoff - pglen, NULL);
68072+
68073+#ifdef CONFIG_PAX_SEGMEXEC
68074+ if (!err && area_m)
68075+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68076+ next_m->vm_pgoff - pglen, NULL);
68077+#endif
68078+
68079+ }
68080 if (err)
68081 return NULL;
68082 khugepaged_enter_vma_merge(area);
68083@@ -929,14 +1009,11 @@ none:
68084 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68085 struct file *file, long pages)
68086 {
68087- const unsigned long stack_flags
68088- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68089-
68090 if (file) {
68091 mm->shared_vm += pages;
68092 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68093 mm->exec_vm += pages;
68094- } else if (flags & stack_flags)
68095+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68096 mm->stack_vm += pages;
68097 if (flags & (VM_RESERVED|VM_IO))
68098 mm->reserved_vm += pages;
68099@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
68100 * (the exception is when the underlying filesystem is noexec
68101 * mounted, in which case we dont add PROT_EXEC.)
68102 */
68103- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68104+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68105 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68106 prot |= PROT_EXEC;
68107
68108@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
68109 /* Obtain the address to map to. we verify (or select) it and ensure
68110 * that it represents a valid section of the address space.
68111 */
68112- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68113+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68114 if (addr & ~PAGE_MASK)
68115 return addr;
68116
68117@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
68118 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68119 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68120
68121+#ifdef CONFIG_PAX_MPROTECT
68122+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68123+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68124+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68125+ gr_log_rwxmmap(file);
68126+
68127+#ifdef CONFIG_PAX_EMUPLT
68128+ vm_flags &= ~VM_EXEC;
68129+#else
68130+ return -EPERM;
68131+#endif
68132+
68133+ }
68134+
68135+ if (!(vm_flags & VM_EXEC))
68136+ vm_flags &= ~VM_MAYEXEC;
68137+#else
68138+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68139+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68140+#endif
68141+ else
68142+ vm_flags &= ~VM_MAYWRITE;
68143+ }
68144+#endif
68145+
68146+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68147+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68148+ vm_flags &= ~VM_PAGEEXEC;
68149+#endif
68150+
68151 if (flags & MAP_LOCKED)
68152 if (!can_do_mlock())
68153 return -EPERM;
68154@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
68155 locked += mm->locked_vm;
68156 lock_limit = rlimit(RLIMIT_MEMLOCK);
68157 lock_limit >>= PAGE_SHIFT;
68158+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68159 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68160 return -EAGAIN;
68161 }
68162@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
68163 if (error)
68164 return error;
68165
68166+ if (!gr_acl_handle_mmap(file, prot))
68167+ return -EACCES;
68168+
68169 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68170 }
68171 EXPORT_SYMBOL(do_mmap_pgoff);
68172@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
68173 vm_flags_t vm_flags = vma->vm_flags;
68174
68175 /* If it was private or non-writable, the write bit is already clear */
68176- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68177+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68178 return 0;
68179
68180 /* The backer wishes to know when pages are first written to? */
68181@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
68182 unsigned long charged = 0;
68183 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68184
68185+#ifdef CONFIG_PAX_SEGMEXEC
68186+ struct vm_area_struct *vma_m = NULL;
68187+#endif
68188+
68189+ /*
68190+ * mm->mmap_sem is required to protect against another thread
68191+ * changing the mappings in case we sleep.
68192+ */
68193+ verify_mm_writelocked(mm);
68194+
68195 /* Clear old maps */
68196 error = -ENOMEM;
68197-munmap_back:
68198 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68199 if (vma && vma->vm_start < addr + len) {
68200 if (do_munmap(mm, addr, len))
68201 return -ENOMEM;
68202- goto munmap_back;
68203+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68204+ BUG_ON(vma && vma->vm_start < addr + len);
68205 }
68206
68207 /* Check against address space limit. */
68208@@ -1266,6 +1387,16 @@ munmap_back:
68209 goto unacct_error;
68210 }
68211
68212+#ifdef CONFIG_PAX_SEGMEXEC
68213+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68214+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68215+ if (!vma_m) {
68216+ error = -ENOMEM;
68217+ goto free_vma;
68218+ }
68219+ }
68220+#endif
68221+
68222 vma->vm_mm = mm;
68223 vma->vm_start = addr;
68224 vma->vm_end = addr + len;
68225@@ -1289,6 +1420,19 @@ munmap_back:
68226 error = file->f_op->mmap(file, vma);
68227 if (error)
68228 goto unmap_and_free_vma;
68229+
68230+#ifdef CONFIG_PAX_SEGMEXEC
68231+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68232+ added_exe_file_vma(mm);
68233+#endif
68234+
68235+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68236+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68237+ vma->vm_flags |= VM_PAGEEXEC;
68238+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68239+ }
68240+#endif
68241+
68242 if (vm_flags & VM_EXECUTABLE)
68243 added_exe_file_vma(mm);
68244
68245@@ -1324,6 +1468,11 @@ munmap_back:
68246 vma_link(mm, vma, prev, rb_link, rb_parent);
68247 file = vma->vm_file;
68248
68249+#ifdef CONFIG_PAX_SEGMEXEC
68250+ if (vma_m)
68251+ BUG_ON(pax_mirror_vma(vma_m, vma));
68252+#endif
68253+
68254 /* Once vma denies write, undo our temporary denial count */
68255 if (correct_wcount)
68256 atomic_inc(&inode->i_writecount);
68257@@ -1332,6 +1481,7 @@ out:
68258
68259 mm->total_vm += len >> PAGE_SHIFT;
68260 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68261+ track_exec_limit(mm, addr, addr + len, vm_flags);
68262 if (vm_flags & VM_LOCKED) {
68263 if (!mlock_vma_pages_range(vma, addr, addr + len))
68264 mm->locked_vm += (len >> PAGE_SHIFT);
68265@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
68266 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68267 charged = 0;
68268 free_vma:
68269+
68270+#ifdef CONFIG_PAX_SEGMEXEC
68271+ if (vma_m)
68272+ kmem_cache_free(vm_area_cachep, vma_m);
68273+#endif
68274+
68275 kmem_cache_free(vm_area_cachep, vma);
68276 unacct_error:
68277 if (charged)
68278@@ -1356,6 +1512,44 @@ unacct_error:
68279 return error;
68280 }
68281
68282+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68283+{
68284+ if (!vma) {
68285+#ifdef CONFIG_STACK_GROWSUP
68286+ if (addr > sysctl_heap_stack_gap)
68287+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68288+ else
68289+ vma = find_vma(current->mm, 0);
68290+ if (vma && (vma->vm_flags & VM_GROWSUP))
68291+ return false;
68292+#endif
68293+ return true;
68294+ }
68295+
68296+ if (addr + len > vma->vm_start)
68297+ return false;
68298+
68299+ if (vma->vm_flags & VM_GROWSDOWN)
68300+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68301+#ifdef CONFIG_STACK_GROWSUP
68302+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68303+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68304+#endif
68305+
68306+ return true;
68307+}
68308+
68309+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68310+{
68311+ if (vma->vm_start < len)
68312+ return -ENOMEM;
68313+ if (!(vma->vm_flags & VM_GROWSDOWN))
68314+ return vma->vm_start - len;
68315+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68316+ return vma->vm_start - len - sysctl_heap_stack_gap;
68317+ return -ENOMEM;
68318+}
68319+
68320 /* Get an address range which is currently unmapped.
68321 * For shmat() with addr=0.
68322 *
68323@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
68324 if (flags & MAP_FIXED)
68325 return addr;
68326
68327+#ifdef CONFIG_PAX_RANDMMAP
68328+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68329+#endif
68330+
68331 if (addr) {
68332 addr = PAGE_ALIGN(addr);
68333- vma = find_vma(mm, addr);
68334- if (TASK_SIZE - len >= addr &&
68335- (!vma || addr + len <= vma->vm_start))
68336- return addr;
68337+ if (TASK_SIZE - len >= addr) {
68338+ vma = find_vma(mm, addr);
68339+ if (check_heap_stack_gap(vma, addr, len))
68340+ return addr;
68341+ }
68342 }
68343 if (len > mm->cached_hole_size) {
68344- start_addr = addr = mm->free_area_cache;
68345+ start_addr = addr = mm->free_area_cache;
68346 } else {
68347- start_addr = addr = TASK_UNMAPPED_BASE;
68348- mm->cached_hole_size = 0;
68349+ start_addr = addr = mm->mmap_base;
68350+ mm->cached_hole_size = 0;
68351 }
68352
68353 full_search:
68354@@ -1404,34 +1603,40 @@ full_search:
68355 * Start a new search - just in case we missed
68356 * some holes.
68357 */
68358- if (start_addr != TASK_UNMAPPED_BASE) {
68359- addr = TASK_UNMAPPED_BASE;
68360- start_addr = addr;
68361+ if (start_addr != mm->mmap_base) {
68362+ start_addr = addr = mm->mmap_base;
68363 mm->cached_hole_size = 0;
68364 goto full_search;
68365 }
68366 return -ENOMEM;
68367 }
68368- if (!vma || addr + len <= vma->vm_start) {
68369- /*
68370- * Remember the place where we stopped the search:
68371- */
68372- mm->free_area_cache = addr + len;
68373- return addr;
68374- }
68375+ if (check_heap_stack_gap(vma, addr, len))
68376+ break;
68377 if (addr + mm->cached_hole_size < vma->vm_start)
68378 mm->cached_hole_size = vma->vm_start - addr;
68379 addr = vma->vm_end;
68380 }
68381+
68382+ /*
68383+ * Remember the place where we stopped the search:
68384+ */
68385+ mm->free_area_cache = addr + len;
68386+ return addr;
68387 }
68388 #endif
68389
68390 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68391 {
68392+
68393+#ifdef CONFIG_PAX_SEGMEXEC
68394+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68395+ return;
68396+#endif
68397+
68398 /*
68399 * Is this a new hole at the lowest possible address?
68400 */
68401- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68402+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68403 mm->free_area_cache = addr;
68404 mm->cached_hole_size = ~0UL;
68405 }
68406@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
68407 {
68408 struct vm_area_struct *vma;
68409 struct mm_struct *mm = current->mm;
68410- unsigned long addr = addr0;
68411+ unsigned long base = mm->mmap_base, addr = addr0;
68412
68413 /* requested length too big for entire address space */
68414 if (len > TASK_SIZE)
68415@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
68416 if (flags & MAP_FIXED)
68417 return addr;
68418
68419+#ifdef CONFIG_PAX_RANDMMAP
68420+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68421+#endif
68422+
68423 /* requesting a specific address */
68424 if (addr) {
68425 addr = PAGE_ALIGN(addr);
68426- vma = find_vma(mm, addr);
68427- if (TASK_SIZE - len >= addr &&
68428- (!vma || addr + len <= vma->vm_start))
68429- return addr;
68430+ if (TASK_SIZE - len >= addr) {
68431+ vma = find_vma(mm, addr);
68432+ if (check_heap_stack_gap(vma, addr, len))
68433+ return addr;
68434+ }
68435 }
68436
68437 /* check if free_area_cache is useful for us */
68438@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
68439 /* make sure it can fit in the remaining address space */
68440 if (addr > len) {
68441 vma = find_vma(mm, addr-len);
68442- if (!vma || addr <= vma->vm_start)
68443+ if (check_heap_stack_gap(vma, addr - len, len))
68444 /* remember the address as a hint for next time */
68445 return (mm->free_area_cache = addr-len);
68446 }
68447@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
68448 * return with success:
68449 */
68450 vma = find_vma(mm, addr);
68451- if (!vma || addr+len <= vma->vm_start)
68452+ if (check_heap_stack_gap(vma, addr, len))
68453 /* remember the address as a hint for next time */
68454 return (mm->free_area_cache = addr);
68455
68456@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
68457 mm->cached_hole_size = vma->vm_start - addr;
68458
68459 /* try just below the current vma->vm_start */
68460- addr = vma->vm_start-len;
68461- } while (len < vma->vm_start);
68462+ addr = skip_heap_stack_gap(vma, len);
68463+ } while (!IS_ERR_VALUE(addr));
68464
68465 bottomup:
68466 /*
68467@@ -1515,13 +1725,21 @@ bottomup:
68468 * can happen with large stack limits and large mmap()
68469 * allocations.
68470 */
68471+ mm->mmap_base = TASK_UNMAPPED_BASE;
68472+
68473+#ifdef CONFIG_PAX_RANDMMAP
68474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68475+ mm->mmap_base += mm->delta_mmap;
68476+#endif
68477+
68478+ mm->free_area_cache = mm->mmap_base;
68479 mm->cached_hole_size = ~0UL;
68480- mm->free_area_cache = TASK_UNMAPPED_BASE;
68481 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68482 /*
68483 * Restore the topdown base:
68484 */
68485- mm->free_area_cache = mm->mmap_base;
68486+ mm->mmap_base = base;
68487+ mm->free_area_cache = base;
68488 mm->cached_hole_size = ~0UL;
68489
68490 return addr;
68491@@ -1530,6 +1748,12 @@ bottomup:
68492
68493 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68494 {
68495+
68496+#ifdef CONFIG_PAX_SEGMEXEC
68497+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68498+ return;
68499+#endif
68500+
68501 /*
68502 * Is this a new hole at the highest possible address?
68503 */
68504@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
68505 mm->free_area_cache = addr;
68506
68507 /* dont allow allocations above current base */
68508- if (mm->free_area_cache > mm->mmap_base)
68509+ if (mm->free_area_cache > mm->mmap_base) {
68510 mm->free_area_cache = mm->mmap_base;
68511+ mm->cached_hole_size = ~0UL;
68512+ }
68513 }
68514
68515 unsigned long
68516@@ -1646,6 +1872,28 @@ out:
68517 return prev ? prev->vm_next : vma;
68518 }
68519
68520+#ifdef CONFIG_PAX_SEGMEXEC
68521+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68522+{
68523+ struct vm_area_struct *vma_m;
68524+
68525+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68526+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68527+ BUG_ON(vma->vm_mirror);
68528+ return NULL;
68529+ }
68530+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68531+ vma_m = vma->vm_mirror;
68532+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68533+ BUG_ON(vma->vm_file != vma_m->vm_file);
68534+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68535+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68536+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68537+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68538+ return vma_m;
68539+}
68540+#endif
68541+
68542 /*
68543 * Verify that the stack growth is acceptable and
68544 * update accounting. This is shared with both the
68545@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
68546 return -ENOMEM;
68547
68548 /* Stack limit test */
68549+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68550 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68551 return -ENOMEM;
68552
68553@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
68554 locked = mm->locked_vm + grow;
68555 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68556 limit >>= PAGE_SHIFT;
68557+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68558 if (locked > limit && !capable(CAP_IPC_LOCK))
68559 return -ENOMEM;
68560 }
68561@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
68562 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68563 * vma is the last one with address > vma->vm_end. Have to extend vma.
68564 */
68565+#ifndef CONFIG_IA64
68566+static
68567+#endif
68568 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68569 {
68570 int error;
68571+ bool locknext;
68572
68573 if (!(vma->vm_flags & VM_GROWSUP))
68574 return -EFAULT;
68575
68576+ /* Also guard against wrapping around to address 0. */
68577+ if (address < PAGE_ALIGN(address+1))
68578+ address = PAGE_ALIGN(address+1);
68579+ else
68580+ return -ENOMEM;
68581+
68582 /*
68583 * We must make sure the anon_vma is allocated
68584 * so that the anon_vma locking is not a noop.
68585 */
68586 if (unlikely(anon_vma_prepare(vma)))
68587 return -ENOMEM;
68588+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68589+ if (locknext && anon_vma_prepare(vma->vm_next))
68590+ return -ENOMEM;
68591 vma_lock_anon_vma(vma);
68592+ if (locknext)
68593+ vma_lock_anon_vma(vma->vm_next);
68594
68595 /*
68596 * vma->vm_start/vm_end cannot change under us because the caller
68597 * is required to hold the mmap_sem in read mode. We need the
68598- * anon_vma lock to serialize against concurrent expand_stacks.
68599- * Also guard against wrapping around to address 0.
68600+ * anon_vma locks to serialize against concurrent expand_stacks
68601+ * and expand_upwards.
68602 */
68603- if (address < PAGE_ALIGN(address+4))
68604- address = PAGE_ALIGN(address+4);
68605- else {
68606- vma_unlock_anon_vma(vma);
68607- return -ENOMEM;
68608- }
68609 error = 0;
68610
68611 /* Somebody else might have raced and expanded it already */
68612- if (address > vma->vm_end) {
68613+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68614+ error = -ENOMEM;
68615+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68616 unsigned long size, grow;
68617
68618 size = address - vma->vm_start;
68619@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
68620 }
68621 }
68622 }
68623+ if (locknext)
68624+ vma_unlock_anon_vma(vma->vm_next);
68625 vma_unlock_anon_vma(vma);
68626 khugepaged_enter_vma_merge(vma);
68627 return error;
68628@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
68629 unsigned long address)
68630 {
68631 int error;
68632+ bool lockprev = false;
68633+ struct vm_area_struct *prev;
68634
68635 /*
68636 * We must make sure the anon_vma is allocated
68637@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
68638 if (error)
68639 return error;
68640
68641+ prev = vma->vm_prev;
68642+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68643+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68644+#endif
68645+ if (lockprev && anon_vma_prepare(prev))
68646+ return -ENOMEM;
68647+ if (lockprev)
68648+ vma_lock_anon_vma(prev);
68649+
68650 vma_lock_anon_vma(vma);
68651
68652 /*
68653@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
68654 */
68655
68656 /* Somebody else might have raced and expanded it already */
68657- if (address < vma->vm_start) {
68658+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68659+ error = -ENOMEM;
68660+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68661 unsigned long size, grow;
68662
68663+#ifdef CONFIG_PAX_SEGMEXEC
68664+ struct vm_area_struct *vma_m;
68665+
68666+ vma_m = pax_find_mirror_vma(vma);
68667+#endif
68668+
68669 size = vma->vm_end - address;
68670 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68671
68672@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
68673 if (!error) {
68674 vma->vm_start = address;
68675 vma->vm_pgoff -= grow;
68676+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68677+
68678+#ifdef CONFIG_PAX_SEGMEXEC
68679+ if (vma_m) {
68680+ vma_m->vm_start -= grow << PAGE_SHIFT;
68681+ vma_m->vm_pgoff -= grow;
68682+ }
68683+#endif
68684+
68685 perf_event_mmap(vma);
68686 }
68687 }
68688 }
68689 vma_unlock_anon_vma(vma);
68690+ if (lockprev)
68691+ vma_unlock_anon_vma(prev);
68692 khugepaged_enter_vma_merge(vma);
68693 return error;
68694 }
68695@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
68696 do {
68697 long nrpages = vma_pages(vma);
68698
68699+#ifdef CONFIG_PAX_SEGMEXEC
68700+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68701+ vma = remove_vma(vma);
68702+ continue;
68703+ }
68704+#endif
68705+
68706 mm->total_vm -= nrpages;
68707 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68708 vma = remove_vma(vma);
68709@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68710 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68711 vma->vm_prev = NULL;
68712 do {
68713+
68714+#ifdef CONFIG_PAX_SEGMEXEC
68715+ if (vma->vm_mirror) {
68716+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68717+ vma->vm_mirror->vm_mirror = NULL;
68718+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68719+ vma->vm_mirror = NULL;
68720+ }
68721+#endif
68722+
68723 rb_erase(&vma->vm_rb, &mm->mm_rb);
68724 mm->map_count--;
68725 tail_vma = vma;
68726@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
68727 struct vm_area_struct *new;
68728 int err = -ENOMEM;
68729
68730+#ifdef CONFIG_PAX_SEGMEXEC
68731+ struct vm_area_struct *vma_m, *new_m = NULL;
68732+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68733+#endif
68734+
68735 if (is_vm_hugetlb_page(vma) && (addr &
68736 ~(huge_page_mask(hstate_vma(vma)))))
68737 return -EINVAL;
68738
68739+#ifdef CONFIG_PAX_SEGMEXEC
68740+ vma_m = pax_find_mirror_vma(vma);
68741+#endif
68742+
68743 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68744 if (!new)
68745 goto out_err;
68746
68747+#ifdef CONFIG_PAX_SEGMEXEC
68748+ if (vma_m) {
68749+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68750+ if (!new_m) {
68751+ kmem_cache_free(vm_area_cachep, new);
68752+ goto out_err;
68753+ }
68754+ }
68755+#endif
68756+
68757 /* most fields are the same, copy all, and then fixup */
68758 *new = *vma;
68759
68760@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
68761 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68762 }
68763
68764+#ifdef CONFIG_PAX_SEGMEXEC
68765+ if (vma_m) {
68766+ *new_m = *vma_m;
68767+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
68768+ new_m->vm_mirror = new;
68769+ new->vm_mirror = new_m;
68770+
68771+ if (new_below)
68772+ new_m->vm_end = addr_m;
68773+ else {
68774+ new_m->vm_start = addr_m;
68775+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68776+ }
68777+ }
68778+#endif
68779+
68780 pol = mpol_dup(vma_policy(vma));
68781 if (IS_ERR(pol)) {
68782 err = PTR_ERR(pol);
68783@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
68784 else
68785 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68786
68787+#ifdef CONFIG_PAX_SEGMEXEC
68788+ if (!err && vma_m) {
68789+ if (anon_vma_clone(new_m, vma_m))
68790+ goto out_free_mpol;
68791+
68792+ mpol_get(pol);
68793+ vma_set_policy(new_m, pol);
68794+
68795+ if (new_m->vm_file) {
68796+ get_file(new_m->vm_file);
68797+ if (vma_m->vm_flags & VM_EXECUTABLE)
68798+ added_exe_file_vma(mm);
68799+ }
68800+
68801+ if (new_m->vm_ops && new_m->vm_ops->open)
68802+ new_m->vm_ops->open(new_m);
68803+
68804+ if (new_below)
68805+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68806+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68807+ else
68808+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68809+
68810+ if (err) {
68811+ if (new_m->vm_ops && new_m->vm_ops->close)
68812+ new_m->vm_ops->close(new_m);
68813+ if (new_m->vm_file) {
68814+ if (vma_m->vm_flags & VM_EXECUTABLE)
68815+ removed_exe_file_vma(mm);
68816+ fput(new_m->vm_file);
68817+ }
68818+ mpol_put(pol);
68819+ }
68820+ }
68821+#endif
68822+
68823 /* Success. */
68824 if (!err)
68825 return 0;
68826@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
68827 removed_exe_file_vma(mm);
68828 fput(new->vm_file);
68829 }
68830- unlink_anon_vmas(new);
68831 out_free_mpol:
68832 mpol_put(pol);
68833 out_free_vma:
68834+
68835+#ifdef CONFIG_PAX_SEGMEXEC
68836+ if (new_m) {
68837+ unlink_anon_vmas(new_m);
68838+ kmem_cache_free(vm_area_cachep, new_m);
68839+ }
68840+#endif
68841+
68842+ unlink_anon_vmas(new);
68843 kmem_cache_free(vm_area_cachep, new);
68844 out_err:
68845 return err;
68846@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
68847 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
68848 unsigned long addr, int new_below)
68849 {
68850+
68851+#ifdef CONFIG_PAX_SEGMEXEC
68852+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68853+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68854+ if (mm->map_count >= sysctl_max_map_count-1)
68855+ return -ENOMEM;
68856+ } else
68857+#endif
68858+
68859 if (mm->map_count >= sysctl_max_map_count)
68860 return -ENOMEM;
68861
68862@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
68863 * work. This now handles partial unmappings.
68864 * Jeremy Fitzhardinge <jeremy@goop.org>
68865 */
68866+#ifdef CONFIG_PAX_SEGMEXEC
68867 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68868 {
68869+ int ret = __do_munmap(mm, start, len);
68870+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68871+ return ret;
68872+
68873+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68874+}
68875+
68876+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68877+#else
68878+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68879+#endif
68880+{
68881 unsigned long end;
68882 struct vm_area_struct *vma, *prev, *last;
68883
68884+ /*
68885+ * mm->mmap_sem is required to protect against another thread
68886+ * changing the mappings in case we sleep.
68887+ */
68888+ verify_mm_writelocked(mm);
68889+
68890 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68891 return -EINVAL;
68892
68893@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
68894 /* Fix up all other VM information */
68895 remove_vma_list(mm, vma);
68896
68897+ track_exec_limit(mm, start, end, 0UL);
68898+
68899 return 0;
68900 }
68901
68902@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68903
68904 profile_munmap(addr);
68905
68906+#ifdef CONFIG_PAX_SEGMEXEC
68907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68908+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68909+ return -EINVAL;
68910+#endif
68911+
68912 down_write(&mm->mmap_sem);
68913 ret = do_munmap(mm, addr, len);
68914 up_write(&mm->mmap_sem);
68915 return ret;
68916 }
68917
68918-static inline void verify_mm_writelocked(struct mm_struct *mm)
68919-{
68920-#ifdef CONFIG_DEBUG_VM
68921- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68922- WARN_ON(1);
68923- up_read(&mm->mmap_sem);
68924- }
68925-#endif
68926-}
68927-
68928 /*
68929 * this is really a simplified "do_mmap". it only handles
68930 * anonymous maps. eventually we may be able to do some
68931@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
68932 struct rb_node ** rb_link, * rb_parent;
68933 pgoff_t pgoff = addr >> PAGE_SHIFT;
68934 int error;
68935+ unsigned long charged;
68936
68937 len = PAGE_ALIGN(len);
68938 if (!len)
68939@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
68940
68941 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
68942
68943+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68944+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68945+ flags &= ~VM_EXEC;
68946+
68947+#ifdef CONFIG_PAX_MPROTECT
68948+ if (mm->pax_flags & MF_PAX_MPROTECT)
68949+ flags &= ~VM_MAYEXEC;
68950+#endif
68951+
68952+ }
68953+#endif
68954+
68955 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
68956 if (error & ~PAGE_MASK)
68957 return error;
68958
68959+ charged = len >> PAGE_SHIFT;
68960+
68961 /*
68962 * mlock MCL_FUTURE?
68963 */
68964 if (mm->def_flags & VM_LOCKED) {
68965 unsigned long locked, lock_limit;
68966- locked = len >> PAGE_SHIFT;
68967+ locked = charged;
68968 locked += mm->locked_vm;
68969 lock_limit = rlimit(RLIMIT_MEMLOCK);
68970 lock_limit >>= PAGE_SHIFT;
68971@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
68972 /*
68973 * Clear old maps. this also does some error checking for us
68974 */
68975- munmap_back:
68976 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68977 if (vma && vma->vm_start < addr + len) {
68978 if (do_munmap(mm, addr, len))
68979 return -ENOMEM;
68980- goto munmap_back;
68981+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68982+ BUG_ON(vma && vma->vm_start < addr + len);
68983 }
68984
68985 /* Check against address space limits *after* clearing old maps... */
68986- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
68987+ if (!may_expand_vm(mm, charged))
68988 return -ENOMEM;
68989
68990 if (mm->map_count > sysctl_max_map_count)
68991 return -ENOMEM;
68992
68993- if (security_vm_enough_memory(len >> PAGE_SHIFT))
68994+ if (security_vm_enough_memory(charged))
68995 return -ENOMEM;
68996
68997 /* Can we just expand an old private anonymous mapping? */
68998@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
68999 */
69000 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69001 if (!vma) {
69002- vm_unacct_memory(len >> PAGE_SHIFT);
69003+ vm_unacct_memory(charged);
69004 return -ENOMEM;
69005 }
69006
69007@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
69008 vma_link(mm, vma, prev, rb_link, rb_parent);
69009 out:
69010 perf_event_mmap(vma);
69011- mm->total_vm += len >> PAGE_SHIFT;
69012+ mm->total_vm += charged;
69013 if (flags & VM_LOCKED) {
69014 if (!mlock_vma_pages_range(vma, addr, addr + len))
69015- mm->locked_vm += (len >> PAGE_SHIFT);
69016+ mm->locked_vm += charged;
69017 }
69018+ track_exec_limit(mm, addr, addr + len, flags);
69019 return addr;
69020 }
69021
69022@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
69023 * Walk the list again, actually closing and freeing it,
69024 * with preemption enabled, without holding any MM locks.
69025 */
69026- while (vma)
69027+ while (vma) {
69028+ vma->vm_mirror = NULL;
69029 vma = remove_vma(vma);
69030+ }
69031
69032 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69033 }
69034@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
69035 struct vm_area_struct * __vma, * prev;
69036 struct rb_node ** rb_link, * rb_parent;
69037
69038+#ifdef CONFIG_PAX_SEGMEXEC
69039+ struct vm_area_struct *vma_m = NULL;
69040+#endif
69041+
69042+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69043+ return -EPERM;
69044+
69045 /*
69046 * The vm_pgoff of a purely anonymous vma should be irrelevant
69047 * until its first write fault, when page's anon_vma and index
69048@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
69049 if ((vma->vm_flags & VM_ACCOUNT) &&
69050 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69051 return -ENOMEM;
69052+
69053+#ifdef CONFIG_PAX_SEGMEXEC
69054+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69055+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69056+ if (!vma_m)
69057+ return -ENOMEM;
69058+ }
69059+#endif
69060+
69061 vma_link(mm, vma, prev, rb_link, rb_parent);
69062+
69063+#ifdef CONFIG_PAX_SEGMEXEC
69064+ if (vma_m)
69065+ BUG_ON(pax_mirror_vma(vma_m, vma));
69066+#endif
69067+
69068 return 0;
69069 }
69070
69071@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
69072 struct rb_node **rb_link, *rb_parent;
69073 struct mempolicy *pol;
69074
69075+ BUG_ON(vma->vm_mirror);
69076+
69077 /*
69078 * If anonymous vma has not yet been faulted, update new pgoff
69079 * to match new location, to increase its chance of merging.
69080@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
69081 return NULL;
69082 }
69083
69084+#ifdef CONFIG_PAX_SEGMEXEC
69085+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69086+{
69087+ struct vm_area_struct *prev_m;
69088+ struct rb_node **rb_link_m, *rb_parent_m;
69089+ struct mempolicy *pol_m;
69090+
69091+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69092+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69093+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69094+ *vma_m = *vma;
69095+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69096+ if (anon_vma_clone(vma_m, vma))
69097+ return -ENOMEM;
69098+ pol_m = vma_policy(vma_m);
69099+ mpol_get(pol_m);
69100+ vma_set_policy(vma_m, pol_m);
69101+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69102+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69103+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69104+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69105+ if (vma_m->vm_file)
69106+ get_file(vma_m->vm_file);
69107+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69108+ vma_m->vm_ops->open(vma_m);
69109+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69110+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69111+ vma_m->vm_mirror = vma;
69112+ vma->vm_mirror = vma_m;
69113+ return 0;
69114+}
69115+#endif
69116+
69117 /*
69118 * Return true if the calling process may expand its vm space by the passed
69119 * number of pages
69120@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
69121 unsigned long lim;
69122
69123 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69124-
69125+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69126 if (cur + npages > lim)
69127 return 0;
69128 return 1;
69129@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
69130 vma->vm_start = addr;
69131 vma->vm_end = addr + len;
69132
69133+#ifdef CONFIG_PAX_MPROTECT
69134+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69135+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69136+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69137+ return -EPERM;
69138+ if (!(vm_flags & VM_EXEC))
69139+ vm_flags &= ~VM_MAYEXEC;
69140+#else
69141+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69142+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69143+#endif
69144+ else
69145+ vm_flags &= ~VM_MAYWRITE;
69146+ }
69147+#endif
69148+
69149 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69150 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69151
69152diff -urNp linux-3.0.7/mm/mprotect.c linux-3.0.7/mm/mprotect.c
69153--- linux-3.0.7/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
69154+++ linux-3.0.7/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
69155@@ -23,10 +23,16 @@
69156 #include <linux/mmu_notifier.h>
69157 #include <linux/migrate.h>
69158 #include <linux/perf_event.h>
69159+
69160+#ifdef CONFIG_PAX_MPROTECT
69161+#include <linux/elf.h>
69162+#endif
69163+
69164 #include <asm/uaccess.h>
69165 #include <asm/pgtable.h>
69166 #include <asm/cacheflush.h>
69167 #include <asm/tlbflush.h>
69168+#include <asm/mmu_context.h>
69169
69170 #ifndef pgprot_modify
69171 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69172@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69173 flush_tlb_range(vma, start, end);
69174 }
69175
69176+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69177+/* called while holding the mmap semaphor for writing except stack expansion */
69178+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69179+{
69180+ unsigned long oldlimit, newlimit = 0UL;
69181+
69182+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69183+ return;
69184+
69185+ spin_lock(&mm->page_table_lock);
69186+ oldlimit = mm->context.user_cs_limit;
69187+ if ((prot & VM_EXEC) && oldlimit < end)
69188+ /* USER_CS limit moved up */
69189+ newlimit = end;
69190+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69191+ /* USER_CS limit moved down */
69192+ newlimit = start;
69193+
69194+ if (newlimit) {
69195+ mm->context.user_cs_limit = newlimit;
69196+
69197+#ifdef CONFIG_SMP
69198+ wmb();
69199+ cpus_clear(mm->context.cpu_user_cs_mask);
69200+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69201+#endif
69202+
69203+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69204+ }
69205+ spin_unlock(&mm->page_table_lock);
69206+ if (newlimit == end) {
69207+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69208+
69209+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69210+ if (is_vm_hugetlb_page(vma))
69211+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69212+ else
69213+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69214+ }
69215+}
69216+#endif
69217+
69218 int
69219 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69220 unsigned long start, unsigned long end, unsigned long newflags)
69221@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69222 int error;
69223 int dirty_accountable = 0;
69224
69225+#ifdef CONFIG_PAX_SEGMEXEC
69226+ struct vm_area_struct *vma_m = NULL;
69227+ unsigned long start_m, end_m;
69228+
69229+ start_m = start + SEGMEXEC_TASK_SIZE;
69230+ end_m = end + SEGMEXEC_TASK_SIZE;
69231+#endif
69232+
69233 if (newflags == oldflags) {
69234 *pprev = vma;
69235 return 0;
69236 }
69237
69238+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69239+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69240+
69241+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69242+ return -ENOMEM;
69243+
69244+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69245+ return -ENOMEM;
69246+ }
69247+
69248 /*
69249 * If we make a private mapping writable we increase our commit;
69250 * but (without finer accounting) cannot reduce our commit if we
69251@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69252 }
69253 }
69254
69255+#ifdef CONFIG_PAX_SEGMEXEC
69256+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69257+ if (start != vma->vm_start) {
69258+ error = split_vma(mm, vma, start, 1);
69259+ if (error)
69260+ goto fail;
69261+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69262+ *pprev = (*pprev)->vm_next;
69263+ }
69264+
69265+ if (end != vma->vm_end) {
69266+ error = split_vma(mm, vma, end, 0);
69267+ if (error)
69268+ goto fail;
69269+ }
69270+
69271+ if (pax_find_mirror_vma(vma)) {
69272+ error = __do_munmap(mm, start_m, end_m - start_m);
69273+ if (error)
69274+ goto fail;
69275+ } else {
69276+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69277+ if (!vma_m) {
69278+ error = -ENOMEM;
69279+ goto fail;
69280+ }
69281+ vma->vm_flags = newflags;
69282+ error = pax_mirror_vma(vma_m, vma);
69283+ if (error) {
69284+ vma->vm_flags = oldflags;
69285+ goto fail;
69286+ }
69287+ }
69288+ }
69289+#endif
69290+
69291 /*
69292 * First try to merge with previous and/or next vma.
69293 */
69294@@ -204,9 +306,21 @@ success:
69295 * vm_flags and vm_page_prot are protected by the mmap_sem
69296 * held in write mode.
69297 */
69298+
69299+#ifdef CONFIG_PAX_SEGMEXEC
69300+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69301+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69302+#endif
69303+
69304 vma->vm_flags = newflags;
69305+
69306+#ifdef CONFIG_PAX_MPROTECT
69307+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69308+ mm->binfmt->handle_mprotect(vma, newflags);
69309+#endif
69310+
69311 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69312- vm_get_page_prot(newflags));
69313+ vm_get_page_prot(vma->vm_flags));
69314
69315 if (vma_wants_writenotify(vma)) {
69316 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69317@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69318 end = start + len;
69319 if (end <= start)
69320 return -ENOMEM;
69321+
69322+#ifdef CONFIG_PAX_SEGMEXEC
69323+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69324+ if (end > SEGMEXEC_TASK_SIZE)
69325+ return -EINVAL;
69326+ } else
69327+#endif
69328+
69329+ if (end > TASK_SIZE)
69330+ return -EINVAL;
69331+
69332 if (!arch_validate_prot(prot))
69333 return -EINVAL;
69334
69335@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69336 /*
69337 * Does the application expect PROT_READ to imply PROT_EXEC:
69338 */
69339- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69340+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69341 prot |= PROT_EXEC;
69342
69343 vm_flags = calc_vm_prot_bits(prot);
69344@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69345 if (start > vma->vm_start)
69346 prev = vma;
69347
69348+#ifdef CONFIG_PAX_MPROTECT
69349+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69350+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69351+#endif
69352+
69353 for (nstart = start ; ; ) {
69354 unsigned long newflags;
69355
69356@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69357
69358 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69359 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69360+ if (prot & (PROT_WRITE | PROT_EXEC))
69361+ gr_log_rwxmprotect(vma->vm_file);
69362+
69363+ error = -EACCES;
69364+ goto out;
69365+ }
69366+
69367+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69368 error = -EACCES;
69369 goto out;
69370 }
69371@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69372 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69373 if (error)
69374 goto out;
69375+
69376+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69377+
69378 nstart = tmp;
69379
69380 if (nstart < prev->vm_end)
69381diff -urNp linux-3.0.7/mm/mremap.c linux-3.0.7/mm/mremap.c
69382--- linux-3.0.7/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
69383+++ linux-3.0.7/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
69384@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69385 continue;
69386 pte = ptep_clear_flush(vma, old_addr, old_pte);
69387 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69388+
69389+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69390+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69391+ pte = pte_exprotect(pte);
69392+#endif
69393+
69394 set_pte_at(mm, new_addr, new_pte, pte);
69395 }
69396
69397@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69398 if (is_vm_hugetlb_page(vma))
69399 goto Einval;
69400
69401+#ifdef CONFIG_PAX_SEGMEXEC
69402+ if (pax_find_mirror_vma(vma))
69403+ goto Einval;
69404+#endif
69405+
69406 /* We can't remap across vm area boundaries */
69407 if (old_len > vma->vm_end - addr)
69408 goto Efault;
69409@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69410 unsigned long ret = -EINVAL;
69411 unsigned long charged = 0;
69412 unsigned long map_flags;
69413+ unsigned long pax_task_size = TASK_SIZE;
69414
69415 if (new_addr & ~PAGE_MASK)
69416 goto out;
69417
69418- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69419+#ifdef CONFIG_PAX_SEGMEXEC
69420+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69421+ pax_task_size = SEGMEXEC_TASK_SIZE;
69422+#endif
69423+
69424+ pax_task_size -= PAGE_SIZE;
69425+
69426+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69427 goto out;
69428
69429 /* Check if the location we're moving into overlaps the
69430 * old location at all, and fail if it does.
69431 */
69432- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69433- goto out;
69434-
69435- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69436+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69437 goto out;
69438
69439 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69440@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69441 struct vm_area_struct *vma;
69442 unsigned long ret = -EINVAL;
69443 unsigned long charged = 0;
69444+ unsigned long pax_task_size = TASK_SIZE;
69445
69446 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69447 goto out;
69448@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69449 if (!new_len)
69450 goto out;
69451
69452+#ifdef CONFIG_PAX_SEGMEXEC
69453+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69454+ pax_task_size = SEGMEXEC_TASK_SIZE;
69455+#endif
69456+
69457+ pax_task_size -= PAGE_SIZE;
69458+
69459+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69460+ old_len > pax_task_size || addr > pax_task_size-old_len)
69461+ goto out;
69462+
69463 if (flags & MREMAP_FIXED) {
69464 if (flags & MREMAP_MAYMOVE)
69465 ret = mremap_to(addr, old_len, new_addr, new_len);
69466@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69467 addr + new_len);
69468 }
69469 ret = addr;
69470+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69471 goto out;
69472 }
69473 }
69474@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69475 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69476 if (ret)
69477 goto out;
69478+
69479+ map_flags = vma->vm_flags;
69480 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69481+ if (!(ret & ~PAGE_MASK)) {
69482+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69483+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69484+ }
69485 }
69486 out:
69487 if (ret & ~PAGE_MASK)
69488diff -urNp linux-3.0.7/mm/nobootmem.c linux-3.0.7/mm/nobootmem.c
69489--- linux-3.0.7/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
69490+++ linux-3.0.7/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
69491@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69492 unsigned long __init free_all_memory_core_early(int nodeid)
69493 {
69494 int i;
69495- u64 start, end;
69496+ u64 start, end, startrange, endrange;
69497 unsigned long count = 0;
69498- struct range *range = NULL;
69499+ struct range *range = NULL, rangerange = { 0, 0 };
69500 int nr_range;
69501
69502 nr_range = get_free_all_memory_range(&range, nodeid);
69503+ startrange = __pa(range) >> PAGE_SHIFT;
69504+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69505
69506 for (i = 0; i < nr_range; i++) {
69507 start = range[i].start;
69508 end = range[i].end;
69509+ if (start <= endrange && startrange < end) {
69510+ BUG_ON(rangerange.start | rangerange.end);
69511+ rangerange = range[i];
69512+ continue;
69513+ }
69514 count += end - start;
69515 __free_pages_memory(start, end);
69516 }
69517+ start = rangerange.start;
69518+ end = rangerange.end;
69519+ count += end - start;
69520+ __free_pages_memory(start, end);
69521
69522 return count;
69523 }
69524diff -urNp linux-3.0.7/mm/nommu.c linux-3.0.7/mm/nommu.c
69525--- linux-3.0.7/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
69526+++ linux-3.0.7/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
69527@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69528 int sysctl_overcommit_ratio = 50; /* default is 50% */
69529 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69530 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69531-int heap_stack_gap = 0;
69532
69533 atomic_long_t mmap_pages_allocated;
69534
69535@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
69536 EXPORT_SYMBOL(find_vma);
69537
69538 /*
69539- * find a VMA
69540- * - we don't extend stack VMAs under NOMMU conditions
69541- */
69542-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69543-{
69544- return find_vma(mm, addr);
69545-}
69546-
69547-/*
69548 * expand a stack to a given address
69549 * - not supported under NOMMU conditions
69550 */
69551@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
69552
69553 /* most fields are the same, copy all, and then fixup */
69554 *new = *vma;
69555+ INIT_LIST_HEAD(&new->anon_vma_chain);
69556 *region = *vma->vm_region;
69557 new->vm_region = region;
69558
69559diff -urNp linux-3.0.7/mm/page_alloc.c linux-3.0.7/mm/page_alloc.c
69560--- linux-3.0.7/mm/page_alloc.c 2011-10-16 21:54:54.000000000 -0400
69561+++ linux-3.0.7/mm/page_alloc.c 2011-10-16 21:55:28.000000000 -0400
69562@@ -340,7 +340,7 @@ out:
69563 * This usage means that zero-order pages may not be compound.
69564 */
69565
69566-static void free_compound_page(struct page *page)
69567+void free_compound_page(struct page *page)
69568 {
69569 __free_pages_ok(page, compound_order(page));
69570 }
69571@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69572 int i;
69573 int bad = 0;
69574
69575+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69576+ unsigned long index = 1UL << order;
69577+#endif
69578+
69579 trace_mm_page_free_direct(page, order);
69580 kmemcheck_free_shadow(page, order);
69581
69582@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69583 debug_check_no_obj_freed(page_address(page),
69584 PAGE_SIZE << order);
69585 }
69586+
69587+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69588+ for (; index; --index)
69589+ sanitize_highpage(page + index - 1);
69590+#endif
69591+
69592 arch_free_page(page, order);
69593 kernel_map_pages(page, 1 << order, 0);
69594
69595@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69596 arch_alloc_page(page, order);
69597 kernel_map_pages(page, 1 << order, 1);
69598
69599+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69600 if (gfp_flags & __GFP_ZERO)
69601 prep_zero_page(page, order, gfp_flags);
69602+#endif
69603
69604 if (order && (gfp_flags & __GFP_COMP))
69605 prep_compound_page(page, order);
69606@@ -2557,6 +2569,8 @@ void show_free_areas(unsigned int filter
69607 int cpu;
69608 struct zone *zone;
69609
69610+ pax_track_stack();
69611+
69612 for_each_populated_zone(zone) {
69613 if (skip_free_areas_node(filter, zone_to_nid(zone)))
69614 continue;
69615diff -urNp linux-3.0.7/mm/percpu.c linux-3.0.7/mm/percpu.c
69616--- linux-3.0.7/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
69617+++ linux-3.0.7/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
69618@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
69619 static unsigned int pcpu_last_unit_cpu __read_mostly;
69620
69621 /* the address of the first chunk which starts with the kernel static area */
69622-void *pcpu_base_addr __read_mostly;
69623+void *pcpu_base_addr __read_only;
69624 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69625
69626 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69627diff -urNp linux-3.0.7/mm/rmap.c linux-3.0.7/mm/rmap.c
69628--- linux-3.0.7/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
69629+++ linux-3.0.7/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
69630@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
69631 struct anon_vma *anon_vma = vma->anon_vma;
69632 struct anon_vma_chain *avc;
69633
69634+#ifdef CONFIG_PAX_SEGMEXEC
69635+ struct anon_vma_chain *avc_m = NULL;
69636+#endif
69637+
69638 might_sleep();
69639 if (unlikely(!anon_vma)) {
69640 struct mm_struct *mm = vma->vm_mm;
69641@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
69642 if (!avc)
69643 goto out_enomem;
69644
69645+#ifdef CONFIG_PAX_SEGMEXEC
69646+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
69647+ if (!avc_m)
69648+ goto out_enomem_free_avc;
69649+#endif
69650+
69651 anon_vma = find_mergeable_anon_vma(vma);
69652 allocated = NULL;
69653 if (!anon_vma) {
69654@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
69655 /* page_table_lock to protect against threads */
69656 spin_lock(&mm->page_table_lock);
69657 if (likely(!vma->anon_vma)) {
69658+
69659+#ifdef CONFIG_PAX_SEGMEXEC
69660+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69661+
69662+ if (vma_m) {
69663+ BUG_ON(vma_m->anon_vma);
69664+ vma_m->anon_vma = anon_vma;
69665+ avc_m->anon_vma = anon_vma;
69666+ avc_m->vma = vma;
69667+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
69668+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
69669+ avc_m = NULL;
69670+ }
69671+#endif
69672+
69673 vma->anon_vma = anon_vma;
69674 avc->anon_vma = anon_vma;
69675 avc->vma = vma;
69676@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
69677
69678 if (unlikely(allocated))
69679 put_anon_vma(allocated);
69680+
69681+#ifdef CONFIG_PAX_SEGMEXEC
69682+ if (unlikely(avc_m))
69683+ anon_vma_chain_free(avc_m);
69684+#endif
69685+
69686 if (unlikely(avc))
69687 anon_vma_chain_free(avc);
69688 }
69689 return 0;
69690
69691 out_enomem_free_avc:
69692+
69693+#ifdef CONFIG_PAX_SEGMEXEC
69694+ if (avc_m)
69695+ anon_vma_chain_free(avc_m);
69696+#endif
69697+
69698 anon_vma_chain_free(avc);
69699 out_enomem:
69700 return -ENOMEM;
69701@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
69702 * Attach the anon_vmas from src to dst.
69703 * Returns 0 on success, -ENOMEM on failure.
69704 */
69705-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
69706+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
69707 {
69708 struct anon_vma_chain *avc, *pavc;
69709 struct anon_vma *root = NULL;
69710@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
69711 * the corresponding VMA in the parent process is attached to.
69712 * Returns 0 on success, non-zero on failure.
69713 */
69714-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
69715+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
69716 {
69717 struct anon_vma_chain *avc;
69718 struct anon_vma *anon_vma;
69719diff -urNp linux-3.0.7/mm/shmem.c linux-3.0.7/mm/shmem.c
69720--- linux-3.0.7/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
69721+++ linux-3.0.7/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
69722@@ -31,7 +31,7 @@
69723 #include <linux/percpu_counter.h>
69724 #include <linux/swap.h>
69725
69726-static struct vfsmount *shm_mnt;
69727+struct vfsmount *shm_mnt;
69728
69729 #ifdef CONFIG_SHMEM
69730 /*
69731@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
69732 goto unlock;
69733 }
69734 entry = shmem_swp_entry(info, index, NULL);
69735+ if (!entry)
69736+ goto unlock;
69737 if (entry->val) {
69738 /*
69739 * The more uptodate page coming down from a stacked
69740@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
69741 struct vm_area_struct pvma;
69742 struct page *page;
69743
69744+ pax_track_stack();
69745+
69746 spol = mpol_cond_copy(&mpol,
69747 mpol_shared_policy_lookup(&info->policy, idx));
69748
69749@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
69750 int err = -ENOMEM;
69751
69752 /* Round up to L1_CACHE_BYTES to resist false sharing */
69753- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69754- L1_CACHE_BYTES), GFP_KERNEL);
69755+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69756 if (!sbinfo)
69757 return -ENOMEM;
69758
69759diff -urNp linux-3.0.7/mm/slab.c linux-3.0.7/mm/slab.c
69760--- linux-3.0.7/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
69761+++ linux-3.0.7/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
69762@@ -151,7 +151,7 @@
69763
69764 /* Legal flag mask for kmem_cache_create(). */
69765 #if DEBUG
69766-# define CREATE_MASK (SLAB_RED_ZONE | \
69767+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69768 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69769 SLAB_CACHE_DMA | \
69770 SLAB_STORE_USER | \
69771@@ -159,7 +159,7 @@
69772 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69773 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69774 #else
69775-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69776+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69777 SLAB_CACHE_DMA | \
69778 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69779 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69780@@ -288,7 +288,7 @@ struct kmem_list3 {
69781 * Need this for bootstrapping a per node allocator.
69782 */
69783 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69784-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69785+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69786 #define CACHE_CACHE 0
69787 #define SIZE_AC MAX_NUMNODES
69788 #define SIZE_L3 (2 * MAX_NUMNODES)
69789@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
69790 if ((x)->max_freeable < i) \
69791 (x)->max_freeable = i; \
69792 } while (0)
69793-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69794-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69795-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69796-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69797+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69798+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69799+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69800+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69801 #else
69802 #define STATS_INC_ACTIVE(x) do { } while (0)
69803 #define STATS_DEC_ACTIVE(x) do { } while (0)
69804@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
69805 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69806 */
69807 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69808- const struct slab *slab, void *obj)
69809+ const struct slab *slab, const void *obj)
69810 {
69811 u32 offset = (obj - slab->s_mem);
69812 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69813@@ -564,7 +564,7 @@ struct cache_names {
69814 static struct cache_names __initdata cache_names[] = {
69815 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
69816 #include <linux/kmalloc_sizes.h>
69817- {NULL,}
69818+ {NULL}
69819 #undef CACHE
69820 };
69821
69822@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
69823 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69824 sizes[INDEX_AC].cs_size,
69825 ARCH_KMALLOC_MINALIGN,
69826- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69827+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69828 NULL);
69829
69830 if (INDEX_AC != INDEX_L3) {
69831@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
69832 kmem_cache_create(names[INDEX_L3].name,
69833 sizes[INDEX_L3].cs_size,
69834 ARCH_KMALLOC_MINALIGN,
69835- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69836+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69837 NULL);
69838 }
69839
69840@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
69841 sizes->cs_cachep = kmem_cache_create(names->name,
69842 sizes->cs_size,
69843 ARCH_KMALLOC_MINALIGN,
69844- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69845+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69846 NULL);
69847 }
69848 #ifdef CONFIG_ZONE_DMA
69849@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
69850 }
69851 /* cpu stats */
69852 {
69853- unsigned long allochit = atomic_read(&cachep->allochit);
69854- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69855- unsigned long freehit = atomic_read(&cachep->freehit);
69856- unsigned long freemiss = atomic_read(&cachep->freemiss);
69857+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69858+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69859+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69860+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69861
69862 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69863 allochit, allocmiss, freehit, freemiss);
69864@@ -4532,15 +4532,66 @@ static const struct file_operations proc
69865
69866 static int __init slab_proc_init(void)
69867 {
69868- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69869+ mode_t gr_mode = S_IRUGO;
69870+
69871+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69872+ gr_mode = S_IRUSR;
69873+#endif
69874+
69875+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69876 #ifdef CONFIG_DEBUG_SLAB_LEAK
69877- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69878+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69879 #endif
69880 return 0;
69881 }
69882 module_init(slab_proc_init);
69883 #endif
69884
69885+void check_object_size(const void *ptr, unsigned long n, bool to)
69886+{
69887+
69888+#ifdef CONFIG_PAX_USERCOPY
69889+ struct page *page;
69890+ struct kmem_cache *cachep = NULL;
69891+ struct slab *slabp;
69892+ unsigned int objnr;
69893+ unsigned long offset;
69894+
69895+ if (!n)
69896+ return;
69897+
69898+ if (ZERO_OR_NULL_PTR(ptr))
69899+ goto report;
69900+
69901+ if (!virt_addr_valid(ptr))
69902+ return;
69903+
69904+ page = virt_to_head_page(ptr);
69905+
69906+ if (!PageSlab(page)) {
69907+ if (object_is_on_stack(ptr, n) == -1)
69908+ goto report;
69909+ return;
69910+ }
69911+
69912+ cachep = page_get_cache(page);
69913+ if (!(cachep->flags & SLAB_USERCOPY))
69914+ goto report;
69915+
69916+ slabp = page_get_slab(page);
69917+ objnr = obj_to_index(cachep, slabp, ptr);
69918+ BUG_ON(objnr >= cachep->num);
69919+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69920+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69921+ return;
69922+
69923+report:
69924+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69925+#endif
69926+
69927+}
69928+EXPORT_SYMBOL(check_object_size);
69929+
69930 /**
69931 * ksize - get the actual amount of memory allocated for a given object
69932 * @objp: Pointer to the object
69933diff -urNp linux-3.0.7/mm/slob.c linux-3.0.7/mm/slob.c
69934--- linux-3.0.7/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
69935+++ linux-3.0.7/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
69936@@ -29,7 +29,7 @@
69937 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69938 * alloc_pages() directly, allocating compound pages so the page order
69939 * does not have to be separately tracked, and also stores the exact
69940- * allocation size in page->private so that it can be used to accurately
69941+ * allocation size in slob_page->size so that it can be used to accurately
69942 * provide ksize(). These objects are detected in kfree() because slob_page()
69943 * is false for them.
69944 *
69945@@ -58,6 +58,7 @@
69946 */
69947
69948 #include <linux/kernel.h>
69949+#include <linux/sched.h>
69950 #include <linux/slab.h>
69951 #include <linux/mm.h>
69952 #include <linux/swap.h> /* struct reclaim_state */
69953@@ -102,7 +103,8 @@ struct slob_page {
69954 unsigned long flags; /* mandatory */
69955 atomic_t _count; /* mandatory */
69956 slobidx_t units; /* free units left in page */
69957- unsigned long pad[2];
69958+ unsigned long pad[1];
69959+ unsigned long size; /* size when >=PAGE_SIZE */
69960 slob_t *free; /* first free slob_t in page */
69961 struct list_head list; /* linked list of free pages */
69962 };
69963@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
69964 */
69965 static inline int is_slob_page(struct slob_page *sp)
69966 {
69967- return PageSlab((struct page *)sp);
69968+ return PageSlab((struct page *)sp) && !sp->size;
69969 }
69970
69971 static inline void set_slob_page(struct slob_page *sp)
69972@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
69973
69974 static inline struct slob_page *slob_page(const void *addr)
69975 {
69976- return (struct slob_page *)virt_to_page(addr);
69977+ return (struct slob_page *)virt_to_head_page(addr);
69978 }
69979
69980 /*
69981@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
69982 /*
69983 * Return the size of a slob block.
69984 */
69985-static slobidx_t slob_units(slob_t *s)
69986+static slobidx_t slob_units(const slob_t *s)
69987 {
69988 if (s->units > 0)
69989 return s->units;
69990@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
69991 /*
69992 * Return the next free slob block pointer after this one.
69993 */
69994-static slob_t *slob_next(slob_t *s)
69995+static slob_t *slob_next(const slob_t *s)
69996 {
69997 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69998 slobidx_t next;
69999@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70000 /*
70001 * Returns true if s is the last free block in its page.
70002 */
70003-static int slob_last(slob_t *s)
70004+static int slob_last(const slob_t *s)
70005 {
70006 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70007 }
70008@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70009 if (!page)
70010 return NULL;
70011
70012+ set_slob_page(page);
70013 return page_address(page);
70014 }
70015
70016@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70017 if (!b)
70018 return NULL;
70019 sp = slob_page(b);
70020- set_slob_page(sp);
70021
70022 spin_lock_irqsave(&slob_lock, flags);
70023 sp->units = SLOB_UNITS(PAGE_SIZE);
70024 sp->free = b;
70025+ sp->size = 0;
70026 INIT_LIST_HEAD(&sp->list);
70027 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70028 set_slob_page_free(sp, slob_list);
70029@@ -476,10 +479,9 @@ out:
70030 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70031 */
70032
70033-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70034+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70035 {
70036- unsigned int *m;
70037- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70038+ slob_t *m;
70039 void *ret;
70040
70041 lockdep_trace_alloc(gfp);
70042@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
70043
70044 if (!m)
70045 return NULL;
70046- *m = size;
70047+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70048+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70049+ m[0].units = size;
70050+ m[1].units = align;
70051 ret = (void *)m + align;
70052
70053 trace_kmalloc_node(_RET_IP_, ret,
70054@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
70055 gfp |= __GFP_COMP;
70056 ret = slob_new_pages(gfp, order, node);
70057 if (ret) {
70058- struct page *page;
70059- page = virt_to_page(ret);
70060- page->private = size;
70061+ struct slob_page *sp;
70062+ sp = slob_page(ret);
70063+ sp->size = size;
70064 }
70065
70066 trace_kmalloc_node(_RET_IP_, ret,
70067 size, PAGE_SIZE << order, gfp, node);
70068 }
70069
70070- kmemleak_alloc(ret, size, 1, gfp);
70071+ return ret;
70072+}
70073+
70074+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70075+{
70076+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70077+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70078+
70079+ if (!ZERO_OR_NULL_PTR(ret))
70080+ kmemleak_alloc(ret, size, 1, gfp);
70081 return ret;
70082 }
70083 EXPORT_SYMBOL(__kmalloc_node);
70084@@ -531,13 +545,88 @@ void kfree(const void *block)
70085 sp = slob_page(block);
70086 if (is_slob_page(sp)) {
70087 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70088- unsigned int *m = (unsigned int *)(block - align);
70089- slob_free(m, *m + align);
70090- } else
70091+ slob_t *m = (slob_t *)(block - align);
70092+ slob_free(m, m[0].units + align);
70093+ } else {
70094+ clear_slob_page(sp);
70095+ free_slob_page(sp);
70096+ sp->size = 0;
70097 put_page(&sp->page);
70098+ }
70099 }
70100 EXPORT_SYMBOL(kfree);
70101
70102+void check_object_size(const void *ptr, unsigned long n, bool to)
70103+{
70104+
70105+#ifdef CONFIG_PAX_USERCOPY
70106+ struct slob_page *sp;
70107+ const slob_t *free;
70108+ const void *base;
70109+ unsigned long flags;
70110+
70111+ if (!n)
70112+ return;
70113+
70114+ if (ZERO_OR_NULL_PTR(ptr))
70115+ goto report;
70116+
70117+ if (!virt_addr_valid(ptr))
70118+ return;
70119+
70120+ sp = slob_page(ptr);
70121+ if (!PageSlab((struct page*)sp)) {
70122+ if (object_is_on_stack(ptr, n) == -1)
70123+ goto report;
70124+ return;
70125+ }
70126+
70127+ if (sp->size) {
70128+ base = page_address(&sp->page);
70129+ if (base <= ptr && n <= sp->size - (ptr - base))
70130+ return;
70131+ goto report;
70132+ }
70133+
70134+ /* some tricky double walking to find the chunk */
70135+ spin_lock_irqsave(&slob_lock, flags);
70136+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70137+ free = sp->free;
70138+
70139+ while (!slob_last(free) && (void *)free <= ptr) {
70140+ base = free + slob_units(free);
70141+ free = slob_next(free);
70142+ }
70143+
70144+ while (base < (void *)free) {
70145+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70146+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70147+ int offset;
70148+
70149+ if (ptr < base + align)
70150+ break;
70151+
70152+ offset = ptr - base - align;
70153+ if (offset >= m) {
70154+ base += size;
70155+ continue;
70156+ }
70157+
70158+ if (n > m - offset)
70159+ break;
70160+
70161+ spin_unlock_irqrestore(&slob_lock, flags);
70162+ return;
70163+ }
70164+
70165+ spin_unlock_irqrestore(&slob_lock, flags);
70166+report:
70167+ pax_report_usercopy(ptr, n, to, NULL);
70168+#endif
70169+
70170+}
70171+EXPORT_SYMBOL(check_object_size);
70172+
70173 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70174 size_t ksize(const void *block)
70175 {
70176@@ -550,10 +639,10 @@ size_t ksize(const void *block)
70177 sp = slob_page(block);
70178 if (is_slob_page(sp)) {
70179 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70180- unsigned int *m = (unsigned int *)(block - align);
70181- return SLOB_UNITS(*m) * SLOB_UNIT;
70182+ slob_t *m = (slob_t *)(block - align);
70183+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70184 } else
70185- return sp->page.private;
70186+ return sp->size;
70187 }
70188 EXPORT_SYMBOL(ksize);
70189
70190@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
70191 {
70192 struct kmem_cache *c;
70193
70194+#ifdef CONFIG_PAX_USERCOPY
70195+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70196+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70197+#else
70198 c = slob_alloc(sizeof(struct kmem_cache),
70199 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70200+#endif
70201
70202 if (c) {
70203 c->name = name;
70204@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
70205 {
70206 void *b;
70207
70208+#ifdef CONFIG_PAX_USERCOPY
70209+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70210+#else
70211 if (c->size < PAGE_SIZE) {
70212 b = slob_alloc(c->size, flags, c->align, node);
70213 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70214 SLOB_UNITS(c->size) * SLOB_UNIT,
70215 flags, node);
70216 } else {
70217+ struct slob_page *sp;
70218+
70219 b = slob_new_pages(flags, get_order(c->size), node);
70220+ sp = slob_page(b);
70221+ sp->size = c->size;
70222 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70223 PAGE_SIZE << get_order(c->size),
70224 flags, node);
70225 }
70226+#endif
70227
70228 if (c->ctor)
70229 c->ctor(b);
70230@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70231
70232 static void __kmem_cache_free(void *b, int size)
70233 {
70234- if (size < PAGE_SIZE)
70235+ struct slob_page *sp = slob_page(b);
70236+
70237+ if (is_slob_page(sp))
70238 slob_free(b, size);
70239- else
70240+ else {
70241+ clear_slob_page(sp);
70242+ free_slob_page(sp);
70243+ sp->size = 0;
70244 slob_free_pages(b, get_order(size));
70245+ }
70246 }
70247
70248 static void kmem_rcu_free(struct rcu_head *head)
70249@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
70250
70251 void kmem_cache_free(struct kmem_cache *c, void *b)
70252 {
70253+ int size = c->size;
70254+
70255+#ifdef CONFIG_PAX_USERCOPY
70256+ if (size + c->align < PAGE_SIZE) {
70257+ size += c->align;
70258+ b -= c->align;
70259+ }
70260+#endif
70261+
70262 kmemleak_free_recursive(b, c->flags);
70263 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70264 struct slob_rcu *slob_rcu;
70265- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70266- slob_rcu->size = c->size;
70267+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70268+ slob_rcu->size = size;
70269 call_rcu(&slob_rcu->head, kmem_rcu_free);
70270 } else {
70271- __kmem_cache_free(b, c->size);
70272+ __kmem_cache_free(b, size);
70273 }
70274
70275+#ifdef CONFIG_PAX_USERCOPY
70276+ trace_kfree(_RET_IP_, b);
70277+#else
70278 trace_kmem_cache_free(_RET_IP_, b);
70279+#endif
70280+
70281 }
70282 EXPORT_SYMBOL(kmem_cache_free);
70283
70284diff -urNp linux-3.0.7/mm/slub.c linux-3.0.7/mm/slub.c
70285--- linux-3.0.7/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
70286+++ linux-3.0.7/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
70287@@ -200,7 +200,7 @@ struct track {
70288
70289 enum track_item { TRACK_ALLOC, TRACK_FREE };
70290
70291-#ifdef CONFIG_SYSFS
70292+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70293 static int sysfs_slab_add(struct kmem_cache *);
70294 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70295 static void sysfs_slab_remove(struct kmem_cache *);
70296@@ -442,7 +442,7 @@ static void print_track(const char *s, s
70297 if (!t->addr)
70298 return;
70299
70300- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70301+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70302 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70303 }
70304
70305@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
70306
70307 page = virt_to_head_page(x);
70308
70309+ BUG_ON(!PageSlab(page));
70310+
70311 slab_free(s, page, x, _RET_IP_);
70312
70313 trace_kmem_cache_free(_RET_IP_, x);
70314@@ -2170,7 +2172,7 @@ static int slub_min_objects;
70315 * Merge control. If this is set then no merging of slab caches will occur.
70316 * (Could be removed. This was introduced to pacify the merge skeptics.)
70317 */
70318-static int slub_nomerge;
70319+static int slub_nomerge = 1;
70320
70321 /*
70322 * Calculate the order of allocation given an slab object size.
70323@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
70324 * list to avoid pounding the page allocator excessively.
70325 */
70326 set_min_partial(s, ilog2(s->size));
70327- s->refcount = 1;
70328+ atomic_set(&s->refcount, 1);
70329 #ifdef CONFIG_NUMA
70330 s->remote_node_defrag_ratio = 1000;
70331 #endif
70332@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
70333 void kmem_cache_destroy(struct kmem_cache *s)
70334 {
70335 down_write(&slub_lock);
70336- s->refcount--;
70337- if (!s->refcount) {
70338+ if (atomic_dec_and_test(&s->refcount)) {
70339 list_del(&s->list);
70340 if (kmem_cache_close(s)) {
70341 printk(KERN_ERR "SLUB %s: %s called for cache that "
70342@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
70343 EXPORT_SYMBOL(__kmalloc_node);
70344 #endif
70345
70346+void check_object_size(const void *ptr, unsigned long n, bool to)
70347+{
70348+
70349+#ifdef CONFIG_PAX_USERCOPY
70350+ struct page *page;
70351+ struct kmem_cache *s = NULL;
70352+ unsigned long offset;
70353+
70354+ if (!n)
70355+ return;
70356+
70357+ if (ZERO_OR_NULL_PTR(ptr))
70358+ goto report;
70359+
70360+ if (!virt_addr_valid(ptr))
70361+ return;
70362+
70363+ page = virt_to_head_page(ptr);
70364+
70365+ if (!PageSlab(page)) {
70366+ if (object_is_on_stack(ptr, n) == -1)
70367+ goto report;
70368+ return;
70369+ }
70370+
70371+ s = page->slab;
70372+ if (!(s->flags & SLAB_USERCOPY))
70373+ goto report;
70374+
70375+ offset = (ptr - page_address(page)) % s->size;
70376+ if (offset <= s->objsize && n <= s->objsize - offset)
70377+ return;
70378+
70379+report:
70380+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70381+#endif
70382+
70383+}
70384+EXPORT_SYMBOL(check_object_size);
70385+
70386 size_t ksize(const void *object)
70387 {
70388 struct page *page;
70389@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
70390 int node;
70391
70392 list_add(&s->list, &slab_caches);
70393- s->refcount = -1;
70394+ atomic_set(&s->refcount, -1);
70395
70396 for_each_node_state(node, N_NORMAL_MEMORY) {
70397 struct kmem_cache_node *n = get_node(s, node);
70398@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
70399
70400 /* Caches that are not of the two-to-the-power-of size */
70401 if (KMALLOC_MIN_SIZE <= 32) {
70402- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70403+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70404 caches++;
70405 }
70406
70407 if (KMALLOC_MIN_SIZE <= 64) {
70408- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70409+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70410 caches++;
70411 }
70412
70413 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70414- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70415+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70416 caches++;
70417 }
70418
70419@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
70420 /*
70421 * We may have set a slab to be unmergeable during bootstrap.
70422 */
70423- if (s->refcount < 0)
70424+ if (atomic_read(&s->refcount) < 0)
70425 return 1;
70426
70427 return 0;
70428@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
70429 down_write(&slub_lock);
70430 s = find_mergeable(size, align, flags, name, ctor);
70431 if (s) {
70432- s->refcount++;
70433+ atomic_inc(&s->refcount);
70434 /*
70435 * Adjust the object sizes so that we clear
70436 * the complete object on kzalloc.
70437@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
70438 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70439
70440 if (sysfs_slab_alias(s, name)) {
70441- s->refcount--;
70442+ atomic_dec(&s->refcount);
70443 goto err;
70444 }
70445 up_write(&slub_lock);
70446@@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
70447 }
70448 #endif
70449
70450-#ifdef CONFIG_SYSFS
70451+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70452 static int count_inuse(struct page *page)
70453 {
70454 return page->inuse;
70455@@ -3935,12 +3976,12 @@ static void resiliency_test(void)
70456 validate_slab_cache(kmalloc_caches[9]);
70457 }
70458 #else
70459-#ifdef CONFIG_SYSFS
70460+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70461 static void resiliency_test(void) {};
70462 #endif
70463 #endif
70464
70465-#ifdef CONFIG_SYSFS
70466+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70467 enum slab_stat_type {
70468 SL_ALL, /* All slabs */
70469 SL_PARTIAL, /* Only partially allocated slabs */
70470@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
70471
70472 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70473 {
70474- return sprintf(buf, "%d\n", s->refcount - 1);
70475+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70476 }
70477 SLAB_ATTR_RO(aliases);
70478
70479@@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
70480 return name;
70481 }
70482
70483+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70484 static int sysfs_slab_add(struct kmem_cache *s)
70485 {
70486 int err;
70487@@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
70488 kobject_del(&s->kobj);
70489 kobject_put(&s->kobj);
70490 }
70491+#endif
70492
70493 /*
70494 * Need to buffer aliases during bootup until sysfs becomes
70495@@ -4737,6 +4780,7 @@ struct saved_alias {
70496
70497 static struct saved_alias *alias_list;
70498
70499+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70500 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70501 {
70502 struct saved_alias *al;
70503@@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
70504 alias_list = al;
70505 return 0;
70506 }
70507+#endif
70508
70509 static int __init slab_sysfs_init(void)
70510 {
70511@@ -4894,7 +4939,13 @@ static const struct file_operations proc
70512
70513 static int __init slab_proc_init(void)
70514 {
70515- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70516+ mode_t gr_mode = S_IRUGO;
70517+
70518+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70519+ gr_mode = S_IRUSR;
70520+#endif
70521+
70522+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70523 return 0;
70524 }
70525 module_init(slab_proc_init);
70526diff -urNp linux-3.0.7/mm/swap.c linux-3.0.7/mm/swap.c
70527--- linux-3.0.7/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
70528+++ linux-3.0.7/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
70529@@ -31,6 +31,7 @@
70530 #include <linux/backing-dev.h>
70531 #include <linux/memcontrol.h>
70532 #include <linux/gfp.h>
70533+#include <linux/hugetlb.h>
70534
70535 #include "internal.h"
70536
70537@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70538
70539 __page_cache_release(page);
70540 dtor = get_compound_page_dtor(page);
70541+ if (!PageHuge(page))
70542+ BUG_ON(dtor != free_compound_page);
70543 (*dtor)(page);
70544 }
70545
70546diff -urNp linux-3.0.7/mm/swapfile.c linux-3.0.7/mm/swapfile.c
70547--- linux-3.0.7/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
70548+++ linux-3.0.7/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
70549@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70550
70551 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70552 /* Activity counter to indicate that a swapon or swapoff has occurred */
70553-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70554+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70555
70556 static inline unsigned char swap_count(unsigned char ent)
70557 {
70558@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70559 }
70560 filp_close(swap_file, NULL);
70561 err = 0;
70562- atomic_inc(&proc_poll_event);
70563+ atomic_inc_unchecked(&proc_poll_event);
70564 wake_up_interruptible(&proc_poll_wait);
70565
70566 out_dput:
70567@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
70568
70569 poll_wait(file, &proc_poll_wait, wait);
70570
70571- if (s->event != atomic_read(&proc_poll_event)) {
70572- s->event = atomic_read(&proc_poll_event);
70573+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
70574+ s->event = atomic_read_unchecked(&proc_poll_event);
70575 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70576 }
70577
70578@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
70579 }
70580
70581 s->seq.private = s;
70582- s->event = atomic_read(&proc_poll_event);
70583+ s->event = atomic_read_unchecked(&proc_poll_event);
70584 return ret;
70585 }
70586
70587@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
70588 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70589
70590 mutex_unlock(&swapon_mutex);
70591- atomic_inc(&proc_poll_event);
70592+ atomic_inc_unchecked(&proc_poll_event);
70593 wake_up_interruptible(&proc_poll_wait);
70594
70595 if (S_ISREG(inode->i_mode))
70596diff -urNp linux-3.0.7/mm/util.c linux-3.0.7/mm/util.c
70597--- linux-3.0.7/mm/util.c 2011-07-21 22:17:23.000000000 -0400
70598+++ linux-3.0.7/mm/util.c 2011-08-23 21:47:56.000000000 -0400
70599@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70600 * allocated buffer. Use this if you don't want to free the buffer immediately
70601 * like, for example, with RCU.
70602 */
70603+#undef __krealloc
70604 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70605 {
70606 void *ret;
70607@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70608 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70609 * %NULL pointer, the object pointed to is freed.
70610 */
70611+#undef krealloc
70612 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70613 {
70614 void *ret;
70615@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
70616 void arch_pick_mmap_layout(struct mm_struct *mm)
70617 {
70618 mm->mmap_base = TASK_UNMAPPED_BASE;
70619+
70620+#ifdef CONFIG_PAX_RANDMMAP
70621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70622+ mm->mmap_base += mm->delta_mmap;
70623+#endif
70624+
70625 mm->get_unmapped_area = arch_get_unmapped_area;
70626 mm->unmap_area = arch_unmap_area;
70627 }
70628diff -urNp linux-3.0.7/mm/vmalloc.c linux-3.0.7/mm/vmalloc.c
70629--- linux-3.0.7/mm/vmalloc.c 2011-10-16 21:54:54.000000000 -0400
70630+++ linux-3.0.7/mm/vmalloc.c 2011-10-16 21:55:28.000000000 -0400
70631@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70632
70633 pte = pte_offset_kernel(pmd, addr);
70634 do {
70635- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70636- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70637+
70638+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70639+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70640+ BUG_ON(!pte_exec(*pte));
70641+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70642+ continue;
70643+ }
70644+#endif
70645+
70646+ {
70647+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70648+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70649+ }
70650 } while (pte++, addr += PAGE_SIZE, addr != end);
70651 }
70652
70653@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70654 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70655 {
70656 pte_t *pte;
70657+ int ret = -ENOMEM;
70658
70659 /*
70660 * nr is a running index into the array which helps higher level
70661@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
70662 pte = pte_alloc_kernel(pmd, addr);
70663 if (!pte)
70664 return -ENOMEM;
70665+
70666+ pax_open_kernel();
70667 do {
70668 struct page *page = pages[*nr];
70669
70670- if (WARN_ON(!pte_none(*pte)))
70671- return -EBUSY;
70672- if (WARN_ON(!page))
70673- return -ENOMEM;
70674+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70675+ if (pgprot_val(prot) & _PAGE_NX)
70676+#endif
70677+
70678+ if (WARN_ON(!pte_none(*pte))) {
70679+ ret = -EBUSY;
70680+ goto out;
70681+ }
70682+ if (WARN_ON(!page)) {
70683+ ret = -ENOMEM;
70684+ goto out;
70685+ }
70686 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70687 (*nr)++;
70688 } while (pte++, addr += PAGE_SIZE, addr != end);
70689- return 0;
70690+ ret = 0;
70691+out:
70692+ pax_close_kernel();
70693+ return ret;
70694 }
70695
70696 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70697@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
70698 * and fall back on vmalloc() if that fails. Others
70699 * just put it in the vmalloc space.
70700 */
70701-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70702+#ifdef CONFIG_MODULES
70703+#ifdef MODULES_VADDR
70704 unsigned long addr = (unsigned long)x;
70705 if (addr >= MODULES_VADDR && addr < MODULES_END)
70706 return 1;
70707 #endif
70708+
70709+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70710+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70711+ return 1;
70712+#endif
70713+
70714+#endif
70715+
70716 return is_vmalloc_addr(x);
70717 }
70718
70719@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
70720
70721 if (!pgd_none(*pgd)) {
70722 pud_t *pud = pud_offset(pgd, addr);
70723+#ifdef CONFIG_X86
70724+ if (!pud_large(*pud))
70725+#endif
70726 if (!pud_none(*pud)) {
70727 pmd_t *pmd = pmd_offset(pud, addr);
70728+#ifdef CONFIG_X86
70729+ if (!pmd_large(*pmd))
70730+#endif
70731 if (!pmd_none(*pmd)) {
70732 pte_t *ptep, pte;
70733
70734@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
70735 struct vm_struct *area;
70736
70737 BUG_ON(in_interrupt());
70738+
70739+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70740+ if (flags & VM_KERNEXEC) {
70741+ if (start != VMALLOC_START || end != VMALLOC_END)
70742+ return NULL;
70743+ start = (unsigned long)MODULES_EXEC_VADDR;
70744+ end = (unsigned long)MODULES_EXEC_END;
70745+ }
70746+#endif
70747+
70748 if (flags & VM_IOREMAP) {
70749 int bit = fls(size);
70750
70751@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
70752 if (count > totalram_pages)
70753 return NULL;
70754
70755+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70756+ if (!(pgprot_val(prot) & _PAGE_NX))
70757+ flags |= VM_KERNEXEC;
70758+#endif
70759+
70760 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70761 __builtin_return_address(0));
70762 if (!area)
70763@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
70764 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70765 return NULL;
70766
70767+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70768+ if (!(pgprot_val(prot) & _PAGE_NX))
70769+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70770+ node, gfp_mask, caller);
70771+ else
70772+#endif
70773+
70774 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
70775 gfp_mask, caller);
70776
70777@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
70778 gfp_mask, prot, node, caller);
70779 }
70780
70781+#undef __vmalloc
70782 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70783 {
70784 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70785@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
70786 * For tight control over page level allocator and protection flags
70787 * use __vmalloc() instead.
70788 */
70789+#undef vmalloc
70790 void *vmalloc(unsigned long size)
70791 {
70792 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
70793@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
70794 * For tight control over page level allocator and protection flags
70795 * use __vmalloc() instead.
70796 */
70797+#undef vzalloc
70798 void *vzalloc(unsigned long size)
70799 {
70800 return __vmalloc_node_flags(size, -1,
70801@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
70802 * The resulting memory area is zeroed so it can be mapped to userspace
70803 * without leaking data.
70804 */
70805+#undef vmalloc_user
70806 void *vmalloc_user(unsigned long size)
70807 {
70808 struct vm_struct *area;
70809@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
70810 * For tight control over page level allocator and protection flags
70811 * use __vmalloc() instead.
70812 */
70813+#undef vmalloc_node
70814 void *vmalloc_node(unsigned long size, int node)
70815 {
70816 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70817@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
70818 * For tight control over page level allocator and protection flags
70819 * use __vmalloc_node() instead.
70820 */
70821+#undef vzalloc_node
70822 void *vzalloc_node(unsigned long size, int node)
70823 {
70824 return __vmalloc_node_flags(size, node,
70825@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
70826 * For tight control over page level allocator and protection flags
70827 * use __vmalloc() instead.
70828 */
70829-
70830+#undef vmalloc_exec
70831 void *vmalloc_exec(unsigned long size)
70832 {
70833- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70834+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70835 -1, __builtin_return_address(0));
70836 }
70837
70838@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
70839 * Allocate enough 32bit PA addressable pages to cover @size from the
70840 * page level allocator and map them into contiguous kernel virtual space.
70841 */
70842+#undef vmalloc_32
70843 void *vmalloc_32(unsigned long size)
70844 {
70845 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70846@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
70847 * The resulting memory area is 32bit addressable and zeroed so it can be
70848 * mapped to userspace without leaking data.
70849 */
70850+#undef vmalloc_32_user
70851 void *vmalloc_32_user(unsigned long size)
70852 {
70853 struct vm_struct *area;
70854@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
70855 unsigned long uaddr = vma->vm_start;
70856 unsigned long usize = vma->vm_end - vma->vm_start;
70857
70858+ BUG_ON(vma->vm_mirror);
70859+
70860 if ((PAGE_SIZE-1) & (unsigned long)addr)
70861 return -EINVAL;
70862
70863diff -urNp linux-3.0.7/mm/vmstat.c linux-3.0.7/mm/vmstat.c
70864--- linux-3.0.7/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
70865+++ linux-3.0.7/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
70866@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
70867 *
70868 * vm_stat contains the global counters
70869 */
70870-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70871+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70872 EXPORT_SYMBOL(vm_stat);
70873
70874 #ifdef CONFIG_SMP
70875@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
70876 v = p->vm_stat_diff[i];
70877 p->vm_stat_diff[i] = 0;
70878 local_irq_restore(flags);
70879- atomic_long_add(v, &zone->vm_stat[i]);
70880+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70881 global_diff[i] += v;
70882 #ifdef CONFIG_NUMA
70883 /* 3 seconds idle till flush */
70884@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
70885
70886 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70887 if (global_diff[i])
70888- atomic_long_add(global_diff[i], &vm_stat[i]);
70889+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70890 }
70891
70892 #endif
70893@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
70894 start_cpu_timer(cpu);
70895 #endif
70896 #ifdef CONFIG_PROC_FS
70897- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70898- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70899- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70900- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70901+ {
70902+ mode_t gr_mode = S_IRUGO;
70903+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70904+ gr_mode = S_IRUSR;
70905+#endif
70906+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70907+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70908+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70909+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70910+#else
70911+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70912+#endif
70913+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70914+ }
70915 #endif
70916 return 0;
70917 }
70918diff -urNp linux-3.0.7/net/8021q/vlan.c linux-3.0.7/net/8021q/vlan.c
70919--- linux-3.0.7/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
70920+++ linux-3.0.7/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
70921@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
70922 err = -EPERM;
70923 if (!capable(CAP_NET_ADMIN))
70924 break;
70925- if ((args.u.name_type >= 0) &&
70926- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70927+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70928 struct vlan_net *vn;
70929
70930 vn = net_generic(net, vlan_net_id);
70931diff -urNp linux-3.0.7/net/9p/trans_fd.c linux-3.0.7/net/9p/trans_fd.c
70932--- linux-3.0.7/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
70933+++ linux-3.0.7/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
70934@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
70935 oldfs = get_fs();
70936 set_fs(get_ds());
70937 /* The cast to a user pointer is valid due to the set_fs() */
70938- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
70939+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
70940 set_fs(oldfs);
70941
70942 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
70943diff -urNp linux-3.0.7/net/9p/trans_virtio.c linux-3.0.7/net/9p/trans_virtio.c
70944--- linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:54:54.000000000 -0400
70945+++ linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:55:28.000000000 -0400
70946@@ -327,7 +327,7 @@ req_retry_pinned:
70947 } else {
70948 char *pbuf;
70949 if (req->tc->pubuf)
70950- pbuf = (__force char *) req->tc->pubuf;
70951+ pbuf = (char __force_kernel *) req->tc->pubuf;
70952 else
70953 pbuf = req->tc->pkbuf;
70954 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
70955@@ -357,7 +357,7 @@ req_retry_pinned:
70956 } else {
70957 char *pbuf;
70958 if (req->tc->pubuf)
70959- pbuf = (__force char *) req->tc->pubuf;
70960+ pbuf = (char __force_kernel *) req->tc->pubuf;
70961 else
70962 pbuf = req->tc->pkbuf;
70963
70964diff -urNp linux-3.0.7/net/atm/atm_misc.c linux-3.0.7/net/atm/atm_misc.c
70965--- linux-3.0.7/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
70966+++ linux-3.0.7/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
70967@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
70968 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70969 return 1;
70970 atm_return(vcc, truesize);
70971- atomic_inc(&vcc->stats->rx_drop);
70972+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70973 return 0;
70974 }
70975 EXPORT_SYMBOL(atm_charge);
70976@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
70977 }
70978 }
70979 atm_return(vcc, guess);
70980- atomic_inc(&vcc->stats->rx_drop);
70981+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70982 return NULL;
70983 }
70984 EXPORT_SYMBOL(atm_alloc_charge);
70985@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
70986
70987 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
70988 {
70989-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70990+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70991 __SONET_ITEMS
70992 #undef __HANDLE_ITEM
70993 }
70994@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
70995
70996 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
70997 {
70998-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70999+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71000 __SONET_ITEMS
71001 #undef __HANDLE_ITEM
71002 }
71003diff -urNp linux-3.0.7/net/atm/lec.h linux-3.0.7/net/atm/lec.h
71004--- linux-3.0.7/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
71005+++ linux-3.0.7/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
71006@@ -48,7 +48,7 @@ struct lane2_ops {
71007 const u8 *tlvs, u32 sizeoftlvs);
71008 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71009 const u8 *tlvs, u32 sizeoftlvs);
71010-};
71011+} __no_const;
71012
71013 /*
71014 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71015diff -urNp linux-3.0.7/net/atm/mpc.h linux-3.0.7/net/atm/mpc.h
71016--- linux-3.0.7/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
71017+++ linux-3.0.7/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
71018@@ -33,7 +33,7 @@ struct mpoa_client {
71019 struct mpc_parameters parameters; /* parameters for this client */
71020
71021 const struct net_device_ops *old_ops;
71022- struct net_device_ops new_ops;
71023+ net_device_ops_no_const new_ops;
71024 };
71025
71026
71027diff -urNp linux-3.0.7/net/atm/mpoa_caches.c linux-3.0.7/net/atm/mpoa_caches.c
71028--- linux-3.0.7/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
71029+++ linux-3.0.7/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
71030@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71031 struct timeval now;
71032 struct k_message msg;
71033
71034+ pax_track_stack();
71035+
71036 do_gettimeofday(&now);
71037
71038 read_lock_bh(&client->ingress_lock);
71039diff -urNp linux-3.0.7/net/atm/proc.c linux-3.0.7/net/atm/proc.c
71040--- linux-3.0.7/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
71041+++ linux-3.0.7/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
71042@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71043 const struct k_atm_aal_stats *stats)
71044 {
71045 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71046- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71047- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71048- atomic_read(&stats->rx_drop));
71049+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71050+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71051+ atomic_read_unchecked(&stats->rx_drop));
71052 }
71053
71054 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71055diff -urNp linux-3.0.7/net/atm/resources.c linux-3.0.7/net/atm/resources.c
71056--- linux-3.0.7/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
71057+++ linux-3.0.7/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
71058@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71059 static void copy_aal_stats(struct k_atm_aal_stats *from,
71060 struct atm_aal_stats *to)
71061 {
71062-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71063+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71064 __AAL_STAT_ITEMS
71065 #undef __HANDLE_ITEM
71066 }
71067@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71068 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71069 struct atm_aal_stats *to)
71070 {
71071-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71072+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71073 __AAL_STAT_ITEMS
71074 #undef __HANDLE_ITEM
71075 }
71076diff -urNp linux-3.0.7/net/batman-adv/hard-interface.c linux-3.0.7/net/batman-adv/hard-interface.c
71077--- linux-3.0.7/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
71078+++ linux-3.0.7/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
71079@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
71080 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71081 dev_add_pack(&hard_iface->batman_adv_ptype);
71082
71083- atomic_set(&hard_iface->seqno, 1);
71084- atomic_set(&hard_iface->frag_seqno, 1);
71085+ atomic_set_unchecked(&hard_iface->seqno, 1);
71086+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71087 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71088 hard_iface->net_dev->name);
71089
71090diff -urNp linux-3.0.7/net/batman-adv/routing.c linux-3.0.7/net/batman-adv/routing.c
71091--- linux-3.0.7/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
71092+++ linux-3.0.7/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
71093@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
71094 return;
71095
71096 /* could be changed by schedule_own_packet() */
71097- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71098+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71099
71100 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71101
71102diff -urNp linux-3.0.7/net/batman-adv/send.c linux-3.0.7/net/batman-adv/send.c
71103--- linux-3.0.7/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
71104+++ linux-3.0.7/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
71105@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
71106
71107 /* change sequence number to network order */
71108 batman_packet->seqno =
71109- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71110+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71111
71112 if (vis_server == VIS_TYPE_SERVER_SYNC)
71113 batman_packet->flags |= VIS_SERVER;
71114@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
71115 else
71116 batman_packet->gw_flags = 0;
71117
71118- atomic_inc(&hard_iface->seqno);
71119+ atomic_inc_unchecked(&hard_iface->seqno);
71120
71121 slide_own_bcast_window(hard_iface);
71122 send_time = own_send_time(bat_priv);
71123diff -urNp linux-3.0.7/net/batman-adv/soft-interface.c linux-3.0.7/net/batman-adv/soft-interface.c
71124--- linux-3.0.7/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
71125+++ linux-3.0.7/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
71126@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
71127
71128 /* set broadcast sequence number */
71129 bcast_packet->seqno =
71130- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71131+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71132
71133 add_bcast_packet_to_list(bat_priv, skb);
71134
71135@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
71136 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71137
71138 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71139- atomic_set(&bat_priv->bcast_seqno, 1);
71140+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71141 atomic_set(&bat_priv->tt_local_changed, 0);
71142
71143 bat_priv->primary_if = NULL;
71144diff -urNp linux-3.0.7/net/batman-adv/types.h linux-3.0.7/net/batman-adv/types.h
71145--- linux-3.0.7/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
71146+++ linux-3.0.7/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
71147@@ -38,8 +38,8 @@ struct hard_iface {
71148 int16_t if_num;
71149 char if_status;
71150 struct net_device *net_dev;
71151- atomic_t seqno;
71152- atomic_t frag_seqno;
71153+ atomic_unchecked_t seqno;
71154+ atomic_unchecked_t frag_seqno;
71155 unsigned char *packet_buff;
71156 int packet_len;
71157 struct kobject *hardif_obj;
71158@@ -142,7 +142,7 @@ struct bat_priv {
71159 atomic_t orig_interval; /* uint */
71160 atomic_t hop_penalty; /* uint */
71161 atomic_t log_level; /* uint */
71162- atomic_t bcast_seqno;
71163+ atomic_unchecked_t bcast_seqno;
71164 atomic_t bcast_queue_left;
71165 atomic_t batman_queue_left;
71166 char num_ifaces;
71167diff -urNp linux-3.0.7/net/batman-adv/unicast.c linux-3.0.7/net/batman-adv/unicast.c
71168--- linux-3.0.7/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
71169+++ linux-3.0.7/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
71170@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
71171 frag1->flags = UNI_FRAG_HEAD | large_tail;
71172 frag2->flags = large_tail;
71173
71174- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71175+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71176 frag1->seqno = htons(seqno - 1);
71177 frag2->seqno = htons(seqno);
71178
71179diff -urNp linux-3.0.7/net/bridge/br_multicast.c linux-3.0.7/net/bridge/br_multicast.c
71180--- linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:54:54.000000000 -0400
71181+++ linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:55:28.000000000 -0400
71182@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71183 nexthdr = ip6h->nexthdr;
71184 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71185
71186- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71187+ if (nexthdr != IPPROTO_ICMPV6)
71188 return 0;
71189
71190 /* Okay, we found ICMPv6 header */
71191diff -urNp linux-3.0.7/net/bridge/netfilter/ebtables.c linux-3.0.7/net/bridge/netfilter/ebtables.c
71192--- linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
71193+++ linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
71194@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
71195 tmp.valid_hooks = t->table->valid_hooks;
71196 }
71197 mutex_unlock(&ebt_mutex);
71198- if (copy_to_user(user, &tmp, *len) != 0){
71199+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71200 BUGPRINT("c2u Didn't work\n");
71201 ret = -EFAULT;
71202 break;
71203@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
71204 int ret;
71205 void __user *pos;
71206
71207+ pax_track_stack();
71208+
71209 memset(&tinfo, 0, sizeof(tinfo));
71210
71211 if (cmd == EBT_SO_GET_ENTRIES) {
71212diff -urNp linux-3.0.7/net/caif/caif_socket.c linux-3.0.7/net/caif/caif_socket.c
71213--- linux-3.0.7/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
71214+++ linux-3.0.7/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
71215@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71216 #ifdef CONFIG_DEBUG_FS
71217 struct debug_fs_counter {
71218 atomic_t caif_nr_socks;
71219- atomic_t caif_sock_create;
71220- atomic_t num_connect_req;
71221- atomic_t num_connect_resp;
71222- atomic_t num_connect_fail_resp;
71223- atomic_t num_disconnect;
71224- atomic_t num_remote_shutdown_ind;
71225- atomic_t num_tx_flow_off_ind;
71226- atomic_t num_tx_flow_on_ind;
71227- atomic_t num_rx_flow_off;
71228- atomic_t num_rx_flow_on;
71229+ atomic_unchecked_t caif_sock_create;
71230+ atomic_unchecked_t num_connect_req;
71231+ atomic_unchecked_t num_connect_resp;
71232+ atomic_unchecked_t num_connect_fail_resp;
71233+ atomic_unchecked_t num_disconnect;
71234+ atomic_unchecked_t num_remote_shutdown_ind;
71235+ atomic_unchecked_t num_tx_flow_off_ind;
71236+ atomic_unchecked_t num_tx_flow_on_ind;
71237+ atomic_unchecked_t num_rx_flow_off;
71238+ atomic_unchecked_t num_rx_flow_on;
71239 };
71240 static struct debug_fs_counter cnt;
71241 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71242+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71243 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71244 #else
71245 #define dbfs_atomic_inc(v) 0
71246@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71247 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71248 sk_rcvbuf_lowwater(cf_sk));
71249 set_rx_flow_off(cf_sk);
71250- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71251+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71252 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71253 }
71254
71255@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71256 set_rx_flow_off(cf_sk);
71257 if (net_ratelimit())
71258 pr_debug("sending flow OFF due to rmem_schedule\n");
71259- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71260+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71261 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71262 }
71263 skb->dev = NULL;
71264@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71265 switch (flow) {
71266 case CAIF_CTRLCMD_FLOW_ON_IND:
71267 /* OK from modem to start sending again */
71268- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71269+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71270 set_tx_flow_on(cf_sk);
71271 cf_sk->sk.sk_state_change(&cf_sk->sk);
71272 break;
71273
71274 case CAIF_CTRLCMD_FLOW_OFF_IND:
71275 /* Modem asks us to shut up */
71276- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71277+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71278 set_tx_flow_off(cf_sk);
71279 cf_sk->sk.sk_state_change(&cf_sk->sk);
71280 break;
71281@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71282 /* We're now connected */
71283 caif_client_register_refcnt(&cf_sk->layer,
71284 cfsk_hold, cfsk_put);
71285- dbfs_atomic_inc(&cnt.num_connect_resp);
71286+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71287 cf_sk->sk.sk_state = CAIF_CONNECTED;
71288 set_tx_flow_on(cf_sk);
71289 cf_sk->sk.sk_state_change(&cf_sk->sk);
71290@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71291
71292 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71293 /* Connect request failed */
71294- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71295+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71296 cf_sk->sk.sk_err = ECONNREFUSED;
71297 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71298 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71299@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71300
71301 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71302 /* Modem has closed this connection, or device is down. */
71303- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71304+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71305 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71306 cf_sk->sk.sk_err = ECONNRESET;
71307 set_rx_flow_on(cf_sk);
71308@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71309 return;
71310
71311 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71312- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71313+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71314 set_rx_flow_on(cf_sk);
71315 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71316 }
71317@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71318 /*ifindex = id of the interface.*/
71319 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71320
71321- dbfs_atomic_inc(&cnt.num_connect_req);
71322+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71323 cf_sk->layer.receive = caif_sktrecv_cb;
71324
71325 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71326@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71327 spin_unlock_bh(&sk->sk_receive_queue.lock);
71328 sock->sk = NULL;
71329
71330- dbfs_atomic_inc(&cnt.num_disconnect);
71331+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71332
71333 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71334 if (cf_sk->debugfs_socket_dir != NULL)
71335@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71336 cf_sk->conn_req.protocol = protocol;
71337 /* Increase the number of sockets created. */
71338 dbfs_atomic_inc(&cnt.caif_nr_socks);
71339- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71340+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71341 #ifdef CONFIG_DEBUG_FS
71342 if (!IS_ERR(debugfsdir)) {
71343
71344diff -urNp linux-3.0.7/net/caif/cfctrl.c linux-3.0.7/net/caif/cfctrl.c
71345--- linux-3.0.7/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
71346+++ linux-3.0.7/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
71347@@ -9,6 +9,7 @@
71348 #include <linux/stddef.h>
71349 #include <linux/spinlock.h>
71350 #include <linux/slab.h>
71351+#include <linux/sched.h>
71352 #include <net/caif/caif_layer.h>
71353 #include <net/caif/cfpkt.h>
71354 #include <net/caif/cfctrl.h>
71355@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71356 dev_info.id = 0xff;
71357 memset(this, 0, sizeof(*this));
71358 cfsrvl_init(&this->serv, 0, &dev_info, false);
71359- atomic_set(&this->req_seq_no, 1);
71360- atomic_set(&this->rsp_seq_no, 1);
71361+ atomic_set_unchecked(&this->req_seq_no, 1);
71362+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71363 this->serv.layer.receive = cfctrl_recv;
71364 sprintf(this->serv.layer.name, "ctrl");
71365 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71366@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71367 struct cfctrl_request_info *req)
71368 {
71369 spin_lock_bh(&ctrl->info_list_lock);
71370- atomic_inc(&ctrl->req_seq_no);
71371- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71372+ atomic_inc_unchecked(&ctrl->req_seq_no);
71373+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71374 list_add_tail(&req->list, &ctrl->list);
71375 spin_unlock_bh(&ctrl->info_list_lock);
71376 }
71377@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71378 if (p != first)
71379 pr_warn("Requests are not received in order\n");
71380
71381- atomic_set(&ctrl->rsp_seq_no,
71382+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71383 p->sequence_no);
71384 list_del(&p->list);
71385 goto out;
71386@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71387 struct cfctrl *cfctrl = container_obj(layer);
71388 struct cfctrl_request_info rsp, *req;
71389
71390+ pax_track_stack();
71391
71392 cfpkt_extr_head(pkt, &cmdrsp, 1);
71393 cmd = cmdrsp & CFCTRL_CMD_MASK;
71394diff -urNp linux-3.0.7/net/compat.c linux-3.0.7/net/compat.c
71395--- linux-3.0.7/net/compat.c 2011-07-21 22:17:23.000000000 -0400
71396+++ linux-3.0.7/net/compat.c 2011-10-06 04:17:55.000000000 -0400
71397@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71398 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71399 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71400 return -EFAULT;
71401- kmsg->msg_name = compat_ptr(tmp1);
71402- kmsg->msg_iov = compat_ptr(tmp2);
71403- kmsg->msg_control = compat_ptr(tmp3);
71404+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71405+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71406+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71407 return 0;
71408 }
71409
71410@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71411
71412 if (kern_msg->msg_namelen) {
71413 if (mode == VERIFY_READ) {
71414- int err = move_addr_to_kernel(kern_msg->msg_name,
71415+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71416 kern_msg->msg_namelen,
71417 kern_address);
71418 if (err < 0)
71419@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71420 kern_msg->msg_name = NULL;
71421
71422 tot_len = iov_from_user_compat_to_kern(kern_iov,
71423- (struct compat_iovec __user *)kern_msg->msg_iov,
71424+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71425 kern_msg->msg_iovlen);
71426 if (tot_len >= 0)
71427 kern_msg->msg_iov = kern_iov;
71428@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71429
71430 #define CMSG_COMPAT_FIRSTHDR(msg) \
71431 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71432- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71433+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71434 (struct compat_cmsghdr __user *)NULL)
71435
71436 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71437 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71438 (ucmlen) <= (unsigned long) \
71439 ((mhdr)->msg_controllen - \
71440- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71441+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71442
71443 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71444 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71445 {
71446 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71447- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71448+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71449 msg->msg_controllen)
71450 return NULL;
71451 return (struct compat_cmsghdr __user *)ptr;
71452@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71453 {
71454 struct compat_timeval ctv;
71455 struct compat_timespec cts[3];
71456- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71457+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71458 struct compat_cmsghdr cmhdr;
71459 int cmlen;
71460
71461@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71462
71463 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71464 {
71465- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71466+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71467 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71468 int fdnum = scm->fp->count;
71469 struct file **fp = scm->fp->fp;
71470@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71471 return -EFAULT;
71472 old_fs = get_fs();
71473 set_fs(KERNEL_DS);
71474- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71475+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71476 set_fs(old_fs);
71477
71478 return err;
71479@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71480 len = sizeof(ktime);
71481 old_fs = get_fs();
71482 set_fs(KERNEL_DS);
71483- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71484+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71485 set_fs(old_fs);
71486
71487 if (!err) {
71488@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71489 case MCAST_JOIN_GROUP:
71490 case MCAST_LEAVE_GROUP:
71491 {
71492- struct compat_group_req __user *gr32 = (void *)optval;
71493+ struct compat_group_req __user *gr32 = (void __user *)optval;
71494 struct group_req __user *kgr =
71495 compat_alloc_user_space(sizeof(struct group_req));
71496 u32 interface;
71497@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71498 case MCAST_BLOCK_SOURCE:
71499 case MCAST_UNBLOCK_SOURCE:
71500 {
71501- struct compat_group_source_req __user *gsr32 = (void *)optval;
71502+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71503 struct group_source_req __user *kgsr = compat_alloc_user_space(
71504 sizeof(struct group_source_req));
71505 u32 interface;
71506@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71507 }
71508 case MCAST_MSFILTER:
71509 {
71510- struct compat_group_filter __user *gf32 = (void *)optval;
71511+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71512 struct group_filter __user *kgf;
71513 u32 interface, fmode, numsrc;
71514
71515@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71516 char __user *optval, int __user *optlen,
71517 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71518 {
71519- struct compat_group_filter __user *gf32 = (void *)optval;
71520+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71521 struct group_filter __user *kgf;
71522 int __user *koptlen;
71523 u32 interface, fmode, numsrc;
71524diff -urNp linux-3.0.7/net/core/datagram.c linux-3.0.7/net/core/datagram.c
71525--- linux-3.0.7/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
71526+++ linux-3.0.7/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
71527@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71528 }
71529
71530 kfree_skb(skb);
71531- atomic_inc(&sk->sk_drops);
71532+ atomic_inc_unchecked(&sk->sk_drops);
71533 sk_mem_reclaim_partial(sk);
71534
71535 return err;
71536diff -urNp linux-3.0.7/net/core/dev.c linux-3.0.7/net/core/dev.c
71537--- linux-3.0.7/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
71538+++ linux-3.0.7/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
71539@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
71540 if (no_module && capable(CAP_NET_ADMIN))
71541 no_module = request_module("netdev-%s", name);
71542 if (no_module && capable(CAP_SYS_MODULE)) {
71543+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71544+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71545+#else
71546 if (!request_module("%s", name))
71547 pr_err("Loading kernel module for a network device "
71548 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71549 "instead\n", name);
71550+#endif
71551 }
71552 }
71553 EXPORT_SYMBOL(dev_load);
71554@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
71555
71556 struct dev_gso_cb {
71557 void (*destructor)(struct sk_buff *skb);
71558-};
71559+} __no_const;
71560
71561 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71562
71563@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
71564 }
71565 EXPORT_SYMBOL(netif_rx_ni);
71566
71567-static void net_tx_action(struct softirq_action *h)
71568+static void net_tx_action(void)
71569 {
71570 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71571
71572@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
71573 }
71574 EXPORT_SYMBOL(netif_napi_del);
71575
71576-static void net_rx_action(struct softirq_action *h)
71577+static void net_rx_action(void)
71578 {
71579 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71580 unsigned long time_limit = jiffies + 2;
71581diff -urNp linux-3.0.7/net/core/flow.c linux-3.0.7/net/core/flow.c
71582--- linux-3.0.7/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
71583+++ linux-3.0.7/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
71584@@ -60,7 +60,7 @@ struct flow_cache {
71585 struct timer_list rnd_timer;
71586 };
71587
71588-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71589+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71590 EXPORT_SYMBOL(flow_cache_genid);
71591 static struct flow_cache flow_cache_global;
71592 static struct kmem_cache *flow_cachep __read_mostly;
71593@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
71594
71595 static int flow_entry_valid(struct flow_cache_entry *fle)
71596 {
71597- if (atomic_read(&flow_cache_genid) != fle->genid)
71598+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71599 return 0;
71600 if (fle->object && !fle->object->ops->check(fle->object))
71601 return 0;
71602@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
71603 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71604 fcp->hash_count++;
71605 }
71606- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71607+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
71608 flo = fle->object;
71609 if (!flo)
71610 goto ret_object;
71611@@ -274,7 +274,7 @@ nocache:
71612 }
71613 flo = resolver(net, key, family, dir, flo, ctx);
71614 if (fle) {
71615- fle->genid = atomic_read(&flow_cache_genid);
71616+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71617 if (!IS_ERR(flo))
71618 fle->object = flo;
71619 else
71620diff -urNp linux-3.0.7/net/core/iovec.c linux-3.0.7/net/core/iovec.c
71621--- linux-3.0.7/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
71622+++ linux-3.0.7/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
71623@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
71624 if (m->msg_namelen) {
71625 if (mode == VERIFY_READ) {
71626 void __user *namep;
71627- namep = (void __user __force *) m->msg_name;
71628+ namep = (void __force_user *) m->msg_name;
71629 err = move_addr_to_kernel(namep, m->msg_namelen,
71630 address);
71631 if (err < 0)
71632@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
71633 }
71634
71635 size = m->msg_iovlen * sizeof(struct iovec);
71636- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
71637+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
71638 return -EFAULT;
71639
71640 m->msg_iov = iov;
71641diff -urNp linux-3.0.7/net/core/rtnetlink.c linux-3.0.7/net/core/rtnetlink.c
71642--- linux-3.0.7/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
71643+++ linux-3.0.7/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
71644@@ -56,7 +56,7 @@
71645 struct rtnl_link {
71646 rtnl_doit_func doit;
71647 rtnl_dumpit_func dumpit;
71648-};
71649+} __no_const;
71650
71651 static DEFINE_MUTEX(rtnl_mutex);
71652
71653diff -urNp linux-3.0.7/net/core/scm.c linux-3.0.7/net/core/scm.c
71654--- linux-3.0.7/net/core/scm.c 2011-10-16 21:54:54.000000000 -0400
71655+++ linux-3.0.7/net/core/scm.c 2011-10-16 21:55:28.000000000 -0400
71656@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
71657 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
71658 {
71659 struct cmsghdr __user *cm
71660- = (__force struct cmsghdr __user *)msg->msg_control;
71661+ = (struct cmsghdr __force_user *)msg->msg_control;
71662 struct cmsghdr cmhdr;
71663 int cmlen = CMSG_LEN(len);
71664 int err;
71665@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
71666 err = -EFAULT;
71667 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
71668 goto out;
71669- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
71670+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
71671 goto out;
71672 cmlen = CMSG_SPACE(len);
71673 if (msg->msg_controllen < cmlen)
71674@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
71675 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
71676 {
71677 struct cmsghdr __user *cm
71678- = (__force struct cmsghdr __user*)msg->msg_control;
71679+ = (struct cmsghdr __force_user *)msg->msg_control;
71680
71681 int fdmax = 0;
71682 int fdnum = scm->fp->count;
71683@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
71684 if (fdnum < fdmax)
71685 fdmax = fdnum;
71686
71687- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
71688+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
71689 i++, cmfptr++)
71690 {
71691 int new_fd;
71692diff -urNp linux-3.0.7/net/core/skbuff.c linux-3.0.7/net/core/skbuff.c
71693--- linux-3.0.7/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
71694+++ linux-3.0.7/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
71695@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
71696 struct sock *sk = skb->sk;
71697 int ret = 0;
71698
71699+ pax_track_stack();
71700+
71701 if (splice_grow_spd(pipe, &spd))
71702 return -ENOMEM;
71703
71704diff -urNp linux-3.0.7/net/core/sock.c linux-3.0.7/net/core/sock.c
71705--- linux-3.0.7/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
71706+++ linux-3.0.7/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
71707@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71708 */
71709 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
71710 (unsigned)sk->sk_rcvbuf) {
71711- atomic_inc(&sk->sk_drops);
71712+ atomic_inc_unchecked(&sk->sk_drops);
71713 return -ENOMEM;
71714 }
71715
71716@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71717 return err;
71718
71719 if (!sk_rmem_schedule(sk, skb->truesize)) {
71720- atomic_inc(&sk->sk_drops);
71721+ atomic_inc_unchecked(&sk->sk_drops);
71722 return -ENOBUFS;
71723 }
71724
71725@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71726 skb_dst_force(skb);
71727
71728 spin_lock_irqsave(&list->lock, flags);
71729- skb->dropcount = atomic_read(&sk->sk_drops);
71730+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
71731 __skb_queue_tail(list, skb);
71732 spin_unlock_irqrestore(&list->lock, flags);
71733
71734@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
71735 skb->dev = NULL;
71736
71737 if (sk_rcvqueues_full(sk, skb)) {
71738- atomic_inc(&sk->sk_drops);
71739+ atomic_inc_unchecked(&sk->sk_drops);
71740 goto discard_and_relse;
71741 }
71742 if (nested)
71743@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
71744 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
71745 } else if (sk_add_backlog(sk, skb)) {
71746 bh_unlock_sock(sk);
71747- atomic_inc(&sk->sk_drops);
71748+ atomic_inc_unchecked(&sk->sk_drops);
71749 goto discard_and_relse;
71750 }
71751
71752@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
71753 if (len > sizeof(peercred))
71754 len = sizeof(peercred);
71755 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
71756- if (copy_to_user(optval, &peercred, len))
71757+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
71758 return -EFAULT;
71759 goto lenout;
71760 }
71761@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
71762 return -ENOTCONN;
71763 if (lv < len)
71764 return -EINVAL;
71765- if (copy_to_user(optval, address, len))
71766+ if (len > sizeof(address) || copy_to_user(optval, address, len))
71767 return -EFAULT;
71768 goto lenout;
71769 }
71770@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
71771
71772 if (len > lv)
71773 len = lv;
71774- if (copy_to_user(optval, &v, len))
71775+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
71776 return -EFAULT;
71777 lenout:
71778 if (put_user(len, optlen))
71779@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
71780 */
71781 smp_wmb();
71782 atomic_set(&sk->sk_refcnt, 1);
71783- atomic_set(&sk->sk_drops, 0);
71784+ atomic_set_unchecked(&sk->sk_drops, 0);
71785 }
71786 EXPORT_SYMBOL(sock_init_data);
71787
71788diff -urNp linux-3.0.7/net/decnet/sysctl_net_decnet.c linux-3.0.7/net/decnet/sysctl_net_decnet.c
71789--- linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
71790+++ linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
71791@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
71792
71793 if (len > *lenp) len = *lenp;
71794
71795- if (copy_to_user(buffer, addr, len))
71796+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71797 return -EFAULT;
71798
71799 *lenp = len;
71800@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
71801
71802 if (len > *lenp) len = *lenp;
71803
71804- if (copy_to_user(buffer, devname, len))
71805+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71806 return -EFAULT;
71807
71808 *lenp = len;
71809diff -urNp linux-3.0.7/net/econet/Kconfig linux-3.0.7/net/econet/Kconfig
71810--- linux-3.0.7/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
71811+++ linux-3.0.7/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
71812@@ -4,7 +4,7 @@
71813
71814 config ECONET
71815 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71816- depends on EXPERIMENTAL && INET
71817+ depends on EXPERIMENTAL && INET && BROKEN
71818 ---help---
71819 Econet is a fairly old and slow networking protocol mainly used by
71820 Acorn computers to access file and print servers. It uses native
71821diff -urNp linux-3.0.7/net/ipv4/fib_frontend.c linux-3.0.7/net/ipv4/fib_frontend.c
71822--- linux-3.0.7/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
71823+++ linux-3.0.7/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
71824@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
71825 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71826 fib_sync_up(dev);
71827 #endif
71828- atomic_inc(&net->ipv4.dev_addr_genid);
71829+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71830 rt_cache_flush(dev_net(dev), -1);
71831 break;
71832 case NETDEV_DOWN:
71833 fib_del_ifaddr(ifa, NULL);
71834- atomic_inc(&net->ipv4.dev_addr_genid);
71835+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71836 if (ifa->ifa_dev->ifa_list == NULL) {
71837 /* Last address was deleted from this interface.
71838 * Disable IP.
71839@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
71840 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71841 fib_sync_up(dev);
71842 #endif
71843- atomic_inc(&net->ipv4.dev_addr_genid);
71844+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71845 rt_cache_flush(dev_net(dev), -1);
71846 break;
71847 case NETDEV_DOWN:
71848diff -urNp linux-3.0.7/net/ipv4/fib_semantics.c linux-3.0.7/net/ipv4/fib_semantics.c
71849--- linux-3.0.7/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
71850+++ linux-3.0.7/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
71851@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
71852 nh->nh_saddr = inet_select_addr(nh->nh_dev,
71853 nh->nh_gw,
71854 nh->nh_parent->fib_scope);
71855- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
71856+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
71857
71858 return nh->nh_saddr;
71859 }
71860diff -urNp linux-3.0.7/net/ipv4/inet_diag.c linux-3.0.7/net/ipv4/inet_diag.c
71861--- linux-3.0.7/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
71862+++ linux-3.0.7/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
71863@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
71864 r->idiag_retrans = 0;
71865
71866 r->id.idiag_if = sk->sk_bound_dev_if;
71867+
71868+#ifdef CONFIG_GRKERNSEC_HIDESYM
71869+ r->id.idiag_cookie[0] = 0;
71870+ r->id.idiag_cookie[1] = 0;
71871+#else
71872 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71873 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71874+#endif
71875
71876 r->id.idiag_sport = inet->inet_sport;
71877 r->id.idiag_dport = inet->inet_dport;
71878@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
71879 r->idiag_family = tw->tw_family;
71880 r->idiag_retrans = 0;
71881 r->id.idiag_if = tw->tw_bound_dev_if;
71882+
71883+#ifdef CONFIG_GRKERNSEC_HIDESYM
71884+ r->id.idiag_cookie[0] = 0;
71885+ r->id.idiag_cookie[1] = 0;
71886+#else
71887 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71888 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71889+#endif
71890+
71891 r->id.idiag_sport = tw->tw_sport;
71892 r->id.idiag_dport = tw->tw_dport;
71893 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71894@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
71895 if (sk == NULL)
71896 goto unlock;
71897
71898+#ifndef CONFIG_GRKERNSEC_HIDESYM
71899 err = -ESTALE;
71900 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71901 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71902 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71903 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71904 goto out;
71905+#endif
71906
71907 err = -ENOMEM;
71908 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71909@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
71910 r->idiag_retrans = req->retrans;
71911
71912 r->id.idiag_if = sk->sk_bound_dev_if;
71913+
71914+#ifdef CONFIG_GRKERNSEC_HIDESYM
71915+ r->id.idiag_cookie[0] = 0;
71916+ r->id.idiag_cookie[1] = 0;
71917+#else
71918 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71919 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71920+#endif
71921
71922 tmo = req->expires - jiffies;
71923 if (tmo < 0)
71924diff -urNp linux-3.0.7/net/ipv4/inet_hashtables.c linux-3.0.7/net/ipv4/inet_hashtables.c
71925--- linux-3.0.7/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
71926+++ linux-3.0.7/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
71927@@ -18,12 +18,15 @@
71928 #include <linux/sched.h>
71929 #include <linux/slab.h>
71930 #include <linux/wait.h>
71931+#include <linux/security.h>
71932
71933 #include <net/inet_connection_sock.h>
71934 #include <net/inet_hashtables.h>
71935 #include <net/secure_seq.h>
71936 #include <net/ip.h>
71937
71938+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71939+
71940 /*
71941 * Allocate and initialize a new local port bind bucket.
71942 * The bindhash mutex for snum's hash chain must be held here.
71943@@ -530,6 +533,8 @@ ok:
71944 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
71945 spin_unlock(&head->lock);
71946
71947+ gr_update_task_in_ip_table(current, inet_sk(sk));
71948+
71949 if (tw) {
71950 inet_twsk_deschedule(tw, death_row);
71951 while (twrefcnt) {
71952diff -urNp linux-3.0.7/net/ipv4/inetpeer.c linux-3.0.7/net/ipv4/inetpeer.c
71953--- linux-3.0.7/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
71954+++ linux-3.0.7/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
71955@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
71956 unsigned int sequence;
71957 int invalidated, newrefcnt = 0;
71958
71959+ pax_track_stack();
71960+
71961 /* Look up for the address quickly, lockless.
71962 * Because of a concurrent writer, we might not find an existing entry.
71963 */
71964@@ -517,8 +519,8 @@ found: /* The existing node has been fo
71965 if (p) {
71966 p->daddr = *daddr;
71967 atomic_set(&p->refcnt, 1);
71968- atomic_set(&p->rid, 0);
71969- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
71970+ atomic_set_unchecked(&p->rid, 0);
71971+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
71972 p->tcp_ts_stamp = 0;
71973 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
71974 p->rate_tokens = 0;
71975diff -urNp linux-3.0.7/net/ipv4/ipconfig.c linux-3.0.7/net/ipv4/ipconfig.c
71976--- linux-3.0.7/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
71977+++ linux-3.0.7/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
71978@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
71979
71980 mm_segment_t oldfs = get_fs();
71981 set_fs(get_ds());
71982- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
71983+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
71984 set_fs(oldfs);
71985 return res;
71986 }
71987@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
71988
71989 mm_segment_t oldfs = get_fs();
71990 set_fs(get_ds());
71991- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
71992+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
71993 set_fs(oldfs);
71994 return res;
71995 }
71996@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
71997
71998 mm_segment_t oldfs = get_fs();
71999 set_fs(get_ds());
72000- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72001+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72002 set_fs(oldfs);
72003 return res;
72004 }
72005diff -urNp linux-3.0.7/net/ipv4/ip_fragment.c linux-3.0.7/net/ipv4/ip_fragment.c
72006--- linux-3.0.7/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
72007+++ linux-3.0.7/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
72008@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
72009 return 0;
72010
72011 start = qp->rid;
72012- end = atomic_inc_return(&peer->rid);
72013+ end = atomic_inc_return_unchecked(&peer->rid);
72014 qp->rid = end;
72015
72016 rc = qp->q.fragments && (end - start) > max;
72017diff -urNp linux-3.0.7/net/ipv4/ip_sockglue.c linux-3.0.7/net/ipv4/ip_sockglue.c
72018--- linux-3.0.7/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
72019+++ linux-3.0.7/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
72020@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72021 int val;
72022 int len;
72023
72024+ pax_track_stack();
72025+
72026 if (level != SOL_IP)
72027 return -EOPNOTSUPP;
72028
72029@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72030 len = min_t(unsigned int, len, opt->optlen);
72031 if (put_user(len, optlen))
72032 return -EFAULT;
72033- if (copy_to_user(optval, opt->__data, len))
72034+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72035+ copy_to_user(optval, opt->__data, len))
72036 return -EFAULT;
72037 return 0;
72038 }
72039@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72040 if (sk->sk_type != SOCK_STREAM)
72041 return -ENOPROTOOPT;
72042
72043- msg.msg_control = optval;
72044+ msg.msg_control = (void __force_kernel *)optval;
72045 msg.msg_controllen = len;
72046 msg.msg_flags = 0;
72047
72048diff -urNp linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c
72049--- linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
72050+++ linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
72051@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72052
72053 *len = 0;
72054
72055- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72056+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72057 if (*octets == NULL) {
72058 if (net_ratelimit())
72059 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72060diff -urNp linux-3.0.7/net/ipv4/ping.c linux-3.0.7/net/ipv4/ping.c
72061--- linux-3.0.7/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
72062+++ linux-3.0.7/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
72063@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72064 sk_rmem_alloc_get(sp),
72065 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72066 atomic_read(&sp->sk_refcnt), sp,
72067- atomic_read(&sp->sk_drops), len);
72068+ atomic_read_unchecked(&sp->sk_drops), len);
72069 }
72070
72071 static int ping_seq_show(struct seq_file *seq, void *v)
72072diff -urNp linux-3.0.7/net/ipv4/raw.c linux-3.0.7/net/ipv4/raw.c
72073--- linux-3.0.7/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
72074+++ linux-3.0.7/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
72075@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72076 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72077 {
72078 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72079- atomic_inc(&sk->sk_drops);
72080+ atomic_inc_unchecked(&sk->sk_drops);
72081 kfree_skb(skb);
72082 return NET_RX_DROP;
72083 }
72084@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
72085
72086 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72087 {
72088+ struct icmp_filter filter;
72089+
72090 if (optlen > sizeof(struct icmp_filter))
72091 optlen = sizeof(struct icmp_filter);
72092- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72093+ if (copy_from_user(&filter, optval, optlen))
72094 return -EFAULT;
72095+ raw_sk(sk)->filter = filter;
72096 return 0;
72097 }
72098
72099 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72100 {
72101 int len, ret = -EFAULT;
72102+ struct icmp_filter filter;
72103
72104 if (get_user(len, optlen))
72105 goto out;
72106@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
72107 if (len > sizeof(struct icmp_filter))
72108 len = sizeof(struct icmp_filter);
72109 ret = -EFAULT;
72110- if (put_user(len, optlen) ||
72111- copy_to_user(optval, &raw_sk(sk)->filter, len))
72112+ filter = raw_sk(sk)->filter;
72113+ if (put_user(len, optlen) || len > sizeof filter ||
72114+ copy_to_user(optval, &filter, len))
72115 goto out;
72116 ret = 0;
72117 out: return ret;
72118@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72119 sk_wmem_alloc_get(sp),
72120 sk_rmem_alloc_get(sp),
72121 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72122- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72123+ atomic_read(&sp->sk_refcnt),
72124+#ifdef CONFIG_GRKERNSEC_HIDESYM
72125+ NULL,
72126+#else
72127+ sp,
72128+#endif
72129+ atomic_read_unchecked(&sp->sk_drops));
72130 }
72131
72132 static int raw_seq_show(struct seq_file *seq, void *v)
72133diff -urNp linux-3.0.7/net/ipv4/route.c linux-3.0.7/net/ipv4/route.c
72134--- linux-3.0.7/net/ipv4/route.c 2011-10-16 21:54:54.000000000 -0400
72135+++ linux-3.0.7/net/ipv4/route.c 2011-10-16 21:55:28.000000000 -0400
72136@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
72137
72138 static inline int rt_genid(struct net *net)
72139 {
72140- return atomic_read(&net->ipv4.rt_genid);
72141+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72142 }
72143
72144 #ifdef CONFIG_PROC_FS
72145@@ -832,7 +832,7 @@ static void rt_cache_invalidate(struct n
72146 unsigned char shuffle;
72147
72148 get_random_bytes(&shuffle, sizeof(shuffle));
72149- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72150+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72151 }
72152
72153 /*
72154@@ -2832,7 +2832,7 @@ static int rt_fill_info(struct net *net,
72155 error = rt->dst.error;
72156 if (peer) {
72157 inet_peer_refcheck(rt->peer);
72158- id = atomic_read(&peer->ip_id_count) & 0xffff;
72159+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72160 if (peer->tcp_ts_stamp) {
72161 ts = peer->tcp_ts;
72162 tsage = get_seconds() - peer->tcp_ts_stamp;
72163diff -urNp linux-3.0.7/net/ipv4/tcp.c linux-3.0.7/net/ipv4/tcp.c
72164--- linux-3.0.7/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
72165+++ linux-3.0.7/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
72166@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72167 int val;
72168 int err = 0;
72169
72170+ pax_track_stack();
72171+
72172 /* These are data/string values, all the others are ints */
72173 switch (optname) {
72174 case TCP_CONGESTION: {
72175@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72176 struct tcp_sock *tp = tcp_sk(sk);
72177 int val, len;
72178
72179+ pax_track_stack();
72180+
72181 if (get_user(len, optlen))
72182 return -EFAULT;
72183
72184diff -urNp linux-3.0.7/net/ipv4/tcp_ipv4.c linux-3.0.7/net/ipv4/tcp_ipv4.c
72185--- linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
72186+++ linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
72187@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72188 int sysctl_tcp_low_latency __read_mostly;
72189 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72190
72191+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72192+extern int grsec_enable_blackhole;
72193+#endif
72194
72195 #ifdef CONFIG_TCP_MD5SIG
72196 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72197@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72198 return 0;
72199
72200 reset:
72201+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72202+ if (!grsec_enable_blackhole)
72203+#endif
72204 tcp_v4_send_reset(rsk, skb);
72205 discard:
72206 kfree_skb(skb);
72207@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72208 TCP_SKB_CB(skb)->sacked = 0;
72209
72210 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72211- if (!sk)
72212+ if (!sk) {
72213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72214+ ret = 1;
72215+#endif
72216 goto no_tcp_socket;
72217-
72218+ }
72219 process:
72220- if (sk->sk_state == TCP_TIME_WAIT)
72221+ if (sk->sk_state == TCP_TIME_WAIT) {
72222+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72223+ ret = 2;
72224+#endif
72225 goto do_time_wait;
72226+ }
72227
72228 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72229 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72230@@ -1724,6 +1737,10 @@ no_tcp_socket:
72231 bad_packet:
72232 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72233 } else {
72234+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72235+ if (!grsec_enable_blackhole || (ret == 1 &&
72236+ (skb->dev->flags & IFF_LOOPBACK)))
72237+#endif
72238 tcp_v4_send_reset(NULL, skb);
72239 }
72240
72241@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
72242 0, /* non standard timer */
72243 0, /* open_requests have no inode */
72244 atomic_read(&sk->sk_refcnt),
72245+#ifdef CONFIG_GRKERNSEC_HIDESYM
72246+ NULL,
72247+#else
72248 req,
72249+#endif
72250 len);
72251 }
72252
72253@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
72254 sock_i_uid(sk),
72255 icsk->icsk_probes_out,
72256 sock_i_ino(sk),
72257- atomic_read(&sk->sk_refcnt), sk,
72258+ atomic_read(&sk->sk_refcnt),
72259+#ifdef CONFIG_GRKERNSEC_HIDESYM
72260+ NULL,
72261+#else
72262+ sk,
72263+#endif
72264 jiffies_to_clock_t(icsk->icsk_rto),
72265 jiffies_to_clock_t(icsk->icsk_ack.ato),
72266 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72267@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
72268 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72269 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72270 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72271- atomic_read(&tw->tw_refcnt), tw, len);
72272+ atomic_read(&tw->tw_refcnt),
72273+#ifdef CONFIG_GRKERNSEC_HIDESYM
72274+ NULL,
72275+#else
72276+ tw,
72277+#endif
72278+ len);
72279 }
72280
72281 #define TMPSZ 150
72282diff -urNp linux-3.0.7/net/ipv4/tcp_minisocks.c linux-3.0.7/net/ipv4/tcp_minisocks.c
72283--- linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
72284+++ linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
72285@@ -27,6 +27,10 @@
72286 #include <net/inet_common.h>
72287 #include <net/xfrm.h>
72288
72289+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72290+extern int grsec_enable_blackhole;
72291+#endif
72292+
72293 int sysctl_tcp_syncookies __read_mostly = 1;
72294 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72295
72296@@ -745,6 +749,10 @@ listen_overflow:
72297
72298 embryonic_reset:
72299 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72300+
72301+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72302+ if (!grsec_enable_blackhole)
72303+#endif
72304 if (!(flg & TCP_FLAG_RST))
72305 req->rsk_ops->send_reset(sk, skb);
72306
72307diff -urNp linux-3.0.7/net/ipv4/tcp_output.c linux-3.0.7/net/ipv4/tcp_output.c
72308--- linux-3.0.7/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
72309+++ linux-3.0.7/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
72310@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72311 int mss;
72312 int s_data_desired = 0;
72313
72314+ pax_track_stack();
72315+
72316 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72317 s_data_desired = cvp->s_data_desired;
72318 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72319diff -urNp linux-3.0.7/net/ipv4/tcp_probe.c linux-3.0.7/net/ipv4/tcp_probe.c
72320--- linux-3.0.7/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
72321+++ linux-3.0.7/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
72322@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72323 if (cnt + width >= len)
72324 break;
72325
72326- if (copy_to_user(buf + cnt, tbuf, width))
72327+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72328 return -EFAULT;
72329 cnt += width;
72330 }
72331diff -urNp linux-3.0.7/net/ipv4/tcp_timer.c linux-3.0.7/net/ipv4/tcp_timer.c
72332--- linux-3.0.7/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
72333+++ linux-3.0.7/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
72334@@ -22,6 +22,10 @@
72335 #include <linux/gfp.h>
72336 #include <net/tcp.h>
72337
72338+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72339+extern int grsec_lastack_retries;
72340+#endif
72341+
72342 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72343 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72344 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72345@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72346 }
72347 }
72348
72349+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72350+ if ((sk->sk_state == TCP_LAST_ACK) &&
72351+ (grsec_lastack_retries > 0) &&
72352+ (grsec_lastack_retries < retry_until))
72353+ retry_until = grsec_lastack_retries;
72354+#endif
72355+
72356 if (retransmits_timed_out(sk, retry_until,
72357 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72358 /* Has it gone just too far? */
72359diff -urNp linux-3.0.7/net/ipv4/udp.c linux-3.0.7/net/ipv4/udp.c
72360--- linux-3.0.7/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
72361+++ linux-3.0.7/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
72362@@ -86,6 +86,7 @@
72363 #include <linux/types.h>
72364 #include <linux/fcntl.h>
72365 #include <linux/module.h>
72366+#include <linux/security.h>
72367 #include <linux/socket.h>
72368 #include <linux/sockios.h>
72369 #include <linux/igmp.h>
72370@@ -107,6 +108,10 @@
72371 #include <net/xfrm.h>
72372 #include "udp_impl.h"
72373
72374+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72375+extern int grsec_enable_blackhole;
72376+#endif
72377+
72378 struct udp_table udp_table __read_mostly;
72379 EXPORT_SYMBOL(udp_table);
72380
72381@@ -564,6 +569,9 @@ found:
72382 return s;
72383 }
72384
72385+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72386+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72387+
72388 /*
72389 * This routine is called by the ICMP module when it gets some
72390 * sort of error condition. If err < 0 then the socket should
72391@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72392 dport = usin->sin_port;
72393 if (dport == 0)
72394 return -EINVAL;
72395+
72396+ err = gr_search_udp_sendmsg(sk, usin);
72397+ if (err)
72398+ return err;
72399 } else {
72400 if (sk->sk_state != TCP_ESTABLISHED)
72401 return -EDESTADDRREQ;
72402+
72403+ err = gr_search_udp_sendmsg(sk, NULL);
72404+ if (err)
72405+ return err;
72406+
72407 daddr = inet->inet_daddr;
72408 dport = inet->inet_dport;
72409 /* Open fast path for connected socket.
72410@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
72411 udp_lib_checksum_complete(skb)) {
72412 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72413 IS_UDPLITE(sk));
72414- atomic_inc(&sk->sk_drops);
72415+ atomic_inc_unchecked(&sk->sk_drops);
72416 __skb_unlink(skb, rcvq);
72417 __skb_queue_tail(&list_kill, skb);
72418 }
72419@@ -1184,6 +1201,10 @@ try_again:
72420 if (!skb)
72421 goto out;
72422
72423+ err = gr_search_udp_recvmsg(sk, skb);
72424+ if (err)
72425+ goto out_free;
72426+
72427 ulen = skb->len - sizeof(struct udphdr);
72428 if (len > ulen)
72429 len = ulen;
72430@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72431
72432 drop:
72433 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72434- atomic_inc(&sk->sk_drops);
72435+ atomic_inc_unchecked(&sk->sk_drops);
72436 kfree_skb(skb);
72437 return -1;
72438 }
72439@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
72440 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72441
72442 if (!skb1) {
72443- atomic_inc(&sk->sk_drops);
72444+ atomic_inc_unchecked(&sk->sk_drops);
72445 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72446 IS_UDPLITE(sk));
72447 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72448@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72449 goto csum_error;
72450
72451 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72452+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72453+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72454+#endif
72455 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72456
72457 /*
72458@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
72459 sk_wmem_alloc_get(sp),
72460 sk_rmem_alloc_get(sp),
72461 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72462- atomic_read(&sp->sk_refcnt), sp,
72463- atomic_read(&sp->sk_drops), len);
72464+ atomic_read(&sp->sk_refcnt),
72465+#ifdef CONFIG_GRKERNSEC_HIDESYM
72466+ NULL,
72467+#else
72468+ sp,
72469+#endif
72470+ atomic_read_unchecked(&sp->sk_drops), len);
72471 }
72472
72473 int udp4_seq_show(struct seq_file *seq, void *v)
72474diff -urNp linux-3.0.7/net/ipv6/addrconf.c linux-3.0.7/net/ipv6/addrconf.c
72475--- linux-3.0.7/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
72476+++ linux-3.0.7/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
72477@@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
72478 p.iph.ihl = 5;
72479 p.iph.protocol = IPPROTO_IPV6;
72480 p.iph.ttl = 64;
72481- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72482+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72483
72484 if (ops->ndo_do_ioctl) {
72485 mm_segment_t oldfs = get_fs();
72486diff -urNp linux-3.0.7/net/ipv6/inet6_connection_sock.c linux-3.0.7/net/ipv6/inet6_connection_sock.c
72487--- linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
72488+++ linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
72489@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72490 #ifdef CONFIG_XFRM
72491 {
72492 struct rt6_info *rt = (struct rt6_info *)dst;
72493- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72494+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72495 }
72496 #endif
72497 }
72498@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72499 #ifdef CONFIG_XFRM
72500 if (dst) {
72501 struct rt6_info *rt = (struct rt6_info *)dst;
72502- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72503+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72504 __sk_dst_reset(sk);
72505 dst = NULL;
72506 }
72507diff -urNp linux-3.0.7/net/ipv6/ipv6_sockglue.c linux-3.0.7/net/ipv6/ipv6_sockglue.c
72508--- linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:54:54.000000000 -0400
72509+++ linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:55:28.000000000 -0400
72510@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72511 int val, valbool;
72512 int retv = -ENOPROTOOPT;
72513
72514+ pax_track_stack();
72515+
72516 if (optval == NULL)
72517 val=0;
72518 else {
72519@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72520 int len;
72521 int val;
72522
72523+ pax_track_stack();
72524+
72525 if (ip6_mroute_opt(optname))
72526 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72527
72528@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72529 if (sk->sk_type != SOCK_STREAM)
72530 return -ENOPROTOOPT;
72531
72532- msg.msg_control = optval;
72533+ msg.msg_control = (void __force_kernel *)optval;
72534 msg.msg_controllen = len;
72535 msg.msg_flags = flags;
72536
72537diff -urNp linux-3.0.7/net/ipv6/raw.c linux-3.0.7/net/ipv6/raw.c
72538--- linux-3.0.7/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
72539+++ linux-3.0.7/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
72540@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72541 {
72542 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72543 skb_checksum_complete(skb)) {
72544- atomic_inc(&sk->sk_drops);
72545+ atomic_inc_unchecked(&sk->sk_drops);
72546 kfree_skb(skb);
72547 return NET_RX_DROP;
72548 }
72549@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72550 struct raw6_sock *rp = raw6_sk(sk);
72551
72552 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72553- atomic_inc(&sk->sk_drops);
72554+ atomic_inc_unchecked(&sk->sk_drops);
72555 kfree_skb(skb);
72556 return NET_RX_DROP;
72557 }
72558@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72559
72560 if (inet->hdrincl) {
72561 if (skb_checksum_complete(skb)) {
72562- atomic_inc(&sk->sk_drops);
72563+ atomic_inc_unchecked(&sk->sk_drops);
72564 kfree_skb(skb);
72565 return NET_RX_DROP;
72566 }
72567@@ -601,7 +601,7 @@ out:
72568 return err;
72569 }
72570
72571-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72572+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72573 struct flowi6 *fl6, struct dst_entry **dstp,
72574 unsigned int flags)
72575 {
72576@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
72577 u16 proto;
72578 int err;
72579
72580+ pax_track_stack();
72581+
72582 /* Rough check on arithmetic overflow,
72583 better check is made in ip6_append_data().
72584 */
72585@@ -909,12 +911,15 @@ do_confirm:
72586 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72587 char __user *optval, int optlen)
72588 {
72589+ struct icmp6_filter filter;
72590+
72591 switch (optname) {
72592 case ICMPV6_FILTER:
72593 if (optlen > sizeof(struct icmp6_filter))
72594 optlen = sizeof(struct icmp6_filter);
72595- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72596+ if (copy_from_user(&filter, optval, optlen))
72597 return -EFAULT;
72598+ raw6_sk(sk)->filter = filter;
72599 return 0;
72600 default:
72601 return -ENOPROTOOPT;
72602@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
72603 char __user *optval, int __user *optlen)
72604 {
72605 int len;
72606+ struct icmp6_filter filter;
72607
72608 switch (optname) {
72609 case ICMPV6_FILTER:
72610@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
72611 len = sizeof(struct icmp6_filter);
72612 if (put_user(len, optlen))
72613 return -EFAULT;
72614- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72615+ filter = raw6_sk(sk)->filter;
72616+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72617 return -EFAULT;
72618 return 0;
72619 default:
72620@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
72621 0, 0L, 0,
72622 sock_i_uid(sp), 0,
72623 sock_i_ino(sp),
72624- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72625+ atomic_read(&sp->sk_refcnt),
72626+#ifdef CONFIG_GRKERNSEC_HIDESYM
72627+ NULL,
72628+#else
72629+ sp,
72630+#endif
72631+ atomic_read_unchecked(&sp->sk_drops));
72632 }
72633
72634 static int raw6_seq_show(struct seq_file *seq, void *v)
72635diff -urNp linux-3.0.7/net/ipv6/tcp_ipv6.c linux-3.0.7/net/ipv6/tcp_ipv6.c
72636--- linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
72637+++ linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
72638@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72639 }
72640 #endif
72641
72642+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72643+extern int grsec_enable_blackhole;
72644+#endif
72645+
72646 static void tcp_v6_hash(struct sock *sk)
72647 {
72648 if (sk->sk_state != TCP_CLOSE) {
72649@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72650 return 0;
72651
72652 reset:
72653+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72654+ if (!grsec_enable_blackhole)
72655+#endif
72656 tcp_v6_send_reset(sk, skb);
72657 discard:
72658 if (opt_skb)
72659@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72660 TCP_SKB_CB(skb)->sacked = 0;
72661
72662 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72663- if (!sk)
72664+ if (!sk) {
72665+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72666+ ret = 1;
72667+#endif
72668 goto no_tcp_socket;
72669+ }
72670
72671 process:
72672- if (sk->sk_state == TCP_TIME_WAIT)
72673+ if (sk->sk_state == TCP_TIME_WAIT) {
72674+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72675+ ret = 2;
72676+#endif
72677 goto do_time_wait;
72678+ }
72679
72680 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
72681 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72682@@ -1794,6 +1809,10 @@ no_tcp_socket:
72683 bad_packet:
72684 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72685 } else {
72686+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72687+ if (!grsec_enable_blackhole || (ret == 1 &&
72688+ (skb->dev->flags & IFF_LOOPBACK)))
72689+#endif
72690 tcp_v6_send_reset(NULL, skb);
72691 }
72692
72693@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
72694 uid,
72695 0, /* non standard timer */
72696 0, /* open_requests have no inode */
72697- 0, req);
72698+ 0,
72699+#ifdef CONFIG_GRKERNSEC_HIDESYM
72700+ NULL
72701+#else
72702+ req
72703+#endif
72704+ );
72705 }
72706
72707 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72708@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
72709 sock_i_uid(sp),
72710 icsk->icsk_probes_out,
72711 sock_i_ino(sp),
72712- atomic_read(&sp->sk_refcnt), sp,
72713+ atomic_read(&sp->sk_refcnt),
72714+#ifdef CONFIG_GRKERNSEC_HIDESYM
72715+ NULL,
72716+#else
72717+ sp,
72718+#endif
72719 jiffies_to_clock_t(icsk->icsk_rto),
72720 jiffies_to_clock_t(icsk->icsk_ack.ato),
72721 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72722@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
72723 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72724 tw->tw_substate, 0, 0,
72725 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72726- atomic_read(&tw->tw_refcnt), tw);
72727+ atomic_read(&tw->tw_refcnt),
72728+#ifdef CONFIG_GRKERNSEC_HIDESYM
72729+ NULL
72730+#else
72731+ tw
72732+#endif
72733+ );
72734 }
72735
72736 static int tcp6_seq_show(struct seq_file *seq, void *v)
72737diff -urNp linux-3.0.7/net/ipv6/udp.c linux-3.0.7/net/ipv6/udp.c
72738--- linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:09.000000000 -0400
72739+++ linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:19.000000000 -0400
72740@@ -50,6 +50,10 @@
72741 #include <linux/seq_file.h>
72742 #include "udp_impl.h"
72743
72744+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72745+extern int grsec_enable_blackhole;
72746+#endif
72747+
72748 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72749 {
72750 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72751@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72752
72753 return 0;
72754 drop:
72755- atomic_inc(&sk->sk_drops);
72756+ atomic_inc_unchecked(&sk->sk_drops);
72757 drop_no_sk_drops_inc:
72758 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72759 kfree_skb(skb);
72760@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
72761 continue;
72762 }
72763 drop:
72764- atomic_inc(&sk->sk_drops);
72765+ atomic_inc_unchecked(&sk->sk_drops);
72766 UDP6_INC_STATS_BH(sock_net(sk),
72767 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
72768 UDP6_INC_STATS_BH(sock_net(sk),
72769@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72770 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72771 proto == IPPROTO_UDPLITE);
72772
72773+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72774+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72775+#endif
72776 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
72777
72778 kfree_skb(skb);
72779@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72780 if (!sock_owned_by_user(sk))
72781 udpv6_queue_rcv_skb(sk, skb);
72782 else if (sk_add_backlog(sk, skb)) {
72783- atomic_inc(&sk->sk_drops);
72784+ atomic_inc_unchecked(&sk->sk_drops);
72785 bh_unlock_sock(sk);
72786 sock_put(sk);
72787 goto discard;
72788@@ -1408,8 +1415,13 @@ static void udp6_sock_seq_show(struct se
72789 0, 0L, 0,
72790 sock_i_uid(sp), 0,
72791 sock_i_ino(sp),
72792- atomic_read(&sp->sk_refcnt), sp,
72793- atomic_read(&sp->sk_drops));
72794+ atomic_read(&sp->sk_refcnt),
72795+#ifdef CONFIG_GRKERNSEC_HIDESYM
72796+ NULL,
72797+#else
72798+ sp,
72799+#endif
72800+ atomic_read_unchecked(&sp->sk_drops));
72801 }
72802
72803 int udp6_seq_show(struct seq_file *seq, void *v)
72804diff -urNp linux-3.0.7/net/irda/ircomm/ircomm_tty.c linux-3.0.7/net/irda/ircomm/ircomm_tty.c
72805--- linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
72806+++ linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
72807@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
72808 add_wait_queue(&self->open_wait, &wait);
72809
72810 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72811- __FILE__,__LINE__, tty->driver->name, self->open_count );
72812+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72813
72814 /* As far as I can see, we protect open_count - Jean II */
72815 spin_lock_irqsave(&self->spinlock, flags);
72816 if (!tty_hung_up_p(filp)) {
72817 extra_count = 1;
72818- self->open_count--;
72819+ local_dec(&self->open_count);
72820 }
72821 spin_unlock_irqrestore(&self->spinlock, flags);
72822- self->blocked_open++;
72823+ local_inc(&self->blocked_open);
72824
72825 while (1) {
72826 if (tty->termios->c_cflag & CBAUD) {
72827@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
72828 }
72829
72830 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72831- __FILE__,__LINE__, tty->driver->name, self->open_count );
72832+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72833
72834 schedule();
72835 }
72836@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
72837 if (extra_count) {
72838 /* ++ is not atomic, so this should be protected - Jean II */
72839 spin_lock_irqsave(&self->spinlock, flags);
72840- self->open_count++;
72841+ local_inc(&self->open_count);
72842 spin_unlock_irqrestore(&self->spinlock, flags);
72843 }
72844- self->blocked_open--;
72845+ local_dec(&self->blocked_open);
72846
72847 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72848- __FILE__,__LINE__, tty->driver->name, self->open_count);
72849+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72850
72851 if (!retval)
72852 self->flags |= ASYNC_NORMAL_ACTIVE;
72853@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
72854 }
72855 /* ++ is not atomic, so this should be protected - Jean II */
72856 spin_lock_irqsave(&self->spinlock, flags);
72857- self->open_count++;
72858+ local_inc(&self->open_count);
72859
72860 tty->driver_data = self;
72861 self->tty = tty;
72862 spin_unlock_irqrestore(&self->spinlock, flags);
72863
72864 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72865- self->line, self->open_count);
72866+ self->line, local_read(&self->open_count));
72867
72868 /* Not really used by us, but lets do it anyway */
72869 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72870@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
72871 return;
72872 }
72873
72874- if ((tty->count == 1) && (self->open_count != 1)) {
72875+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72876 /*
72877 * Uh, oh. tty->count is 1, which means that the tty
72878 * structure will be freed. state->count should always
72879@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
72880 */
72881 IRDA_DEBUG(0, "%s(), bad serial port count; "
72882 "tty->count is 1, state->count is %d\n", __func__ ,
72883- self->open_count);
72884- self->open_count = 1;
72885+ local_read(&self->open_count));
72886+ local_set(&self->open_count, 1);
72887 }
72888
72889- if (--self->open_count < 0) {
72890+ if (local_dec_return(&self->open_count) < 0) {
72891 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72892- __func__, self->line, self->open_count);
72893- self->open_count = 0;
72894+ __func__, self->line, local_read(&self->open_count));
72895+ local_set(&self->open_count, 0);
72896 }
72897- if (self->open_count) {
72898+ if (local_read(&self->open_count)) {
72899 spin_unlock_irqrestore(&self->spinlock, flags);
72900
72901 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72902@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
72903 tty->closing = 0;
72904 self->tty = NULL;
72905
72906- if (self->blocked_open) {
72907+ if (local_read(&self->blocked_open)) {
72908 if (self->close_delay)
72909 schedule_timeout_interruptible(self->close_delay);
72910 wake_up_interruptible(&self->open_wait);
72911@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
72912 spin_lock_irqsave(&self->spinlock, flags);
72913 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72914 self->tty = NULL;
72915- self->open_count = 0;
72916+ local_set(&self->open_count, 0);
72917 spin_unlock_irqrestore(&self->spinlock, flags);
72918
72919 wake_up_interruptible(&self->open_wait);
72920@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
72921 seq_putc(m, '\n');
72922
72923 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72924- seq_printf(m, "Open count: %d\n", self->open_count);
72925+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72926 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72927 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72928
72929diff -urNp linux-3.0.7/net/iucv/af_iucv.c linux-3.0.7/net/iucv/af_iucv.c
72930--- linux-3.0.7/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
72931+++ linux-3.0.7/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
72932@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
72933
72934 write_lock_bh(&iucv_sk_list.lock);
72935
72936- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72937+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72938 while (__iucv_get_sock_by_name(name)) {
72939 sprintf(name, "%08x",
72940- atomic_inc_return(&iucv_sk_list.autobind_name));
72941+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72942 }
72943
72944 write_unlock_bh(&iucv_sk_list.lock);
72945diff -urNp linux-3.0.7/net/key/af_key.c linux-3.0.7/net/key/af_key.c
72946--- linux-3.0.7/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
72947+++ linux-3.0.7/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
72948@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
72949 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72950 struct xfrm_kmaddress k;
72951
72952+ pax_track_stack();
72953+
72954 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72955 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72956 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72957@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
72958 static u32 get_acqseq(void)
72959 {
72960 u32 res;
72961- static atomic_t acqseq;
72962+ static atomic_unchecked_t acqseq;
72963
72964 do {
72965- res = atomic_inc_return(&acqseq);
72966+ res = atomic_inc_return_unchecked(&acqseq);
72967 } while (!res);
72968 return res;
72969 }
72970diff -urNp linux-3.0.7/net/lapb/lapb_iface.c linux-3.0.7/net/lapb/lapb_iface.c
72971--- linux-3.0.7/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
72972+++ linux-3.0.7/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
72973@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
72974 goto out;
72975
72976 lapb->dev = dev;
72977- lapb->callbacks = *callbacks;
72978+ lapb->callbacks = callbacks;
72979
72980 __lapb_insert_cb(lapb);
72981
72982@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
72983
72984 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72985 {
72986- if (lapb->callbacks.connect_confirmation)
72987- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72988+ if (lapb->callbacks->connect_confirmation)
72989+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72990 }
72991
72992 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72993 {
72994- if (lapb->callbacks.connect_indication)
72995- lapb->callbacks.connect_indication(lapb->dev, reason);
72996+ if (lapb->callbacks->connect_indication)
72997+ lapb->callbacks->connect_indication(lapb->dev, reason);
72998 }
72999
73000 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73001 {
73002- if (lapb->callbacks.disconnect_confirmation)
73003- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73004+ if (lapb->callbacks->disconnect_confirmation)
73005+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73006 }
73007
73008 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73009 {
73010- if (lapb->callbacks.disconnect_indication)
73011- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73012+ if (lapb->callbacks->disconnect_indication)
73013+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73014 }
73015
73016 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73017 {
73018- if (lapb->callbacks.data_indication)
73019- return lapb->callbacks.data_indication(lapb->dev, skb);
73020+ if (lapb->callbacks->data_indication)
73021+ return lapb->callbacks->data_indication(lapb->dev, skb);
73022
73023 kfree_skb(skb);
73024 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73025@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73026 {
73027 int used = 0;
73028
73029- if (lapb->callbacks.data_transmit) {
73030- lapb->callbacks.data_transmit(lapb->dev, skb);
73031+ if (lapb->callbacks->data_transmit) {
73032+ lapb->callbacks->data_transmit(lapb->dev, skb);
73033 used = 1;
73034 }
73035
73036diff -urNp linux-3.0.7/net/mac80211/debugfs_sta.c linux-3.0.7/net/mac80211/debugfs_sta.c
73037--- linux-3.0.7/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
73038+++ linux-3.0.7/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
73039@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73040 struct tid_ampdu_rx *tid_rx;
73041 struct tid_ampdu_tx *tid_tx;
73042
73043+ pax_track_stack();
73044+
73045 rcu_read_lock();
73046
73047 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73048@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73049 struct sta_info *sta = file->private_data;
73050 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73051
73052+ pax_track_stack();
73053+
73054 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73055 htc->ht_supported ? "" : "not ");
73056 if (htc->ht_supported) {
73057diff -urNp linux-3.0.7/net/mac80211/ieee80211_i.h linux-3.0.7/net/mac80211/ieee80211_i.h
73058--- linux-3.0.7/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
73059+++ linux-3.0.7/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
73060@@ -27,6 +27,7 @@
73061 #include <net/ieee80211_radiotap.h>
73062 #include <net/cfg80211.h>
73063 #include <net/mac80211.h>
73064+#include <asm/local.h>
73065 #include "key.h"
73066 #include "sta_info.h"
73067
73068@@ -721,7 +722,7 @@ struct ieee80211_local {
73069 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73070 spinlock_t queue_stop_reason_lock;
73071
73072- int open_count;
73073+ local_t open_count;
73074 int monitors, cooked_mntrs;
73075 /* number of interfaces with corresponding FIF_ flags */
73076 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73077diff -urNp linux-3.0.7/net/mac80211/iface.c linux-3.0.7/net/mac80211/iface.c
73078--- linux-3.0.7/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
73079+++ linux-3.0.7/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
73080@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73081 break;
73082 }
73083
73084- if (local->open_count == 0) {
73085+ if (local_read(&local->open_count) == 0) {
73086 res = drv_start(local);
73087 if (res)
73088 goto err_del_bss;
73089@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73090 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73091
73092 if (!is_valid_ether_addr(dev->dev_addr)) {
73093- if (!local->open_count)
73094+ if (!local_read(&local->open_count))
73095 drv_stop(local);
73096 return -EADDRNOTAVAIL;
73097 }
73098@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73099 mutex_unlock(&local->mtx);
73100
73101 if (coming_up)
73102- local->open_count++;
73103+ local_inc(&local->open_count);
73104
73105 if (hw_reconf_flags) {
73106 ieee80211_hw_config(local, hw_reconf_flags);
73107@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73108 err_del_interface:
73109 drv_remove_interface(local, &sdata->vif);
73110 err_stop:
73111- if (!local->open_count)
73112+ if (!local_read(&local->open_count))
73113 drv_stop(local);
73114 err_del_bss:
73115 sdata->bss = NULL;
73116@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
73117 }
73118
73119 if (going_down)
73120- local->open_count--;
73121+ local_dec(&local->open_count);
73122
73123 switch (sdata->vif.type) {
73124 case NL80211_IFTYPE_AP_VLAN:
73125@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
73126
73127 ieee80211_recalc_ps(local, -1);
73128
73129- if (local->open_count == 0) {
73130+ if (local_read(&local->open_count) == 0) {
73131 if (local->ops->napi_poll)
73132 napi_disable(&local->napi);
73133 ieee80211_clear_tx_pending(local);
73134diff -urNp linux-3.0.7/net/mac80211/main.c linux-3.0.7/net/mac80211/main.c
73135--- linux-3.0.7/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
73136+++ linux-3.0.7/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
73137@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73138 local->hw.conf.power_level = power;
73139 }
73140
73141- if (changed && local->open_count) {
73142+ if (changed && local_read(&local->open_count)) {
73143 ret = drv_config(local, changed);
73144 /*
73145 * Goal:
73146diff -urNp linux-3.0.7/net/mac80211/mlme.c linux-3.0.7/net/mac80211/mlme.c
73147--- linux-3.0.7/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
73148+++ linux-3.0.7/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
73149@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
73150 bool have_higher_than_11mbit = false;
73151 u16 ap_ht_cap_flags;
73152
73153+ pax_track_stack();
73154+
73155 /* AssocResp and ReassocResp have identical structure */
73156
73157 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73158diff -urNp linux-3.0.7/net/mac80211/pm.c linux-3.0.7/net/mac80211/pm.c
73159--- linux-3.0.7/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
73160+++ linux-3.0.7/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
73161@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
73162 cancel_work_sync(&local->dynamic_ps_enable_work);
73163 del_timer_sync(&local->dynamic_ps_timer);
73164
73165- local->wowlan = wowlan && local->open_count;
73166+ local->wowlan = wowlan && local_read(&local->open_count);
73167 if (local->wowlan) {
73168 int err = drv_suspend(local, wowlan);
73169 if (err) {
73170@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
73171 }
73172
73173 /* stop hardware - this must stop RX */
73174- if (local->open_count)
73175+ if (local_read(&local->open_count))
73176 ieee80211_stop_device(local);
73177
73178 suspend:
73179diff -urNp linux-3.0.7/net/mac80211/rate.c linux-3.0.7/net/mac80211/rate.c
73180--- linux-3.0.7/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
73181+++ linux-3.0.7/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
73182@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73183
73184 ASSERT_RTNL();
73185
73186- if (local->open_count)
73187+ if (local_read(&local->open_count))
73188 return -EBUSY;
73189
73190 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73191diff -urNp linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c
73192--- linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
73193+++ linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
73194@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73195
73196 spin_unlock_irqrestore(&events->lock, status);
73197
73198- if (copy_to_user(buf, pb, p))
73199+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73200 return -EFAULT;
73201
73202 return p;
73203diff -urNp linux-3.0.7/net/mac80211/util.c linux-3.0.7/net/mac80211/util.c
73204--- linux-3.0.7/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
73205+++ linux-3.0.7/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
73206@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
73207 #endif
73208
73209 /* restart hardware */
73210- if (local->open_count) {
73211+ if (local_read(&local->open_count)) {
73212 /*
73213 * Upon resume hardware can sometimes be goofy due to
73214 * various platform / driver / bus issues, so restarting
73215diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c
73216--- linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
73217+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
73218@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73219 /* Increase the refcnt counter of the dest */
73220 atomic_inc(&dest->refcnt);
73221
73222- conn_flags = atomic_read(&dest->conn_flags);
73223+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73224 if (cp->protocol != IPPROTO_UDP)
73225 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73226 /* Bind with the destination and its corresponding transmitter */
73227@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73228 atomic_set(&cp->refcnt, 1);
73229
73230 atomic_set(&cp->n_control, 0);
73231- atomic_set(&cp->in_pkts, 0);
73232+ atomic_set_unchecked(&cp->in_pkts, 0);
73233
73234 atomic_inc(&ipvs->conn_count);
73235 if (flags & IP_VS_CONN_F_NO_CPORT)
73236@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73237
73238 /* Don't drop the entry if its number of incoming packets is not
73239 located in [0, 8] */
73240- i = atomic_read(&cp->in_pkts);
73241+ i = atomic_read_unchecked(&cp->in_pkts);
73242 if (i > 8 || i < 0) return 0;
73243
73244 if (!todrop_rate[i]) return 0;
73245diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c
73246--- linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
73247+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
73248@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73249 ret = cp->packet_xmit(skb, cp, pd->pp);
73250 /* do not touch skb anymore */
73251
73252- atomic_inc(&cp->in_pkts);
73253+ atomic_inc_unchecked(&cp->in_pkts);
73254 ip_vs_conn_put(cp);
73255 return ret;
73256 }
73257@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73258 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73259 pkts = sysctl_sync_threshold(ipvs);
73260 else
73261- pkts = atomic_add_return(1, &cp->in_pkts);
73262+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73263
73264 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73265 cp->protocol == IPPROTO_SCTP) {
73266diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c
73267--- linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
73268+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
73269@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73270 ip_vs_rs_hash(ipvs, dest);
73271 write_unlock_bh(&ipvs->rs_lock);
73272 }
73273- atomic_set(&dest->conn_flags, conn_flags);
73274+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73275
73276 /* bind the service */
73277 if (!dest->svc) {
73278@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73279 " %-7s %-6d %-10d %-10d\n",
73280 &dest->addr.in6,
73281 ntohs(dest->port),
73282- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73283+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73284 atomic_read(&dest->weight),
73285 atomic_read(&dest->activeconns),
73286 atomic_read(&dest->inactconns));
73287@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73288 "%-7s %-6d %-10d %-10d\n",
73289 ntohl(dest->addr.ip),
73290 ntohs(dest->port),
73291- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73292+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73293 atomic_read(&dest->weight),
73294 atomic_read(&dest->activeconns),
73295 atomic_read(&dest->inactconns));
73296@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73297 struct ip_vs_dest_user *udest_compat;
73298 struct ip_vs_dest_user_kern udest;
73299
73300+ pax_track_stack();
73301+
73302 if (!capable(CAP_NET_ADMIN))
73303 return -EPERM;
73304
73305@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
73306
73307 entry.addr = dest->addr.ip;
73308 entry.port = dest->port;
73309- entry.conn_flags = atomic_read(&dest->conn_flags);
73310+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73311 entry.weight = atomic_read(&dest->weight);
73312 entry.u_threshold = dest->u_threshold;
73313 entry.l_threshold = dest->l_threshold;
73314@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
73315 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73316
73317 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73318- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73319+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73320 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73321 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73322 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73323diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c
73324--- linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
73325+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
73326@@ -648,7 +648,7 @@ control:
73327 * i.e only increment in_pkts for Templates.
73328 */
73329 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73330- int pkts = atomic_add_return(1, &cp->in_pkts);
73331+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73332
73333 if (pkts % sysctl_sync_period(ipvs) != 1)
73334 return;
73335@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
73336
73337 if (opt)
73338 memcpy(&cp->in_seq, opt, sizeof(*opt));
73339- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73340+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73341 cp->state = state;
73342 cp->old_state = cp->state;
73343 /*
73344diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c
73345--- linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
73346+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
73347@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73348 else
73349 rc = NF_ACCEPT;
73350 /* do not touch skb anymore */
73351- atomic_inc(&cp->in_pkts);
73352+ atomic_inc_unchecked(&cp->in_pkts);
73353 goto out;
73354 }
73355
73356@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73357 else
73358 rc = NF_ACCEPT;
73359 /* do not touch skb anymore */
73360- atomic_inc(&cp->in_pkts);
73361+ atomic_inc_unchecked(&cp->in_pkts);
73362 goto out;
73363 }
73364
73365diff -urNp linux-3.0.7/net/netfilter/Kconfig linux-3.0.7/net/netfilter/Kconfig
73366--- linux-3.0.7/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
73367+++ linux-3.0.7/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
73368@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73369
73370 To compile it as a module, choose M here. If unsure, say N.
73371
73372+config NETFILTER_XT_MATCH_GRADM
73373+ tristate '"gradm" match support'
73374+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73375+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73376+ ---help---
73377+ The gradm match allows to match on grsecurity RBAC being enabled.
73378+ It is useful when iptables rules are applied early on bootup to
73379+ prevent connections to the machine (except from a trusted host)
73380+ while the RBAC system is disabled.
73381+
73382 config NETFILTER_XT_MATCH_HASHLIMIT
73383 tristate '"hashlimit" match support'
73384 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73385diff -urNp linux-3.0.7/net/netfilter/Makefile linux-3.0.7/net/netfilter/Makefile
73386--- linux-3.0.7/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
73387+++ linux-3.0.7/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
73388@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73389 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73390 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73391 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73392+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73393 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73394 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73395 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73396diff -urNp linux-3.0.7/net/netfilter/nfnetlink_log.c linux-3.0.7/net/netfilter/nfnetlink_log.c
73397--- linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
73398+++ linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
73399@@ -70,7 +70,7 @@ struct nfulnl_instance {
73400 };
73401
73402 static DEFINE_SPINLOCK(instances_lock);
73403-static atomic_t global_seq;
73404+static atomic_unchecked_t global_seq;
73405
73406 #define INSTANCE_BUCKETS 16
73407 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73408@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73409 /* global sequence number */
73410 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73411 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73412- htonl(atomic_inc_return(&global_seq)));
73413+ htonl(atomic_inc_return_unchecked(&global_seq)));
73414
73415 if (data_len) {
73416 struct nlattr *nla;
73417diff -urNp linux-3.0.7/net/netfilter/nfnetlink_queue.c linux-3.0.7/net/netfilter/nfnetlink_queue.c
73418--- linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
73419+++ linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
73420@@ -58,7 +58,7 @@ struct nfqnl_instance {
73421 */
73422 spinlock_t lock;
73423 unsigned int queue_total;
73424- atomic_t id_sequence; /* 'sequence' of pkt ids */
73425+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
73426 struct list_head queue_list; /* packets in queue */
73427 };
73428
73429@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
73430 nfmsg->version = NFNETLINK_V0;
73431 nfmsg->res_id = htons(queue->queue_num);
73432
73433- entry->id = atomic_inc_return(&queue->id_sequence);
73434+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
73435 pmsg.packet_id = htonl(entry->id);
73436 pmsg.hw_protocol = entskb->protocol;
73437 pmsg.hook = entry->hook;
73438@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
73439 inst->peer_pid, inst->queue_total,
73440 inst->copy_mode, inst->copy_range,
73441 inst->queue_dropped, inst->queue_user_dropped,
73442- atomic_read(&inst->id_sequence), 1);
73443+ atomic_read_unchecked(&inst->id_sequence), 1);
73444 }
73445
73446 static const struct seq_operations nfqnl_seq_ops = {
73447diff -urNp linux-3.0.7/net/netfilter/xt_gradm.c linux-3.0.7/net/netfilter/xt_gradm.c
73448--- linux-3.0.7/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73449+++ linux-3.0.7/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
73450@@ -0,0 +1,51 @@
73451+/*
73452+ * gradm match for netfilter
73453