]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.3-201108251825.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.3-201108251825.patch
CommitLineData
3724b450
PK
1diff -urNp linux-3.0.3/arch/alpha/include/asm/elf.h linux-3.0.3/arch/alpha/include/asm/elf.h
2--- linux-3.0.3/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.3/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.3/arch/alpha/include/asm/pgtable.h linux-3.0.3/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.3/arch/alpha/kernel/module.c linux-3.0.3/arch/alpha/kernel/module.c
40--- linux-3.0.3/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.3/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.3/arch/alpha/kernel/osf_sys.c linux-3.0.3/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.3/arch/alpha/mm/fault.c linux-3.0.3/arch/alpha/mm/fault.c
86--- linux-3.0.3/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.3/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.3/arch/arm/include/asm/elf.h linux-3.0.3/arch/arm/include/asm/elf.h
245--- linux-3.0.3/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.3/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.3/arch/arm/include/asm/kmap_types.h linux-3.0.3/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.3/arch/arm/include/asm/uaccess.h linux-3.0.3/arch/arm/include/asm/uaccess.h
286--- linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.3/arch/arm/kernel/armksyms.c linux-3.0.3/arch/arm/kernel/armksyms.c
344--- linux-3.0.3/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.3/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.3/arch/arm/kernel/process.c linux-3.0.3/arch/arm/kernel/process.c
358--- linux-3.0.3/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.3/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.3/arch/arm/kernel/traps.c linux-3.0.3/arch/arm/kernel/traps.c
382--- linux-3.0.3/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.3/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.3/arch/arm/lib/copy_from_user.S linux-3.0.3/arch/arm/lib/copy_from_user.S
404--- linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.3/arch/arm/lib/copy_to_user.S linux-3.0.3/arch/arm/lib/copy_to_user.S
430--- linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.3/arch/arm/lib/uaccess.S linux-3.0.3/arch/arm/lib/uaccess.S
456--- linux-3.0.3/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.3/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.3/arch/arm/mm/fault.c linux-3.0.3/arch/arm/mm/fault.c
536--- linux-3.0.3/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.3/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.3/arch/arm/mm/mmap.c linux-3.0.3/arch/arm/mm/mmap.c
587--- linux-3.0.3/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.3/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.3/arch/avr32/include/asm/elf.h linux-3.0.3/arch/avr32/include/asm/elf.h
639--- linux-3.0.3/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.3/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.3/arch/avr32/include/asm/kmap_types.h linux-3.0.3/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.3/arch/avr32/mm/fault.c linux-3.0.3/arch/avr32/mm/fault.c
671--- linux-3.0.3/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.3/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.3/arch/frv/include/asm/kmap_types.h linux-3.0.3/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.3/arch/frv/mm/elf-fdpic.c linux-3.0.3/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.3/arch/ia64/include/asm/elf.h linux-3.0.3/arch/ia64/include/asm/elf.h
757--- linux-3.0.3/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.3/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.3/arch/ia64/include/asm/pgtable.h linux-3.0.3/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.3/arch/ia64/include/asm/spinlock.h linux-3.0.3/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.3/arch/ia64/include/asm/uaccess.h linux-3.0.3/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.3/arch/ia64/kernel/module.c linux-3.0.3/arch/ia64/kernel/module.c
837--- linux-3.0.3/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.3/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.3/arch/ia64/kernel/sys_ia64.c linux-3.0.3/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.3/arch/ia64/mm/fault.c linux-3.0.3/arch/ia64/mm/fault.c
975--- linux-3.0.3/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.3/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.3/arch/ia64/mm/hugetlbpage.c linux-3.0.3/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.3/arch/ia64/mm/init.c linux-3.0.3/arch/ia64/mm/init.c
1039--- linux-3.0.3/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.3/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.3/arch/m32r/lib/usercopy.c linux-3.0.3/arch/m32r/lib/usercopy.c
1062--- linux-3.0.3/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.3/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.3/arch/mips/include/asm/elf.h linux-3.0.3/arch/mips/include/asm/elf.h
1085--- linux-3.0.3/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.3/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.3/arch/mips/include/asm/page.h linux-3.0.3/arch/mips/include/asm/page.h
1109--- linux-3.0.3/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.3/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.3/arch/mips/include/asm/system.h linux-3.0.3/arch/mips/include/asm/system.h
1121--- linux-3.0.3/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.3/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.3/arch/mips/kernel/process.c linux-3.0.3/arch/mips/kernel/process.c
1166--- linux-3.0.3/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.3/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.3/arch/mips/mm/fault.c linux-3.0.3/arch/mips/mm/fault.c
1185--- linux-3.0.3/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.3/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.3/arch/mips/mm/mmap.c linux-3.0.3/arch/mips/mm/mmap.c
1212--- linux-3.0.3/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.3/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.3/arch/parisc/include/asm/elf.h linux-3.0.3/arch/parisc/include/asm/elf.h
1276--- linux-3.0.3/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.3/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.3/arch/parisc/include/asm/pgtable.h linux-3.0.3/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.3/arch/parisc/kernel/module.c linux-3.0.3/arch/parisc/kernel/module.c
1314--- linux-3.0.3/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.3/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.3/arch/parisc/kernel/sys_parisc.c linux-3.0.3/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.3/arch/parisc/kernel/traps.c linux-3.0.3/arch/parisc/kernel/traps.c
1447--- linux-3.0.3/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.3/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.3/arch/parisc/mm/fault.c linux-3.0.3/arch/parisc/mm/fault.c
1461--- linux-3.0.3/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.3/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.3/arch/powerpc/include/asm/elf.h linux-3.0.3/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.3/arch/powerpc/include/asm/kmap_types.h linux-3.0.3/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.3/arch/powerpc/include/asm/mman.h linux-3.0.3/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.3/arch/powerpc/include/asm/page_64.h linux-3.0.3/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.3/arch/powerpc/include/asm/page.h linux-3.0.3/arch/powerpc/include/asm/page.h
1715--- linux-3.0.3/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.3/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.3/arch/powerpc/include/asm/pgtable.h linux-3.0.3/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.3/arch/powerpc/include/asm/reg.h linux-3.0.3/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.3/arch/powerpc/include/asm/system.h linux-3.0.3/arch/powerpc/include/asm/system.h
1773--- linux-3.0.3/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.3/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.3/arch/powerpc/include/asm/uaccess.h linux-3.0.3/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.3/arch/powerpc/kernel/module_32.c linux-3.0.3/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.3/arch/powerpc/kernel/module.c linux-3.0.3/arch/powerpc/kernel/module.c
2033--- linux-3.0.3/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.3/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.3/arch/powerpc/kernel/process.c linux-3.0.3/arch/powerpc/kernel/process.c
2075--- linux-3.0.3/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.3/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_32.c linux-3.0.3/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_64.c linux-3.0.3/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.3/arch/powerpc/kernel/traps.c linux-3.0.3/arch/powerpc/kernel/traps.c
2194--- linux-3.0.3/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.3/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.3/arch/powerpc/kernel/vdso.c linux-3.0.3/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.3/arch/powerpc/lib/usercopy_64.c linux-3.0.3/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.3/arch/powerpc/mm/fault.c linux-3.0.3/arch/powerpc/mm/fault.c
2278--- linux-3.0.3/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.3/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.3/arch/powerpc/mm/mmap_64.c linux-3.0.3/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.3/arch/powerpc/mm/slice.c linux-3.0.3/arch/powerpc/mm/slice.c
2411--- linux-3.0.3/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.3/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.3/arch/s390/include/asm/elf.h linux-3.0.3/arch/s390/include/asm/elf.h
2480--- linux-3.0.3/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.3/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.3/arch/s390/include/asm/system.h linux-3.0.3/arch/s390/include/asm/system.h
2508--- linux-3.0.3/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.3/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.3/arch/s390/include/asm/uaccess.h linux-3.0.3/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.3/arch/s390/kernel/module.c linux-3.0.3/arch/s390/kernel/module.c
2555--- linux-3.0.3/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.3/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.3/arch/s390/kernel/process.c linux-3.0.3/arch/s390/kernel/process.c
2629--- linux-3.0.3/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.3/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.3/arch/s390/kernel/setup.c linux-3.0.3/arch/s390/kernel/setup.c
2672--- linux-3.0.3/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.3/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.3/arch/s390/mm/mmap.c linux-3.0.3/arch/s390/mm/mmap.c
2684--- linux-3.0.3/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.3/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.3/arch/score/include/asm/system.h linux-3.0.3/arch/score/include/asm/system.h
2733--- linux-3.0.3/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.3/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.3/arch/score/kernel/process.c linux-3.0.3/arch/score/kernel/process.c
2745--- linux-3.0.3/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.3/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.3/arch/sh/mm/mmap.c linux-3.0.3/arch/sh/mm/mmap.c
2757--- linux-3.0.3/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.3/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.3/arch/sparc/include/asm/atomic_64.h linux-3.0.3/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.3/arch/sparc/include/asm/cache.h linux-3.0.3/arch/sparc/include/asm/cache.h
3029--- linux-3.0.3/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.3/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_32.h linux-3.0.3/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_64.h linux-3.0.3/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059+++ linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtable_32.h linux-3.0.3/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.3/arch/sparc/include/asm/spinlock_64.h linux-3.0.3/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_32.h linux-3.0.3/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_64.h linux-3.0.3/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_32.h linux-3.0.3/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_64.h linux-3.0.3/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess.h linux-3.0.3/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.3/arch/sparc/kernel/Makefile linux-3.0.3/arch/sparc/kernel/Makefile
3366--- linux-3.0.3/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.3/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.3/arch/sparc/kernel/process_32.c linux-3.0.3/arch/sparc/kernel/process_32.c
3378--- linux-3.0.3/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.3/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.3/arch/sparc/kernel/process_64.c linux-3.0.3/arch/sparc/kernel/process_64.c
3416--- linux-3.0.3/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.3/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.3/arch/sparc/kernel/traps_32.c linux-3.0.3/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.3/arch/sparc/kernel/traps_64.c linux-3.0.3/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.3/arch/sparc/kernel/unaligned_64.c linux-3.0.3/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798+++ linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.3/arch/sparc/lib/atomic_64.S linux-3.0.3/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.3/arch/sparc/lib/ksyms.c linux-3.0.3/arch/sparc/lib/ksyms.c
4046--- linux-3.0.3/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.3/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.3/arch/sparc/lib/Makefile linux-3.0.3/arch/sparc/lib/Makefile
4068--- linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069+++ linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.3/arch/sparc/Makefile linux-3.0.3/arch/sparc/Makefile
4080--- linux-3.0.3/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.3/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.3/arch/sparc/mm/fault_32.c linux-3.0.3/arch/sparc/mm/fault_32.c
4092--- linux-3.0.3/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.3/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.3/arch/sparc/mm/fault_64.c linux-3.0.3/arch/sparc/mm/fault_64.c
4399--- linux-3.0.3/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.3/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.3/arch/sparc/mm/hugetlbpage.c linux-3.0.3/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.3/arch/sparc/mm/init_32.c linux-3.0.3/arch/sparc/mm/init_32.c
4971--- linux-3.0.3/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.3/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.3/arch/sparc/mm/Makefile linux-3.0.3/arch/sparc/mm/Makefile
5008--- linux-3.0.3/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.3/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.3/arch/sparc/mm/srmmu.c linux-3.0.3/arch/sparc/mm/srmmu.c
5020--- linux-3.0.3/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.3/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.3/arch/um/include/asm/kmap_types.h linux-3.0.3/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.3/arch/um/include/asm/page.h linux-3.0.3/arch/um/include/asm/page.h
5048--- linux-3.0.3/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.3/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.3/arch/um/kernel/process.c linux-3.0.3/arch/um/kernel/process.c
5061--- linux-3.0.3/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.3/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.3/arch/um/sys-i386/syscalls.c linux-3.0.3/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.3/arch/x86/boot/bitops.h linux-3.0.3/arch/x86/boot/bitops.h
5112--- linux-3.0.3/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.3/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.3/arch/x86/boot/boot.h linux-3.0.3/arch/x86/boot/boot.h
5133--- linux-3.0.3/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.3/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_32.S linux-3.0.3/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_64.S linux-3.0.3/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.3/arch/x86/boot/compressed/Makefile linux-3.0.3/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.3/arch/x86/boot/compressed/misc.c linux-3.0.3/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.3/arch/x86/boot/compressed/relocs.c linux-3.0.3/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.3/arch/x86/boot/cpucheck.c linux-3.0.3/arch/x86/boot/cpucheck.c
5435--- linux-3.0.3/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.3/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.3/arch/x86/boot/header.S linux-3.0.3/arch/x86/boot/header.S
5533--- linux-3.0.3/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.3/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.3/arch/x86/boot/Makefile linux-3.0.3/arch/x86/boot/Makefile
5545--- linux-3.0.3/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.3/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.3/arch/x86/boot/memory.c linux-3.0.3/arch/x86/boot/memory.c
5558--- linux-3.0.3/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.3/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.3/arch/x86/boot/video.c linux-3.0.3/arch/x86/boot/video.c
5570--- linux-3.0.3/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.3/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.3/arch/x86/boot/video-vesa.c linux-3.0.3/arch/x86/boot/video-vesa.c
5582--- linux-3.0.3/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.3/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.3/arch/x86/ia32/ia32_aout.c linux-3.0.3/arch/x86/ia32/ia32_aout.c
5593--- linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599+ memset(&dump, 0, sizeof(dump));
5600+
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604diff -urNp linux-3.0.3/arch/x86/ia32/ia32entry.S linux-3.0.3/arch/x86/ia32/ia32entry.S
5605--- linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606+++ linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607@@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611+#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619+ .macro pax_enter_kernel_user
5620+#ifdef CONFIG_PAX_MEMORY_UDEREF
5621+ call pax_enter_kernel_user
5622+#endif
5623+ .endm
5624+
5625+ .macro pax_exit_kernel_user
5626+#ifdef CONFIG_PAX_MEMORY_UDEREF
5627+ call pax_exit_kernel_user
5628+#endif
5629+#ifdef CONFIG_PAX_RANDKSTACK
5630+ pushq %rax
5631+ call pax_randomize_kstack
5632+ popq %rax
5633+#endif
5634+ .endm
5635+
5636+ .macro pax_erase_kstack
5637+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638+ call pax_erase_kstack
5639+#endif
5640+ .endm
5641+
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649- addq $(KERNEL_STACK_OFFSET),%rsp
5650+ pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659+ GET_THREAD_INFO(%r10)
5660+ movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668+
5669+#ifdef CONFIG_PAX_MEMORY_UDEREF
5670+ mov $PAX_USER_SHADOW_BASE,%r10
5671+ add %r10,%rbp
5672+#endif
5673+
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677@@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681+ pax_exit_kernel_user
5682+ pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690+
5691+ pax_erase_kstack
5692+
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696@@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700+
5701+ pax_erase_kstack
5702+
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711+ CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718+
5719+#ifdef CONFIG_PAX_MEMORY_UDEREF
5720+ pax_enter_kernel_user
5721+#endif
5722+
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728- SAVE_ARGS 8,1,1
5729+ SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737+
5738+#ifdef CONFIG_PAX_MEMORY_UDEREF
5739+ mov $PAX_USER_SHADOW_BASE,%r10
5740+ add %r10,%r8
5741+#endif
5742+
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746@@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750+ pax_exit_kernel_user
5751+ pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755@@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759+
5760+ pax_erase_kstack
5761+
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769+ pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773@@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783diff -urNp linux-3.0.3/arch/x86/ia32/ia32_signal.c linux-3.0.3/arch/x86/ia32/ia32_signal.c
5784--- linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785+++ linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790- sp = ((sp + 4) & -16ul) - 4;
5791+ sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808- 0,
5809+ 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817+ else if (current->mm->context.vdso)
5818+ /* Return stub is in 32bit vsyscall page */
5819+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822- rt_sigreturn);
5823+ restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835diff -urNp linux-3.0.3/arch/x86/include/asm/alternative.h linux-3.0.3/arch/x86/include/asm/alternative.h
5836--- linux-3.0.3/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837+++ linux-3.0.3/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842- ".section .altinstr_replacement, \"ax\"\n" \
5843+ ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847diff -urNp linux-3.0.3/arch/x86/include/asm/apic.h linux-3.0.3/arch/x86/include/asm/apic.h
5848--- linux-3.0.3/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849+++ linux-3.0.3/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854-extern unsigned int apic_verbosity;
5855+extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859diff -urNp linux-3.0.3/arch/x86/include/asm/apm.h linux-3.0.3/arch/x86/include/asm/apm.h
5860--- linux-3.0.3/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861+++ linux-3.0.3/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866- "lcall *%%cs:apm_bios_entry\n\t"
5867+ "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875- "lcall *%%cs:apm_bios_entry\n\t"
5876+ "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_32.h linux-3.0.3/arch/x86/include/asm/atomic64_32.h
5881--- linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882+++ linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883@@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887+#ifdef CONFIG_PAX_REFCOUNT
5888+typedef struct {
5889+ u64 __aligned(8) counter;
5890+} atomic64_unchecked_t;
5891+#else
5892+typedef atomic64_t atomic64_unchecked_t;
5893+#endif
5894+
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903+ * @p: pointer to type atomic64_unchecked_t
5904+ * @o: expected value
5905+ * @n: new value
5906+ *
5907+ * Atomically sets @v to @n if it was equal to @o and returns
5908+ * the old value.
5909+ */
5910+
5911+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912+{
5913+ return cmpxchg64(&v->counter, o, n);
5914+}
5915+
5916+/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924+ * atomic64_set_unchecked - set atomic64 variable
5925+ * @v: pointer to type atomic64_unchecked_t
5926+ * @n: value to assign
5927+ *
5928+ * Atomically sets the value of @v to @n.
5929+ */
5930+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931+{
5932+ unsigned high = (unsigned)(i >> 32);
5933+ unsigned low = (unsigned)i;
5934+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5935+ : "+b" (low), "+c" (high)
5936+ : "S" (v)
5937+ : "eax", "edx", "memory"
5938+ );
5939+}
5940+
5941+/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949+ * atomic64_read_unchecked - read atomic64 variable
5950+ * @v: pointer to type atomic64_unchecked_t
5951+ *
5952+ * Atomically reads the value of @v and returns it.
5953+ */
5954+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955+{
5956+ long long r;
5957+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958+ : "=A" (r), "+c" (v)
5959+ : : "memory"
5960+ );
5961+ return r;
5962+ }
5963+
5964+/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972+/**
5973+ * atomic64_add_return_unchecked - add and return
5974+ * @i: integer value to add
5975+ * @v: pointer to type atomic64_unchecked_t
5976+ *
5977+ * Atomically adds @i to @v and returns @i + *@v
5978+ */
5979+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980+{
5981+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982+ : "+A" (i), "+c" (v)
5983+ : : "memory"
5984+ );
5985+ return i;
5986+}
5987+
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996+{
5997+ long long a;
5998+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999+ : "=A" (a)
6000+ : "S" (v)
6001+ : "memory", "ecx"
6002+ );
6003+ return a;
6004+}
6005+
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013+ * atomic64_add_unchecked - add integer to atomic64 variable
6014+ * @i: integer value to add
6015+ * @v: pointer to type atomic64_unchecked_t
6016+ *
6017+ * Atomically adds @i to @v.
6018+ */
6019+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020+{
6021+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022+ : "+A" (i), "+c" (v)
6023+ : : "memory"
6024+ );
6025+ return i;
6026+}
6027+
6028+/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_64.h linux-3.0.3/arch/x86/include/asm/atomic64_64.h
6033--- linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034+++ linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035@@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039- return (*(volatile long *)&(v)->counter);
6040+ return (*(volatile const long *)&(v)->counter);
6041+}
6042+
6043+/**
6044+ * atomic64_read_unchecked - read atomic64 variable
6045+ * @v: pointer of type atomic64_unchecked_t
6046+ *
6047+ * Atomically reads the value of @v.
6048+ * Doesn't imply a read memory barrier.
6049+ */
6050+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051+{
6052+ return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060+ * atomic64_set_unchecked - set atomic64 variable
6061+ * @v: pointer to type atomic64_unchecked_t
6062+ * @i: required value
6063+ *
6064+ * Atomically sets the value of @v to @i.
6065+ */
6066+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067+{
6068+ v->counter = i;
6069+}
6070+
6071+/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080+
6081+#ifdef CONFIG_PAX_REFCOUNT
6082+ "jno 0f\n"
6083+ LOCK_PREFIX "subq %1,%0\n"
6084+ "int $4\n0:\n"
6085+ _ASM_EXTABLE(0b, 0b)
6086+#endif
6087+
6088+ : "=m" (v->counter)
6089+ : "er" (i), "m" (v->counter));
6090+}
6091+
6092+/**
6093+ * atomic64_add_unchecked - add integer to atomic64 variable
6094+ * @i: integer value to add
6095+ * @v: pointer to type atomic64_unchecked_t
6096+ *
6097+ * Atomically adds @i to @v.
6098+ */
6099+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100+{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108- asm volatile(LOCK_PREFIX "subq %1,%0"
6109+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110+
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ "jno 0f\n"
6113+ LOCK_PREFIX "addq %1,%0\n"
6114+ "int $4\n0:\n"
6115+ _ASM_EXTABLE(0b, 0b)
6116+#endif
6117+
6118+ : "=m" (v->counter)
6119+ : "er" (i), "m" (v->counter));
6120+}
6121+
6122+/**
6123+ * atomic64_sub_unchecked - subtract the atomic64 variable
6124+ * @i: integer value to subtract
6125+ * @v: pointer to type atomic64_unchecked_t
6126+ *
6127+ * Atomically subtracts @i from @v.
6128+ */
6129+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130+{
6131+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141+
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "jno 0f\n"
6144+ LOCK_PREFIX "addq %2,%0\n"
6145+ "int $4\n0:\n"
6146+ _ASM_EXTABLE(0b, 0b)
6147+#endif
6148+
6149+ "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157+ asm volatile(LOCK_PREFIX "incq %0\n"
6158+
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ "jno 0f\n"
6161+ LOCK_PREFIX "decq %0\n"
6162+ "int $4\n0:\n"
6163+ _ASM_EXTABLE(0b, 0b)
6164+#endif
6165+
6166+ : "=m" (v->counter)
6167+ : "m" (v->counter));
6168+}
6169+
6170+/**
6171+ * atomic64_inc_unchecked - increment atomic64 variable
6172+ * @v: pointer to type atomic64_unchecked_t
6173+ *
6174+ * Atomically increments @v by 1.
6175+ */
6176+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177+{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185- asm volatile(LOCK_PREFIX "decq %0"
6186+ asm volatile(LOCK_PREFIX "decq %0\n"
6187+
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ "jno 0f\n"
6190+ LOCK_PREFIX "incq %0\n"
6191+ "int $4\n0:\n"
6192+ _ASM_EXTABLE(0b, 0b)
6193+#endif
6194+
6195+ : "=m" (v->counter)
6196+ : "m" (v->counter));
6197+}
6198+
6199+/**
6200+ * atomic64_dec_unchecked - decrement atomic64 variable
6201+ * @v: pointer to type atomic64_t
6202+ *
6203+ * Atomically decrements @v by 1.
6204+ */
6205+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206+{
6207+ asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216+ asm volatile(LOCK_PREFIX "decq %0\n"
6217+
6218+#ifdef CONFIG_PAX_REFCOUNT
6219+ "jno 0f\n"
6220+ LOCK_PREFIX "incq %0\n"
6221+ "int $4\n0:\n"
6222+ _ASM_EXTABLE(0b, 0b)
6223+#endif
6224+
6225+ "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234+ asm volatile(LOCK_PREFIX "incq %0\n"
6235+
6236+#ifdef CONFIG_PAX_REFCOUNT
6237+ "jno 0f\n"
6238+ LOCK_PREFIX "decq %0\n"
6239+ "int $4\n0:\n"
6240+ _ASM_EXTABLE(0b, 0b)
6241+#endif
6242+
6243+ "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253+
6254+#ifdef CONFIG_PAX_REFCOUNT
6255+ "jno 0f\n"
6256+ LOCK_PREFIX "subq %2,%0\n"
6257+ "int $4\n0:\n"
6258+ _ASM_EXTABLE(0b, 0b)
6259+#endif
6260+
6261+ "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271+
6272+#ifdef CONFIG_PAX_REFCOUNT
6273+ "jno 0f\n"
6274+ "movq %0, %1\n"
6275+ "int $4\n0:\n"
6276+ _ASM_EXTABLE(0b, 0b)
6277+#endif
6278+
6279+ : "+r" (i), "+m" (v->counter)
6280+ : : "memory");
6281+ return i + __i;
6282+}
6283+
6284+/**
6285+ * atomic64_add_return_unchecked - add and return
6286+ * @i: integer value to add
6287+ * @v: pointer to type atomic64_unchecked_t
6288+ *
6289+ * Atomically adds @i to @v and returns @i + @v
6290+ */
6291+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292+{
6293+ long __i = i;
6294+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303+{
6304+ return atomic64_add_return_unchecked(1, v);
6305+}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314+{
6315+ return cmpxchg(&v->counter, old, new);
6316+}
6317+
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325- long c, old;
6326+ long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329- if (unlikely(c == (u)))
6330+ if (unlikely(c == u))
6331 break;
6332- old = atomic64_cmpxchg((v), c, c + (a));
6333+
6334+ asm volatile("add %2,%0\n"
6335+
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "jno 0f\n"
6338+ "sub %2,%0\n"
6339+ "int $4\n0:\n"
6340+ _ASM_EXTABLE(0b, 0b)
6341+#endif
6342+
6343+ : "=r" (new)
6344+ : "0" (c), "ir" (a));
6345+
6346+ old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351- return c != (u);
6352+ return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356diff -urNp linux-3.0.3/arch/x86/include/asm/atomic.h linux-3.0.3/arch/x86/include/asm/atomic.h
6357--- linux-3.0.3/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358+++ linux-3.0.3/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359@@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363- return (*(volatile int *)&(v)->counter);
6364+ return (*(volatile const int *)&(v)->counter);
6365+}
6366+
6367+/**
6368+ * atomic_read_unchecked - read atomic variable
6369+ * @v: pointer of type atomic_unchecked_t
6370+ *
6371+ * Atomically reads the value of @v.
6372+ */
6373+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374+{
6375+ return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383+ * atomic_set_unchecked - set atomic variable
6384+ * @v: pointer of type atomic_unchecked_t
6385+ * @i: required value
6386+ *
6387+ * Atomically sets the value of @v to @i.
6388+ */
6389+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390+{
6391+ v->counter = i;
6392+}
6393+
6394+/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "addl %1,%0"
6403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "subl %1,%0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "+m" (v->counter)
6413+ : "ir" (i));
6414+}
6415+
6416+/**
6417+ * atomic_add_unchecked - add integer to atomic variable
6418+ * @i: integer value to add
6419+ * @v: pointer of type atomic_unchecked_t
6420+ *
6421+ * Atomically adds @i to @v.
6422+ */
6423+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424+{
6425+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433- asm volatile(LOCK_PREFIX "subl %1,%0"
6434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435+
6436+#ifdef CONFIG_PAX_REFCOUNT
6437+ "jno 0f\n"
6438+ LOCK_PREFIX "addl %1,%0\n"
6439+ "int $4\n0:\n"
6440+ _ASM_EXTABLE(0b, 0b)
6441+#endif
6442+
6443+ : "+m" (v->counter)
6444+ : "ir" (i));
6445+}
6446+
6447+/**
6448+ * atomic_sub_unchecked - subtract integer from atomic variable
6449+ * @i: integer value to subtract
6450+ * @v: pointer of type atomic_unchecked_t
6451+ *
6452+ * Atomically subtracts @i from @v.
6453+ */
6454+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455+{
6456+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466+
6467+#ifdef CONFIG_PAX_REFCOUNT
6468+ "jno 0f\n"
6469+ LOCK_PREFIX "addl %2,%0\n"
6470+ "int $4\n0:\n"
6471+ _ASM_EXTABLE(0b, 0b)
6472+#endif
6473+
6474+ "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482- asm volatile(LOCK_PREFIX "incl %0"
6483+ asm volatile(LOCK_PREFIX "incl %0\n"
6484+
6485+#ifdef CONFIG_PAX_REFCOUNT
6486+ "jno 0f\n"
6487+ LOCK_PREFIX "decl %0\n"
6488+ "int $4\n0:\n"
6489+ _ASM_EXTABLE(0b, 0b)
6490+#endif
6491+
6492+ : "+m" (v->counter));
6493+}
6494+
6495+/**
6496+ * atomic_inc_unchecked - increment atomic variable
6497+ * @v: pointer of type atomic_unchecked_t
6498+ *
6499+ * Atomically increments @v by 1.
6500+ */
6501+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502+{
6503+ asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511- asm volatile(LOCK_PREFIX "decl %0"
6512+ asm volatile(LOCK_PREFIX "decl %0\n"
6513+
6514+#ifdef CONFIG_PAX_REFCOUNT
6515+ "jno 0f\n"
6516+ LOCK_PREFIX "incl %0\n"
6517+ "int $4\n0:\n"
6518+ _ASM_EXTABLE(0b, 0b)
6519+#endif
6520+
6521+ : "+m" (v->counter));
6522+}
6523+
6524+/**
6525+ * atomic_dec_unchecked - decrement atomic variable
6526+ * @v: pointer of type atomic_unchecked_t
6527+ *
6528+ * Atomically decrements @v by 1.
6529+ */
6530+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531+{
6532+ asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541+ asm volatile(LOCK_PREFIX "decl %0\n"
6542+
6543+#ifdef CONFIG_PAX_REFCOUNT
6544+ "jno 0f\n"
6545+ LOCK_PREFIX "incl %0\n"
6546+ "int $4\n0:\n"
6547+ _ASM_EXTABLE(0b, 0b)
6548+#endif
6549+
6550+ "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559+ asm volatile(LOCK_PREFIX "incl %0\n"
6560+
6561+#ifdef CONFIG_PAX_REFCOUNT
6562+ "jno 0f\n"
6563+ LOCK_PREFIX "decl %0\n"
6564+ "int $4\n0:\n"
6565+ _ASM_EXTABLE(0b, 0b)
6566+#endif
6567+
6568+ "sete %1\n"
6569+ : "+m" (v->counter), "=qm" (c)
6570+ : : "memory");
6571+ return c != 0;
6572+}
6573+
6574+/**
6575+ * atomic_inc_and_test_unchecked - increment and test
6576+ * @v: pointer of type atomic_unchecked_t
6577+ *
6578+ * Atomically increments @v by 1
6579+ * and returns true if the result is zero, or false for all
6580+ * other cases.
6581+ */
6582+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583+{
6584+ unsigned char c;
6585+
6586+ asm volatile(LOCK_PREFIX "incl %0\n"
6587+ "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597+
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "jno 0f\n"
6600+ LOCK_PREFIX "subl %2,%0\n"
6601+ "int $4\n0:\n"
6602+ _ASM_EXTABLE(0b, 0b)
6603+#endif
6604+
6605+ "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614+
6615+#ifdef CONFIG_PAX_REFCOUNT
6616+ "jno 0f\n"
6617+ "movl %0, %1\n"
6618+ "int $4\n0:\n"
6619+ _ASM_EXTABLE(0b, 0b)
6620+#endif
6621+
6622+ : "+r" (i), "+m" (v->counter)
6623+ : : "memory");
6624+ return i + __i;
6625+
6626+#ifdef CONFIG_M386
6627+no_xadd: /* Legacy 386 processor */
6628+ local_irq_save(flags);
6629+ __i = atomic_read(v);
6630+ atomic_set(v, i + __i);
6631+ local_irq_restore(flags);
6632+ return i + __i;
6633+#endif
6634+}
6635+
6636+/**
6637+ * atomic_add_return_unchecked - add integer and return
6638+ * @v: pointer of type atomic_unchecked_t
6639+ * @i: integer value to add
6640+ *
6641+ * Atomically adds @i to @v and returns @i + @v
6642+ */
6643+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644+{
6645+ int __i;
6646+#ifdef CONFIG_M386
6647+ unsigned long flags;
6648+ if (unlikely(boot_cpu_data.x86 <= 3))
6649+ goto no_xadd;
6650+#endif
6651+ /* Modern 486+ processor */
6652+ __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661+{
6662+ return atomic_add_return_unchecked(1, v);
6663+}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672+{
6673+ return cmpxchg(&v->counter, old, new);
6674+}
6675+
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682+{
6683+ return xchg(&v->counter, new);
6684+}
6685+
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693- int c, old;
6694+ int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697- if (unlikely(c == (u)))
6698+ if (unlikely(c == u))
6699 break;
6700- old = atomic_cmpxchg((v), c, c + (a));
6701+
6702+ asm volatile("addl %2,%0\n"
6703+
6704+#ifdef CONFIG_PAX_REFCOUNT
6705+ "jno 0f\n"
6706+ "subl %2,%0\n"
6707+ "int $4\n0:\n"
6708+ _ASM_EXTABLE(0b, 0b)
6709+#endif
6710+
6711+ : "=r" (new)
6712+ : "0" (c), "ir" (a));
6713+
6714+ old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719- return c != (u);
6720+ return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725+/**
6726+ * atomic_inc_not_zero_hint - increment if not null
6727+ * @v: pointer of type atomic_t
6728+ * @hint: probable value of the atomic before the increment
6729+ *
6730+ * This version of atomic_inc_not_zero() gives a hint of probable
6731+ * value of the atomic. This helps processor to not read the memory
6732+ * before doing the atomic read/modify/write cycle, lowering
6733+ * number of bus transactions on some arches.
6734+ *
6735+ * Returns: 0 if increment was not done, 1 otherwise.
6736+ */
6737+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739+{
6740+ int val, c = hint, new;
6741+
6742+ /* sanity test, should be removed by compiler if hint is a constant */
6743+ if (!hint)
6744+ return atomic_inc_not_zero(v);
6745+
6746+ do {
6747+ asm volatile("incl %0\n"
6748+
6749+#ifdef CONFIG_PAX_REFCOUNT
6750+ "jno 0f\n"
6751+ "decl %0\n"
6752+ "int $4\n0:\n"
6753+ _ASM_EXTABLE(0b, 0b)
6754+#endif
6755+
6756+ : "=r" (new)
6757+ : "0" (c));
6758+
6759+ val = atomic_cmpxchg(v, c, new);
6760+ if (val == c)
6761+ return 1;
6762+ c = val;
6763+ } while (c);
6764+
6765+ return 0;
6766+}
6767+
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771diff -urNp linux-3.0.3/arch/x86/include/asm/bitops.h linux-3.0.3/arch/x86/include/asm/bitops.h
6772--- linux-3.0.3/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773+++ linux-3.0.3/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774@@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783diff -urNp linux-3.0.3/arch/x86/include/asm/boot.h linux-3.0.3/arch/x86/include/asm/boot.h
6784--- linux-3.0.3/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785+++ linux-3.0.3/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786@@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795+#ifndef __ASSEMBLY__
6796+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798+#endif
6799+
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803diff -urNp linux-3.0.3/arch/x86/include/asm/cacheflush.h linux-3.0.3/arch/x86/include/asm/cacheflush.h
6804--- linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805+++ linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810- return -1;
6811+ return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815diff -urNp linux-3.0.3/arch/x86/include/asm/cache.h linux-3.0.3/arch/x86/include/asm/cache.h
6816--- linux-3.0.3/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817+++ linux-3.0.3/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818@@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826+#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834diff -urNp linux-3.0.3/arch/x86/include/asm/checksum_32.h linux-3.0.3/arch/x86/include/asm/checksum_32.h
6835--- linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836+++ linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842+ int len, __wsum sum,
6843+ int *src_err_ptr, int *dst_err_ptr);
6844+
6845+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846+ int len, __wsum sum,
6847+ int *src_err_ptr, int *dst_err_ptr);
6848+
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856- return csum_partial_copy_generic((__force void *)src, dst,
6857+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865- return csum_partial_copy_generic(src, (__force void *)dst,
6866+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870diff -urNp linux-3.0.3/arch/x86/include/asm/cpufeature.h linux-3.0.3/arch/x86/include/asm/cpufeature.h
6871--- linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872+++ linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877- ".section .altinstr_replacement,\"ax\"\n"
6878+ ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882diff -urNp linux-3.0.3/arch/x86/include/asm/desc_defs.h linux-3.0.3/arch/x86/include/asm/desc_defs.h
6883--- linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884+++ linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885@@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889+ struct {
6890+ u16 offset_low;
6891+ u16 seg;
6892+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893+ unsigned offset_high: 16;
6894+ } gate;
6895 };
6896 } __attribute__((packed));
6897
6898diff -urNp linux-3.0.3/arch/x86/include/asm/desc.h linux-3.0.3/arch/x86/include/asm/desc.h
6899--- linux-3.0.3/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900+++ linux-3.0.3/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901@@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905+#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913+ desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921-extern gate_desc idt_table[];
6922-
6923-struct gdt_page {
6924- struct desc_struct gdt[GDT_ENTRIES];
6925-} __attribute__((aligned(PAGE_SIZE)));
6926-
6927-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928+extern gate_desc idt_table[256];
6929
6930+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933- return per_cpu(gdt_page, cpu).gdt;
6934+ return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942- gate->a = (seg << 16) | (base & 0xffff);
6943- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944+ gate->gate.offset_low = base;
6945+ gate->gate.seg = seg;
6946+ gate->gate.reserved = 0;
6947+ gate->gate.type = type;
6948+ gate->gate.s = 0;
6949+ gate->gate.dpl = dpl;
6950+ gate->gate.p = 1;
6951+ gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959+ pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961+ pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966+ pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968+ pax_close_kernel();
6969 }
6970
6971 static inline void
6972@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976+ pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978+ pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986+ pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996+ pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999+ pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007-static inline void _set_gate(int gate, unsigned type, void *addr,
7008+static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016-static inline void set_intr_gate(unsigned int n, void *addr)
7017+static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025-static inline void set_system_intr_gate(unsigned int n, void *addr)
7026+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032-static inline void set_system_trap_gate(unsigned int n, void *addr)
7033+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039-static inline void set_trap_gate(unsigned int n, void *addr)
7040+static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066+#ifdef CONFIG_X86_32
7067+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068+{
7069+ struct desc_struct d;
7070+
7071+ if (likely(limit))
7072+ limit = (limit - 1UL) >> PAGE_SHIFT;
7073+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075+}
7076+#endif
7077+
7078 #endif /* _ASM_X86_DESC_H */
7079diff -urNp linux-3.0.3/arch/x86/include/asm/e820.h linux-3.0.3/arch/x86/include/asm/e820.h
7080--- linux-3.0.3/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081+++ linux-3.0.3/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082@@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086-#define BIOS_BEGIN 0x000a0000
7087+#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091diff -urNp linux-3.0.3/arch/x86/include/asm/elf.h linux-3.0.3/arch/x86/include/asm/elf.h
7092--- linux-3.0.3/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093+++ linux-3.0.3/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094@@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098+#ifdef CONFIG_PAX_SEGMEXEC
7099+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100+#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102+#endif
7103+
7104+#ifdef CONFIG_PAX_ASLR
7105+#ifdef CONFIG_X86_32
7106+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107+
7108+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110+#else
7111+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112+
7113+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115+#endif
7116+#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120@@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124- if (vdso_enabled) \
7125- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126- (unsigned long)current->mm->context.vdso); \
7127+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131@@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145-#define arch_randomize_brk arch_randomize_brk
7146-
7147 #endif /* _ASM_X86_ELF_H */
7148diff -urNp linux-3.0.3/arch/x86/include/asm/emergency-restart.h linux-3.0.3/arch/x86/include/asm/emergency-restart.h
7149--- linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150+++ linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151@@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155-extern void machine_emergency_restart(void);
7156+extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159diff -urNp linux-3.0.3/arch/x86/include/asm/futex.h linux-3.0.3/arch/x86/include/asm/futex.h
7160--- linux-3.0.3/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161+++ linux-3.0.3/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162@@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166+ typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178+ typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182@@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186- "+m" (*uaddr), "=&r" (tem) \
7187+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220diff -urNp linux-3.0.3/arch/x86/include/asm/hw_irq.h linux-3.0.3/arch/x86/include/asm/hw_irq.h
7221--- linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222+++ linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227-extern atomic_t irq_err_count;
7228-extern atomic_t irq_mis_count;
7229+extern atomic_unchecked_t irq_err_count;
7230+extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234diff -urNp linux-3.0.3/arch/x86/include/asm/i387.h linux-3.0.3/arch/x86/include/asm/i387.h
7235--- linux-3.0.3/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236+++ linux-3.0.3/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244+#endif
7245+
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256+#endif
7257+
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265- in L1 during context switch. The best choices are unfortunately
7266- different for UP and SMP */
7267-#ifdef CONFIG_SMP
7268-#define safe_address (__per_cpu_offset[0])
7269-#else
7270-#define safe_address (kstat_cpu(0).cpustat.user)
7271-#endif
7272+ in L1 during context switch. */
7273+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281- __save_init_fpu(me->task);
7282+ __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286diff -urNp linux-3.0.3/arch/x86/include/asm/io.h linux-3.0.3/arch/x86/include/asm/io.h
7287--- linux-3.0.3/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288+++ linux-3.0.3/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295+{
7296+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297+}
7298+
7299+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300+{
7301+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302+}
7303+
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307diff -urNp linux-3.0.3/arch/x86/include/asm/irqflags.h linux-3.0.3/arch/x86/include/asm/irqflags.h
7308--- linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309+++ linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318+
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322diff -urNp linux-3.0.3/arch/x86/include/asm/kprobes.h linux-3.0.3/arch/x86/include/asm/kprobes.h
7323--- linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324+++ linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329-#define MAX_STACK_SIZE 64
7330-#define MIN_STACK_SIZE(ADDR) \
7331- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332- THREAD_SIZE - (unsigned long)(ADDR))) \
7333- ? (MAX_STACK_SIZE) \
7334- : (((unsigned long)current_thread_info()) + \
7335- THREAD_SIZE - (unsigned long)(ADDR)))
7336+#define MAX_STACK_SIZE 64UL
7337+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341diff -urNp linux-3.0.3/arch/x86/include/asm/kvm_host.h linux-3.0.3/arch/x86/include/asm/kvm_host.h
7342--- linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343+++ linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-08-23 21:47:55.000000000 -0400
7344@@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348- atomic_t invlpg_counter;
7349+ atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353@@ -618,7 +618,7 @@ struct kvm_x86_ops {
7354 struct x86_instruction_info *info,
7355 enum x86_intercept_stage stage);
7356
7357- const struct trace_print_flags *exit_reasons_str;
7358+ const struct trace_print_flags * const exit_reasons_str;
7359 };
7360
7361 struct kvm_arch_async_pf {
7362diff -urNp linux-3.0.3/arch/x86/include/asm/local.h linux-3.0.3/arch/x86/include/asm/local.h
7363--- linux-3.0.3/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364+++ linux-3.0.3/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365@@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369- asm volatile(_ASM_INC "%0"
7370+ asm volatile(_ASM_INC "%0\n"
7371+
7372+#ifdef CONFIG_PAX_REFCOUNT
7373+ "jno 0f\n"
7374+ _ASM_DEC "%0\n"
7375+ "int $4\n0:\n"
7376+ _ASM_EXTABLE(0b, 0b)
7377+#endif
7378+
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384- asm volatile(_ASM_DEC "%0"
7385+ asm volatile(_ASM_DEC "%0\n"
7386+
7387+#ifdef CONFIG_PAX_REFCOUNT
7388+ "jno 0f\n"
7389+ _ASM_INC "%0\n"
7390+ "int $4\n0:\n"
7391+ _ASM_EXTABLE(0b, 0b)
7392+#endif
7393+
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399- asm volatile(_ASM_ADD "%1,%0"
7400+ asm volatile(_ASM_ADD "%1,%0\n"
7401+
7402+#ifdef CONFIG_PAX_REFCOUNT
7403+ "jno 0f\n"
7404+ _ASM_SUB "%1,%0\n"
7405+ "int $4\n0:\n"
7406+ _ASM_EXTABLE(0b, 0b)
7407+#endif
7408+
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415- asm volatile(_ASM_SUB "%1,%0"
7416+ asm volatile(_ASM_SUB "%1,%0\n"
7417+
7418+#ifdef CONFIG_PAX_REFCOUNT
7419+ "jno 0f\n"
7420+ _ASM_ADD "%1,%0\n"
7421+ "int $4\n0:\n"
7422+ _ASM_EXTABLE(0b, 0b)
7423+#endif
7424+
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432- asm volatile(_ASM_SUB "%2,%0; sete %1"
7433+ asm volatile(_ASM_SUB "%2,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ _ASM_ADD "%2,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450- asm volatile(_ASM_DEC "%0; sete %1"
7451+ asm volatile(_ASM_DEC "%0\n"
7452+
7453+#ifdef CONFIG_PAX_REFCOUNT
7454+ "jno 0f\n"
7455+ _ASM_INC "%0\n"
7456+ "int $4\n0:\n"
7457+ _ASM_EXTABLE(0b, 0b)
7458+#endif
7459+
7460+ "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468- asm volatile(_ASM_INC "%0; sete %1"
7469+ asm volatile(_ASM_INC "%0\n"
7470+
7471+#ifdef CONFIG_PAX_REFCOUNT
7472+ "jno 0f\n"
7473+ _ASM_DEC "%0\n"
7474+ "int $4\n0:\n"
7475+ _ASM_EXTABLE(0b, 0b)
7476+#endif
7477+
7478+ "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486- asm volatile(_ASM_ADD "%2,%0; sets %1"
7487+ asm volatile(_ASM_ADD "%2,%0\n"
7488+
7489+#ifdef CONFIG_PAX_REFCOUNT
7490+ "jno 0f\n"
7491+ _ASM_SUB "%2,%0\n"
7492+ "int $4\n0:\n"
7493+ _ASM_EXTABLE(0b, 0b)
7494+#endif
7495+
7496+ "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500@@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504- asm volatile(_ASM_XADD "%0, %1;"
7505+ asm volatile(_ASM_XADD "%0, %1\n"
7506+
7507+#ifdef CONFIG_PAX_REFCOUNT
7508+ "jno 0f\n"
7509+ _ASM_MOV "%0,%1\n"
7510+ "int $4\n0:\n"
7511+ _ASM_EXTABLE(0b, 0b)
7512+#endif
7513+
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517diff -urNp linux-3.0.3/arch/x86/include/asm/mman.h linux-3.0.3/arch/x86/include/asm/mman.h
7518--- linux-3.0.3/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519+++ linux-3.0.3/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520@@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524+#ifdef __KERNEL__
7525+#ifndef __ASSEMBLY__
7526+#ifdef CONFIG_X86_32
7527+#define arch_mmap_check i386_mmap_check
7528+int i386_mmap_check(unsigned long addr, unsigned long len,
7529+ unsigned long flags);
7530+#endif
7531+#endif
7532+#endif
7533+
7534 #endif /* _ASM_X86_MMAN_H */
7535diff -urNp linux-3.0.3/arch/x86/include/asm/mmu_context.h linux-3.0.3/arch/x86/include/asm/mmu_context.h
7536--- linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537+++ linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542+
7543+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544+ unsigned int i;
7545+ pgd_t *pgd;
7546+
7547+ pax_open_kernel();
7548+ pgd = get_cpu_pgd(smp_processor_id());
7549+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550+ set_pgd_batched(pgd+i, native_make_pgd(0));
7551+ pax_close_kernel();
7552+#endif
7553+
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562+ int tlbstate = TLBSTATE_OK;
7563+#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568+ tlbstate = percpu_read(cpu_tlbstate.state);
7569+#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576+#ifdef CONFIG_PAX_PER_CPU_PGD
7577+ pax_open_kernel();
7578+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580+ pax_close_kernel();
7581+ load_cr3(get_cpu_pgd(cpu));
7582+#else
7583 load_cr3(next->pgd);
7584+#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592- }
7593+
7594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595+ if (!(__supported_pte_mask & _PAGE_NX)) {
7596+ smp_mb__before_clear_bit();
7597+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598+ smp_mb__after_clear_bit();
7599+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7600+ }
7601+#endif
7602+
7603+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605+ prev->context.user_cs_limit != next->context.user_cs_limit))
7606+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608+ else if (unlikely(tlbstate != TLBSTATE_OK))
7609+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610+#endif
7611+#endif
7612+
7613+ }
7614 else {
7615+
7616+#ifdef CONFIG_PAX_PER_CPU_PGD
7617+ pax_open_kernel();
7618+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620+ pax_close_kernel();
7621+ load_cr3(get_cpu_pgd(cpu));
7622+#endif
7623+
7624+#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632+
7633+#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635+#endif
7636+
7637 load_LDT_nolock(&next->context);
7638+
7639+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640+ if (!(__supported_pte_mask & _PAGE_NX))
7641+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7642+#endif
7643+
7644+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645+#ifdef CONFIG_PAX_PAGEEXEC
7646+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647+#endif
7648+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649+#endif
7650+
7651 }
7652- }
7653 #endif
7654+ }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658diff -urNp linux-3.0.3/arch/x86/include/asm/mmu.h linux-3.0.3/arch/x86/include/asm/mmu.h
7659--- linux-3.0.3/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660+++ linux-3.0.3/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661@@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665- void *ldt;
7666+ struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670@@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674- void *vdso;
7675+ unsigned long vdso;
7676+
7677+#ifdef CONFIG_X86_32
7678+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679+ unsigned long user_cs_base;
7680+ unsigned long user_cs_limit;
7681+
7682+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683+ cpumask_t cpu_user_cs_mask;
7684+#endif
7685+
7686+#endif
7687+#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691diff -urNp linux-3.0.3/arch/x86/include/asm/module.h linux-3.0.3/arch/x86/include/asm/module.h
7692--- linux-3.0.3/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693+++ linux-3.0.3/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694@@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698+#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702@@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706-#ifdef CONFIG_X86_32
7707-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708+#ifdef CONFIG_PAX_MEMORY_UDEREF
7709+#define MODULE_PAX_UDEREF "UDEREF "
7710+#else
7711+#define MODULE_PAX_UDEREF ""
7712+#endif
7713+
7714+#ifdef CONFIG_PAX_KERNEXEC
7715+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716+#else
7717+#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722+#else
7723+#define MODULE_PAX_REFCOUNT ""
7724+#endif
7725+
7726+#ifdef CONFIG_GRKERNSEC
7727+#define MODULE_GRSEC "GRSECURITY "
7728+#else
7729+#define MODULE_GRSEC ""
7730+#endif
7731+
7732+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733+
7734 #endif /* _ASM_X86_MODULE_H */
7735diff -urNp linux-3.0.3/arch/x86/include/asm/page_64_types.h linux-3.0.3/arch/x86/include/asm/page_64_types.h
7736--- linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737+++ linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742-extern unsigned long phys_base;
7743+extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt.h linux-3.0.3/arch/x86/include/asm/paravirt.h
7748--- linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749+++ linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755+{
7756+ pgdval_t val = native_pgd_val(pgd);
7757+
7758+ if (sizeof(pgdval_t) > sizeof(long))
7759+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760+ val, (u64)val >> 32);
7761+ else
7762+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763+ val);
7764+}
7765+
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773+#ifdef CONFIG_PAX_KERNEXEC
7774+static inline unsigned long pax_open_kernel(void)
7775+{
7776+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777+}
7778+
7779+static inline unsigned long pax_close_kernel(void)
7780+{
7781+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782+}
7783+#else
7784+static inline unsigned long pax_open_kernel(void) { return 0; }
7785+static inline unsigned long pax_close_kernel(void) { return 0; }
7786+#endif
7787+
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791@@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795-#define PARA_INDIRECT(addr) *%cs:addr
7796+#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804+
7805+#define GET_CR0_INTO_RDI \
7806+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807+ mov %rax,%rdi
7808+
7809+#define SET_RDI_INTO_CR0 \
7810+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811+
7812+#define GET_CR3_INTO_RDI \
7813+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814+ mov %rax,%rdi
7815+
7816+#define SET_RDI_INTO_CR3 \
7817+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818+
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt_types.h linux-3.0.3/arch/x86/include/asm/paravirt_types.h
7823--- linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824+++ linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825@@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829-};
7830+} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837-};
7838+} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843-};
7844+} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852-};
7853+} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857@@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861-};
7862+} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866@@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874@@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878+
7879+#ifdef CONFIG_PAX_KERNEXEC
7880+ unsigned long (*pax_open_kernel)(void);
7881+ unsigned long (*pax_close_kernel)(void);
7882+#endif
7883+
7884 };
7885
7886 struct arch_spinlock;
7887@@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891-};
7892+} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896diff -urNp linux-3.0.3/arch/x86/include/asm/pgalloc.h linux-3.0.3/arch/x86/include/asm/pgalloc.h
7897--- linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898+++ linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904+}
7905+
7906+static inline void pmd_populate_user(struct mm_struct *mm,
7907+ pmd_t *pmd, pte_t *pte)
7908+{
7909+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-2level.h linux-3.0.3/arch/x86/include/asm/pgtable-2level.h
7914--- linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915+++ linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920+ pax_open_kernel();
7921 *pmdp = pmd;
7922+ pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32.h linux-3.0.3/arch/x86/include/asm/pgtable_32.h
7927--- linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928+++ linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929@@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933-extern pgd_t swapper_pg_dir[1024];
7934-extern pgd_t initial_page_table[1024];
7935-
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944+extern pgd_t initial_page_table[PTRS_PER_PGD];
7945+#ifdef CONFIG_X86_PAE
7946+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947+#endif
7948+
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956+ pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958+ pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962@@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966+#define HAVE_ARCH_UNMAPPED_AREA
7967+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968+
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h
7973--- linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974+++ linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975@@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979-# define PMD_SIZE (1UL << PMD_SHIFT)
7980+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988+#ifdef CONFIG_PAX_KERNEXEC
7989+#ifndef __ASSEMBLY__
7990+extern unsigned char MODULES_EXEC_VADDR[];
7991+extern unsigned char MODULES_EXEC_END[];
7992+#endif
7993+#include <asm/boot.h>
7994+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996+#else
7997+#define ktla_ktva(addr) (addr)
7998+#define ktva_ktla(addr) (addr)
7999+#endif
8000+
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-3level.h linux-3.0.3/arch/x86/include/asm/pgtable-3level.h
8005--- linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006+++ linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011+ pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013+ pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018+ pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020+ pax_close_kernel();
8021 }
8022
8023 /*
8024diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64.h linux-3.0.3/arch/x86/include/asm/pgtable_64.h
8025--- linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026+++ linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027@@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031+extern pud_t level3_vmalloc_pgt[512];
8032+extern pud_t level3_vmemmap_pgt[512];
8033+extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036-extern pmd_t level2_ident_pgt[512];
8037-extern pgd_t init_level4_pgt[];
8038+extern pmd_t level2_ident_pgt[512*2];
8039+extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047+ pax_open_kernel();
8048 *pmdp = pmd;
8049+ pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057+ pax_open_kernel();
8058+ *pgdp = pgd;
8059+ pax_close_kernel();
8060+}
8061+
8062+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063+{
8064 *pgdp = pgd;
8065 }
8066
8067diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h
8068--- linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069+++ linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074+#define MODULES_EXEC_VADDR MODULES_VADDR
8075+#define MODULES_EXEC_END MODULES_END
8076+
8077+#define ktla_ktva(addr) (addr)
8078+#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable.h linux-3.0.3/arch/x86/include/asm/pgtable.h
8082--- linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083+++ linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096+#define pax_open_kernel() native_pax_open_kernel()
8097+#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102+
8103+#ifdef CONFIG_PAX_KERNEXEC
8104+static inline unsigned long native_pax_open_kernel(void)
8105+{
8106+ unsigned long cr0;
8107+
8108+ preempt_disable();
8109+ barrier();
8110+ cr0 = read_cr0() ^ X86_CR0_WP;
8111+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112+ write_cr0(cr0);
8113+ return cr0 ^ X86_CR0_WP;
8114+}
8115+
8116+static inline unsigned long native_pax_close_kernel(void)
8117+{
8118+ unsigned long cr0;
8119+
8120+ cr0 = read_cr0() ^ X86_CR0_WP;
8121+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122+ write_cr0(cr0);
8123+ barrier();
8124+ preempt_enable_no_resched();
8125+ return cr0 ^ X86_CR0_WP;
8126+}
8127+#else
8128+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130+#endif
8131+
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136+static inline int pte_user(pte_t pte)
8137+{
8138+ return pte_val(pte) & _PAGE_USER;
8139+}
8140+
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148+static inline pte_t pte_mkread(pte_t pte)
8149+{
8150+ return __pte(pte_val(pte) | _PAGE_USER);
8151+}
8152+
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155- return pte_clear_flags(pte, _PAGE_NX);
8156+#ifdef CONFIG_X86_PAE
8157+ if (__supported_pte_mask & _PAGE_NX)
8158+ return pte_clear_flags(pte, _PAGE_NX);
8159+ else
8160+#endif
8161+ return pte_set_flags(pte, _PAGE_USER);
8162+}
8163+
8164+static inline pte_t pte_exprotect(pte_t pte)
8165+{
8166+#ifdef CONFIG_X86_PAE
8167+ if (__supported_pte_mask & _PAGE_NX)
8168+ return pte_set_flags(pte, _PAGE_NX);
8169+ else
8170+#endif
8171+ return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179+
8180+#ifdef CONFIG_PAX_PER_CPU_PGD
8181+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183+{
8184+ return cpu_pgd[cpu];
8185+}
8186+#endif
8187+
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206+
8207+#ifdef CONFIG_PAX_PER_CPU_PGD
8208+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209+#endif
8210+
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218+#ifdef CONFIG_X86_32
8219+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220+#else
8221+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223+
8224+#ifdef CONFIG_PAX_MEMORY_UDEREF
8225+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226+#else
8227+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228+#endif
8229+
8230+#endif
8231+
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242- memcpy(dst, src, count * sizeof(pgd_t));
8243+ pax_open_kernel();
8244+ while (count--)
8245+ *dst++ = *src++;
8246+ pax_close_kernel();
8247 }
8248
8249+#ifdef CONFIG_PAX_PER_CPU_PGD
8250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251+#endif
8252+
8253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255+#else
8256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257+#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_types.h linux-3.0.3/arch/x86/include/asm/pgtable_types.h
8262--- linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263+++ linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264@@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281@@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289@@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293-#else
8294+#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296+#else
8297+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301@@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307+
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311@@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322@@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337+#endif
8338
8339+#if PAGETABLE_LEVELS == 3
8340+#include <asm-generic/pgtable-nopud.h>
8341+#endif
8342+
8343+#if PAGETABLE_LEVELS == 2
8344+#include <asm-generic/pgtable-nopmd.h>
8345+#endif
8346+
8347+#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355-#include <asm-generic/pgtable-nopud.h>
8356-
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364-#include <asm-generic/pgtable-nopmd.h>
8365-
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373-extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377diff -urNp linux-3.0.3/arch/x86/include/asm/processor.h linux-3.0.3/arch/x86/include/asm/processor.h
8378--- linux-3.0.3/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379+++ linux-3.0.3/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380@@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385+extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393+
8394+#ifdef CONFIG_PAX_SEGMEXEC
8395+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397+#else
8398 #define STACK_TOP TASK_SIZE
8399-#define STACK_TOP_MAX STACK_TOP
8400+#endif
8401+
8402+#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423-#define KSTK_TOP(info) \
8424-({ \
8425- unsigned long *__ptr = (unsigned long *)(info); \
8426- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427-})
8428+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452- 0xc0000000 : 0xFFFFe000)
8453+ 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475+#ifdef CONFIG_PAX_SEGMEXEC
8476+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477+#endif
8478+
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482diff -urNp linux-3.0.3/arch/x86/include/asm/ptrace.h linux-3.0.3/arch/x86/include/asm/ptrace.h
8483--- linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484+++ linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489- * user_mode_vm(regs) determines whether a register set came from user mode.
8490+ * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496+ * be used.
8497 */
8498-static inline int user_mode(struct pt_regs *regs)
8499+static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504- return !!(regs->cs & 3);
8505+ return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509-static inline int user_mode_vm(struct pt_regs *regs)
8510+static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516- return user_mode(regs);
8517+ return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521diff -urNp linux-3.0.3/arch/x86/include/asm/reboot.h linux-3.0.3/arch/x86/include/asm/reboot.h
8522--- linux-3.0.3/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523+++ linux-3.0.3/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524@@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528- void (*restart)(char *cmd);
8529- void (*halt)(void);
8530- void (*power_off)(void);
8531+ void (* __noreturn restart)(char *cmd);
8532+ void (* __noreturn halt)(void);
8533+ void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536- void (*emergency_restart)(void);
8537-};
8538+ void (* __noreturn emergency_restart)(void);
8539+} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545-void machine_real_restart(unsigned int type);
8546+void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550diff -urNp linux-3.0.3/arch/x86/include/asm/rwsem.h linux-3.0.3/arch/x86/include/asm/rwsem.h
8551--- linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552+++ linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ "jno 0f\n"
8560+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8561+ "int $4\n0:\n"
8562+ _ASM_EXTABLE(0b, 0b)
8563+#endif
8564+
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "sub %3,%2\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+ "jno 0f\n"
8590+ "mov %1,(%2)\n"
8591+ "int $4\n0:\n"
8592+ _ASM_EXTABLE(0b, 0b)
8593+#endif
8594+
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602+
8603+#ifdef CONFIG_PAX_REFCOUNT
8604+ "jno 0f\n"
8605+ "mov %1,(%2)\n"
8606+ "int $4\n0:\n"
8607+ _ASM_EXTABLE(0b, 0b)
8608+#endif
8609+
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617+
8618+#ifdef CONFIG_PAX_REFCOUNT
8619+ "jno 0f\n"
8620+ "mov %1,(%2)\n"
8621+ "int $4\n0:\n"
8622+ _ASM_EXTABLE(0b, 0b)
8623+#endif
8624+
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632+
8633+#ifdef CONFIG_PAX_REFCOUNT
8634+ "jno 0f\n"
8635+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636+ "int $4\n0:\n"
8637+ _ASM_EXTABLE(0b, 0b)
8638+#endif
8639+
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649+
8650+#ifdef CONFIG_PAX_REFCOUNT
8651+ "jno 0f\n"
8652+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653+ "int $4\n0:\n"
8654+ _ASM_EXTABLE(0b, 0b)
8655+#endif
8656+
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664- asm volatile(LOCK_PREFIX "xadd %0,%1"
8665+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666+
8667+#ifdef CONFIG_PAX_REFCOUNT
8668+ "jno 0f\n"
8669+ "mov %0,%1\n"
8670+ "int $4\n0:\n"
8671+ _ASM_EXTABLE(0b, 0b)
8672+#endif
8673+
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677diff -urNp linux-3.0.3/arch/x86/include/asm/segment.h linux-3.0.3/arch/x86/include/asm/segment.h
8678--- linux-3.0.3/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679+++ linux-3.0.3/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680@@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684- * 29 - unused
8685- * 30 - unused
8686+ * 29 - PCI BIOS CS
8687+ * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691@@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696+
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700@@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706+
8707+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709+
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713@@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722@@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727+
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731@@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739diff -urNp linux-3.0.3/arch/x86/include/asm/smp.h linux-3.0.3/arch/x86/include/asm/smp.h
8740--- linux-3.0.3/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741+++ linux-3.0.3/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746-DECLARE_PER_CPU(int, cpu_number);
8747+DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751@@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755-};
8756+} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764-#define raw_smp_processor_id() (percpu_read(cpu_number))
8765-
8766-#define stack_smp_processor_id() \
8767-({ \
8768- struct thread_info *ti; \
8769- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770- ti->cpu; \
8771-})
8772+#define raw_smp_processor_id() (percpu_read(cpu_number))
8773+#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777diff -urNp linux-3.0.3/arch/x86/include/asm/spinlock.h linux-3.0.3/arch/x86/include/asm/spinlock.h
8778--- linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779+++ linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784+
8785+#ifdef CONFIG_PAX_REFCOUNT
8786+ "jno 0f\n"
8787+ LOCK_PREFIX " addl $1,(%0)\n"
8788+ "int $4\n0:\n"
8789+ _ASM_EXTABLE(0b, 0b)
8790+#endif
8791+
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799+
8800+#ifdef CONFIG_PAX_REFCOUNT
8801+ "jno 0f\n"
8802+ LOCK_PREFIX " addl %1,(%0)\n"
8803+ "int $4\n0:\n"
8804+ _ASM_EXTABLE(0b, 0b)
8805+#endif
8806+
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815+ asm volatile(LOCK_PREFIX "incl %0\n"
8816+
8817+#ifdef CONFIG_PAX_REFCOUNT
8818+ "jno 0f\n"
8819+ LOCK_PREFIX "decl %0\n"
8820+ "int $4\n0:\n"
8821+ _ASM_EXTABLE(0b, 0b)
8822+#endif
8823+
8824+ :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829- asm volatile(LOCK_PREFIX "addl %1, %0"
8830+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831+
8832+#ifdef CONFIG_PAX_REFCOUNT
8833+ "jno 0f\n"
8834+ LOCK_PREFIX "subl %1, %0\n"
8835+ "int $4\n0:\n"
8836+ _ASM_EXTABLE(0b, 0b)
8837+#endif
8838+
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842diff -urNp linux-3.0.3/arch/x86/include/asm/stackprotector.h linux-3.0.3/arch/x86/include/asm/stackprotector.h
8843--- linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844+++ linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845@@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858-#ifdef CONFIG_X86_32
8859+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863diff -urNp linux-3.0.3/arch/x86/include/asm/stacktrace.h linux-3.0.3/arch/x86/include/asm/stacktrace.h
8864--- linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865+++ linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866@@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870-struct thread_info;
8871+struct task_struct;
8872 struct stacktrace_ops;
8873
8874-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875- unsigned long *stack,
8876- unsigned long bp,
8877- const struct stacktrace_ops *ops,
8878- void *data,
8879- unsigned long *end,
8880- int *graph);
8881-
8882-extern unsigned long
8883-print_context_stack(struct thread_info *tinfo,
8884- unsigned long *stack, unsigned long bp,
8885- const struct stacktrace_ops *ops, void *data,
8886- unsigned long *end, int *graph);
8887-
8888-extern unsigned long
8889-print_context_stack_bp(struct thread_info *tinfo,
8890- unsigned long *stack, unsigned long bp,
8891- const struct stacktrace_ops *ops, void *data,
8892- unsigned long *end, int *graph);
8893+typedef unsigned long walk_stack_t(struct task_struct *task,
8894+ void *stack_start,
8895+ unsigned long *stack,
8896+ unsigned long bp,
8897+ const struct stacktrace_ops *ops,
8898+ void *data,
8899+ unsigned long *end,
8900+ int *graph);
8901+
8902+extern walk_stack_t print_context_stack;
8903+extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907@@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911- walk_stack_t walk_stack;
8912+ walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916diff -urNp linux-3.0.3/arch/x86/include/asm/system.h linux-3.0.3/arch/x86/include/asm/system.h
8917--- linux-3.0.3/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918+++ linux-3.0.3/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919@@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928@@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933+ [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941- return __limit + 1;
8942+ return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946@@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950-extern unsigned long arch_align_stack(unsigned long sp);
8951+#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956-void stop_this_cpu(void *dummy);
8957+void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961diff -urNp linux-3.0.3/arch/x86/include/asm/thread_info.h linux-3.0.3/arch/x86/include/asm/thread_info.h
8962--- linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963+++ linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964@@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968+#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972@@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976- struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980@@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984-#ifdef CONFIG_X86_32
8985- unsigned long previous_esp; /* ESP of the previous stack in
8986- case of nested (IRQ) stacks
8987- */
8988- __u8 supervisor_stack[0];
8989-#endif
8990+ unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994-#define INIT_THREAD_INFO(tsk) \
8995+#define INIT_THREAD_INFO \
8996 { \
8997- .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001@@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005-#define init_thread_info (init_thread_union.thread_info)
9006+#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010@@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014+#ifdef __ASSEMBLY__
9015+/* how to get the thread information struct from ASM */
9016+#define GET_THREAD_INFO(reg) \
9017+ mov PER_CPU_VAR(current_tinfo), reg
9018+
9019+/* use this one if reg already contains %esp */
9020+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021+#else
9022+/* how to get the thread information struct from C */
9023+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024+
9025+static __always_inline struct thread_info *current_thread_info(void)
9026+{
9027+ return percpu_read_stable(current_tinfo);
9028+}
9029+#endif
9030+
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034@@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038-
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042-/* how to get the thread information struct from C */
9043-static inline struct thread_info *current_thread_info(void)
9044-{
9045- return (struct thread_info *)
9046- (current_stack_pointer & ~(THREAD_SIZE - 1));
9047-}
9048-
9049-#else /* !__ASSEMBLY__ */
9050-
9051-/* how to get the thread information struct from ASM */
9052-#define GET_THREAD_INFO(reg) \
9053- movl $-THREAD_SIZE, reg; \
9054- andl %esp, reg
9055-
9056-/* use this one if reg already contains %esp */
9057-#define GET_THREAD_INFO_WITH_ESP(reg) \
9058- andl $-THREAD_SIZE, reg
9059-
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064-#include <asm/percpu.h>
9065-#define KERNEL_STACK_OFFSET (5*8)
9066-
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074-static inline struct thread_info *current_thread_info(void)
9075-{
9076- struct thread_info *ti;
9077- ti = (void *)(percpu_read_stable(kernel_stack) +
9078- KERNEL_STACK_OFFSET - THREAD_SIZE);
9079- return ti;
9080-}
9081-
9082-#else /* !__ASSEMBLY__ */
9083-
9084-/* how to get the thread information struct from ASM */
9085-#define GET_THREAD_INFO(reg) \
9086- movq PER_CPU_VAR(kernel_stack),reg ; \
9087- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088-
9089+/* how to get the current stack pointer from C */
9090+register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098+
9099+#define __HAVE_THREAD_FUNCTIONS
9100+#define task_thread_info(task) (&(task)->tinfo)
9101+#define task_stack_page(task) ((task)->stack)
9102+#define setup_thread_stack(p, org) do {} while (0)
9103+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104+
9105+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106+extern struct task_struct *alloc_task_struct_node(int node);
9107+extern void free_task_struct(struct task_struct *);
9108+
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_32.h linux-3.0.3/arch/x86/include/asm/uaccess_32.h
9112--- linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113+++ linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118+ pax_track_stack();
9119+
9120+ if ((long)n < 0)
9121+ return n;
9122+
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130+ if (!__builtin_constant_p(n))
9131+ check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139+
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146+ if ((long)n < 0)
9147+ return n;
9148+
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152@@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156+
9157+ pax_track_stack();
9158+
9159+ if ((long)n < 0)
9160+ return n;
9161+
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169+ if (!__builtin_constant_p(n))
9170+ check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178+
9179+ if ((long)n < 0)
9180+ return n;
9181+
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185@@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189- return __copy_from_user_ll_nocache_nozero(to, from, n);
9190-}
9191+ if ((long)n < 0)
9192+ return n;
9193
9194-unsigned long __must_check copy_to_user(void __user *to,
9195- const void *from, unsigned long n);
9196-unsigned long __must_check _copy_from_user(void *to,
9197- const void __user *from,
9198- unsigned long n);
9199+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9200+}
9201
9202+extern void copy_to_user_overflow(void)
9203+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9205+#else
9206+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207+#endif
9208+;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216-static inline unsigned long __must_check copy_from_user(void *to,
9217- const void __user *from,
9218- unsigned long n)
9219+/**
9220+ * copy_to_user: - Copy a block of data into user space.
9221+ * @to: Destination address, in user space.
9222+ * @from: Source address, in kernel space.
9223+ * @n: Number of bytes to copy.
9224+ *
9225+ * Context: User context only. This function may sleep.
9226+ *
9227+ * Copy data from kernel space to user space.
9228+ *
9229+ * Returns number of bytes that could not be copied.
9230+ * On success, this will be zero.
9231+ */
9232+static inline unsigned long __must_check
9233+copy_to_user(void __user *to, const void *from, unsigned long n)
9234+{
9235+ int sz = __compiletime_object_size(from);
9236+
9237+ if (unlikely(sz != -1 && sz < n))
9238+ copy_to_user_overflow();
9239+ else if (access_ok(VERIFY_WRITE, to, n))
9240+ n = __copy_to_user(to, from, n);
9241+ return n;
9242+}
9243+
9244+/**
9245+ * copy_from_user: - Copy a block of data from user space.
9246+ * @to: Destination address, in kernel space.
9247+ * @from: Source address, in user space.
9248+ * @n: Number of bytes to copy.
9249+ *
9250+ * Context: User context only. This function may sleep.
9251+ *
9252+ * Copy data from user space to kernel space.
9253+ *
9254+ * Returns number of bytes that could not be copied.
9255+ * On success, this will be zero.
9256+ *
9257+ * If some data could not be copied, this function will pad the copied
9258+ * data to the requested size using zero bytes.
9259+ */
9260+static inline unsigned long __must_check
9261+copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265- if (likely(sz == -1 || sz >= n))
9266- n = _copy_from_user(to, from, n);
9267- else
9268+ if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270-
9271+ else if (access_ok(VERIFY_READ, from, n))
9272+ n = __copy_from_user(to, from, n);
9273+ else if ((long)n > 0) {
9274+ if (!__builtin_constant_p(n))
9275+ check_object_size(to, n, false);
9276+ memset(to, 0, n);
9277+ }
9278 return n;
9279 }
9280
9281diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_64.h linux-3.0.3/arch/x86/include/asm/uaccess_64.h
9282--- linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283+++ linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284@@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288+#include <asm/pgtable.h>
9289+
9290+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298-__must_check unsigned long
9299-_copy_to_user(void __user *to, const void *from, unsigned len);
9300-__must_check unsigned long
9301-_copy_from_user(void *to, const void __user *from, unsigned len);
9302+static __always_inline __must_check unsigned long
9303+__copy_to_user(void __user *to, const void *from, unsigned len);
9304+static __always_inline __must_check unsigned long
9305+__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311- unsigned long n)
9312+ unsigned n)
9313 {
9314- int sz = __compiletime_object_size(to);
9315-
9316 might_fault();
9317- if (likely(sz == -1 || sz >= n))
9318- n = _copy_from_user(to, from, n);
9319-#ifdef CONFIG_DEBUG_VM
9320- else
9321- WARN(1, "Buffer overflow detected!\n");
9322-#endif
9323+
9324+ if (access_ok(VERIFY_READ, from, n))
9325+ n = __copy_from_user(to, from, n);
9326+ else if ((int)n > 0) {
9327+ if (!__builtin_constant_p(n))
9328+ check_object_size(to, n, false);
9329+ memset(to, 0, n);
9330+ }
9331 return n;
9332 }
9333
9334@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338- return _copy_to_user(dst, src, size);
9339+ if (access_ok(VERIFY_WRITE, dst, size))
9340+ size = __copy_to_user(dst, src, size);
9341+ return size;
9342 }
9343
9344 static __always_inline __must_check
9345-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348- int ret = 0;
9349+ int sz = __compiletime_object_size(dst);
9350+ unsigned ret = 0;
9351
9352 might_fault();
9353- if (!__builtin_constant_p(size))
9354- return copy_user_generic(dst, (__force void *)src, size);
9355+
9356+ pax_track_stack();
9357+
9358+ if ((int)size < 0)
9359+ return size;
9360+
9361+#ifdef CONFIG_PAX_MEMORY_UDEREF
9362+ if (!__access_ok(VERIFY_READ, src, size))
9363+ return size;
9364+#endif
9365+
9366+ if (unlikely(sz != -1 && sz < size)) {
9367+#ifdef CONFIG_DEBUG_VM
9368+ WARN(1, "Buffer overflow detected!\n");
9369+#endif
9370+ return size;
9371+ }
9372+
9373+ if (!__builtin_constant_p(size)) {
9374+ check_object_size(dst, size, false);
9375+
9376+#ifdef CONFIG_PAX_MEMORY_UDEREF
9377+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378+ src += PAX_USER_SHADOW_BASE;
9379+#endif
9380+
9381+ return copy_user_generic(dst, (__force const void *)src, size);
9382+ }
9383 switch (size) {
9384- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407- (u16 __user *)(8 + (char __user *)src),
9408+ (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418- (u64 __user *)(8 + (char __user *)src),
9419+ (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423- return copy_user_generic(dst, (__force void *)src, size);
9424+
9425+#ifdef CONFIG_PAX_MEMORY_UDEREF
9426+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427+ src += PAX_USER_SHADOW_BASE;
9428+#endif
9429+
9430+ return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438- int ret = 0;
9439+ int sz = __compiletime_object_size(src);
9440+ unsigned ret = 0;
9441
9442 might_fault();
9443- if (!__builtin_constant_p(size))
9444+
9445+ pax_track_stack();
9446+
9447+ if ((int)size < 0)
9448+ return size;
9449+
9450+#ifdef CONFIG_PAX_MEMORY_UDEREF
9451+ if (!__access_ok(VERIFY_WRITE, dst, size))
9452+ return size;
9453+#endif
9454+
9455+ if (unlikely(sz != -1 && sz < size)) {
9456+#ifdef CONFIG_DEBUG_VM
9457+ WARN(1, "Buffer overflow detected!\n");
9458+#endif
9459+ return size;
9460+ }
9461+
9462+ if (!__builtin_constant_p(size)) {
9463+ check_object_size(src, size, true);
9464+
9465+#ifdef CONFIG_PAX_MEMORY_UDEREF
9466+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467+ dst += PAX_USER_SHADOW_BASE;
9468+#endif
9469+
9470 return copy_user_generic((__force void *)dst, src, size);
9471+ }
9472 switch (size) {
9473- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512+
9513+#ifdef CONFIG_PAX_MEMORY_UDEREF
9514+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515+ dst += PAX_USER_SHADOW_BASE;
9516+#endif
9517+
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526- int ret = 0;
9527+ unsigned ret = 0;
9528
9529 might_fault();
9530- if (!__builtin_constant_p(size))
9531+
9532+ if ((int)size < 0)
9533+ return size;
9534+
9535+#ifdef CONFIG_PAX_MEMORY_UDEREF
9536+ if (!__access_ok(VERIFY_READ, src, size))
9537+ return size;
9538+ if (!__access_ok(VERIFY_WRITE, dst, size))
9539+ return size;
9540+#endif
9541+
9542+ if (!__builtin_constant_p(size)) {
9543+
9544+#ifdef CONFIG_PAX_MEMORY_UDEREF
9545+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546+ src += PAX_USER_SHADOW_BASE;
9547+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548+ dst += PAX_USER_SHADOW_BASE;
9549+#endif
9550+
9551 return copy_user_generic((__force void *)dst,
9552- (__force void *)src, size);
9553+ (__force const void *)src, size);
9554+ }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558- __get_user_asm(tmp, (u8 __user *)src,
9559+ __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567- __get_user_asm(tmp, (u16 __user *)src,
9568+ __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576- __get_user_asm(tmp, (u32 __user *)src,
9577+ __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585- __get_user_asm(tmp, (u64 __user *)src,
9586+ __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594+
9595+#ifdef CONFIG_PAX_MEMORY_UDEREF
9596+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597+ src += PAX_USER_SHADOW_BASE;
9598+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599+ dst += PAX_USER_SHADOW_BASE;
9600+#endif
9601+
9602 return copy_user_generic((__force void *)dst,
9603- (__force void *)src, size);
9604+ (__force const void *)src, size);
9605 }
9606 }
9607
9608@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612+ pax_track_stack();
9613+
9614+ if ((int)size < 0)
9615+ return size;
9616+
9617+#ifdef CONFIG_PAX_MEMORY_UDEREF
9618+ if (!__access_ok(VERIFY_READ, src, size))
9619+ return size;
9620+
9621+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622+ src += PAX_USER_SHADOW_BASE;
9623+#endif
9624+
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628-static __must_check __always_inline int
9629+static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632+ if ((int)size < 0)
9633+ return size;
9634+
9635+#ifdef CONFIG_PAX_MEMORY_UDEREF
9636+ if (!__access_ok(VERIFY_WRITE, dst, size))
9637+ return size;
9638+
9639+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640+ dst += PAX_USER_SHADOW_BASE;
9641+#endif
9642+
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646-extern long __copy_user_nocache(void *dst, const void __user *src,
9647+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650-static inline int
9651-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655+
9656+ if ((int)size < 0)
9657+ return size;
9658+
9659+#ifdef CONFIG_PAX_MEMORY_UDEREF
9660+ if (!__access_ok(VERIFY_READ, src, size))
9661+ return size;
9662+#endif
9663+
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667-static inline int
9668-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672+ if ((int)size < 0)
9673+ return size;
9674+
9675+#ifdef CONFIG_PAX_MEMORY_UDEREF
9676+ if (!__access_ok(VERIFY_READ, src, size))
9677+ return size;
9678+#endif
9679+
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683-unsigned long
9684+extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess.h linux-3.0.3/arch/x86/include/asm/uaccess.h
9689--- linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690+++ linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691@@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695+#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703+
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707@@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712+void __set_fs(mm_segment_t x);
9713+void set_fs(mm_segment_t x);
9714+#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716+#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720@@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726+#define access_ok(type, addr, size) \
9727+({ \
9728+ long __size = size; \
9729+ unsigned long __addr = (unsigned long)addr; \
9730+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9731+ unsigned long __end_ao = __addr + __size - 1; \
9732+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734+ while(__addr_ao <= __end_ao) { \
9735+ char __c_ao; \
9736+ __addr_ao += PAGE_SIZE; \
9737+ if (__size > PAGE_SIZE) \
9738+ cond_resched(); \
9739+ if (__get_user(__c_ao, (char __user *)__addr)) \
9740+ break; \
9741+ if (type != VERIFY_WRITE) { \
9742+ __addr = __addr_ao; \
9743+ continue; \
9744+ } \
9745+ if (__put_user(__c_ao, (char __user *)__addr)) \
9746+ break; \
9747+ __addr = __addr_ao; \
9748+ } \
9749+ } \
9750+ __ret_ao; \
9751+})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759-
9760+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761+#define __copyuser_seg "gs;"
9762+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764+#else
9765+#define __copyuser_seg
9766+#define __COPYUSER_SET_ES
9767+#define __COPYUSER_RESTORE_ES
9768+#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772- asm volatile("1: movl %%eax,0(%2)\n" \
9773- "2: movl %%edx,4(%2)\n" \
9774+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783- asm volatile("1: movl %%eax,0(%1)\n" \
9784- "2: movl %%edx,4(%1)\n" \
9785+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790@@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799@@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803- : "=r" (err), ltype(x) \
9804+ : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808@@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817@@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822+ (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828-#define __m(x) (*(struct __large_struct __user *)(x))
9829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830+#define ____m(x) \
9831+({ \
9832+ unsigned long ____x = (unsigned long)(x); \
9833+ if (____x < PAX_USER_SHADOW_BASE) \
9834+ ____x += PAX_USER_SHADOW_BASE; \
9835+ (void __user *)____x; \
9836+})
9837+#else
9838+#define ____m(x) (x)
9839+#endif
9840+#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871+#define __get_user(x, ptr) get_user((x), (ptr))
9872+#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875+#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884+#define __put_user(x, ptr) put_user((x), (ptr))
9885+#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888+#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897+ (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901diff -urNp linux-3.0.3/arch/x86/include/asm/vgtod.h linux-3.0.3/arch/x86/include/asm/vgtod.h
9902--- linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903+++ linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908+ char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912diff -urNp linux-3.0.3/arch/x86/include/asm/x86_init.h linux-3.0.3/arch/x86/include/asm/x86_init.h
9913--- linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914+++ linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919-};
9920+} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924@@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928-};
9929+} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933@@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937-};
9938+} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942@@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946-};
9947+} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951@@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955-};
9956+} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960@@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964-};
9965+} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969@@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973-};
9974+} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978@@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982-};
9983+} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987@@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991-};
9992+} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996@@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000-};
10001+} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005@@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009-};
10010+} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014@@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018-};
10019+} __no_const;
10020
10021 struct pci_dev;
10022
10023@@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027-};
10028+} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032diff -urNp linux-3.0.3/arch/x86/include/asm/xsave.h linux-3.0.3/arch/x86/include/asm/xsave.h
10033--- linux-3.0.3/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034+++ linux-3.0.3/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042+#endif
10043+
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054+#endif
10055+
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059diff -urNp linux-3.0.3/arch/x86/Kconfig linux-3.0.3/arch/x86/Kconfig
10060--- linux-3.0.3/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061+++ linux-3.0.3/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062@@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066- depends on X86_32 && !CC_STACKPROTECTOR
10067+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071@@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075- depends on !X86_NUMAQ
10076+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084- depends on !X86_NUMAQ
10085+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093- default 0x78000000 if VMSPLIT_2G_OPT
10094+ default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098@@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102- depends on ACPI
10103+ depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107@@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111+ depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119+ range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127+ range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135- def_bool y
10136+ def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143diff -urNp linux-3.0.3/arch/x86/Kconfig.cpu linux-3.0.3/arch/x86/Kconfig.cpu
10144--- linux-3.0.3/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145+++ linux-3.0.3/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150- depends on M586MMX || M586TSC || M586 || M486 || M386
10151+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155@@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173diff -urNp linux-3.0.3/arch/x86/Kconfig.debug linux-3.0.3/arch/x86/Kconfig.debug
10174--- linux-3.0.3/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175+++ linux-3.0.3/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176@@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180- depends on DEBUG_KERNEL
10181+ depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189- depends on MODULES
10190+ depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile
10195--- linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196+++ linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201+ifdef CONSTIFY_PLUGIN
10202+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203+endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S
10208--- linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209+++ linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210@@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10215+ call verify_cpu
10216+
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220@@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224+# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228diff -urNp linux-3.0.3/arch/x86/kernel/acpi/sleep.c linux-3.0.3/arch/x86/kernel/acpi/sleep.c
10229--- linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230+++ linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235+
10236+ pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239+ pax_close_kernel();
10240+
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244diff -urNp linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S
10245--- linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246+++ linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247@@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251- movl %cs:saved_magic, %eax
10252- cmpl $0x12345678, %eax
10253+ cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257- movl saved_eip, %eax
10258- jmp *%eax
10259+ jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263diff -urNp linux-3.0.3/arch/x86/kernel/alternative.c linux-3.0.3/arch/x86/kernel/alternative.c
10264--- linux-3.0.3/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265+++ linux-3.0.3/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270- if (*ptr == 0x3e)
10271+ if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279- if (*ptr == 0xf0)
10280+ if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288- memcpy(insnbuf, p->instr, p->len);
10289+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297- (unsigned long)__smp_locks_end);
10298+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307+void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312- memcpy(addr, opcode, len);
10313+
10314+ pax_open_kernel();
10315+ memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317+ pax_close_kernel();
10318+
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326- unsigned long flags;
10327- char *vaddr;
10328+ unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330- int i;
10331+ size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334- pages[0] = vmalloc_to_page(addr);
10335- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336+ pages[0] = vmalloc_to_page(vaddr);
10337+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339- pages[0] = virt_to_page(addr);
10340+ pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342- pages[1] = virt_to_page(addr + PAGE_SIZE);
10343+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346- local_irq_save(flags);
10347- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348- if (pages[1])
10349- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352- clear_fixmap(FIX_TEXT_POKE0);
10353- if (pages[1])
10354- clear_fixmap(FIX_TEXT_POKE1);
10355- local_flush_tlb();
10356- sync_core();
10357- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358- that causes hangs on some VIA CPUs. */
10359+ text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362- local_irq_restore(flags);
10363+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367diff -urNp linux-3.0.3/arch/x86/kernel/apic/apic.c linux-3.0.3/arch/x86/kernel/apic/apic.c
10368--- linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369+++ linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374-unsigned int apic_verbosity;
10375+int apic_verbosity;
10376
10377 int pic_mode;
10378
10379@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383- atomic_inc(&irq_err_count);
10384+ atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392+ pax_track_stack();
10393+
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397diff -urNp linux-3.0.3/arch/x86/kernel/apic/io_apic.c linux-3.0.3/arch/x86/kernel/apic/io_apic.c
10398--- linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399+++ linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404-void lock_vector_lock(void)
10405+void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413-void unlock_vector_lock(void)
10414+void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422-atomic_t irq_mis_count;
10423+atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431- atomic_inc(&irq_mis_count);
10432+ atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436diff -urNp linux-3.0.3/arch/x86/kernel/apm_32.c linux-3.0.3/arch/x86/kernel/apm_32.c
10437--- linux-3.0.3/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438+++ linux-3.0.3/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452+
10453+ pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455+ pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463+
10464+ pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466+ pax_close_kernel();
10467+
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475+
10476+ pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478+ pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486+
10487+ pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489+ pax_close_kernel();
10490+
10491 put_cpu();
10492 return error;
10493 }
10494@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498+
10499+ pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506+ pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets_64.c linux-3.0.3/arch/x86/kernel/asm-offsets_64.c
10511--- linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512+++ linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513@@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517+ DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets.c linux-3.0.3/arch/x86/kernel/asm-offsets.c
10522--- linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523+++ linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524@@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533@@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537+
10538+#ifdef CONFIG_PAX_KERNEXEC
10539+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540+#endif
10541+
10542+#ifdef CONFIG_PAX_MEMORY_UDEREF
10543+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545+#ifdef CONFIG_X86_64
10546+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547+#endif
10548 #endif
10549
10550+#endif
10551+
10552+ BLANK();
10553+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556+
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560diff -urNp linux-3.0.3/arch/x86/kernel/cpu/amd.c linux-3.0.3/arch/x86/kernel/cpu/amd.c
10561--- linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562+++ linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567- if ((c->x86 == 6)) {
10568+ if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572diff -urNp linux-3.0.3/arch/x86/kernel/cpu/common.c linux-3.0.3/arch/x86/kernel/cpu/common.c
10573--- linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574+++ linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580-#ifdef CONFIG_X86_64
10581- /*
10582- * We need valid kernel segments for data and code in long mode too
10583- * IRET will check the segment types kkeil 2000/10/28
10584- * Also sysret mandates a special GDT layout
10585- *
10586- * TLS descriptors are currently at a different place compared to i386.
10587- * Hopefully nobody expects them at a fixed place (Wine?)
10588- */
10589- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595-#else
10596- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600- /*
10601- * Segments used for calling PnP BIOS have byte granularity.
10602- * They code segments and data segments have fixed 64k limits,
10603- * the transfer segment sizes are set at run time.
10604- */
10605- /* 32-bit code */
10606- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607- /* 16-bit code */
10608- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609- /* 16-bit data */
10610- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611- /* 16-bit data */
10612- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613- /* 16-bit data */
10614- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615- /*
10616- * The APM segments have byte granularity and their bases
10617- * are set at run time. All have 64k limits.
10618- */
10619- /* 32-bit code */
10620- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621- /* 16-bit code */
10622- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623- /* data */
10624- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625-
10626- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628- GDT_STACK_CANARY_INIT
10629-#endif
10630-} };
10631-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632-
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10651+#endif
10652+
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662+
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679- regs->gs = __KERNEL_STACK_CANARY;
10680+ savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688- t = &per_cpu(init_tss, cpu);
10689+ t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697- load_idt((const struct desc_ptr *)&idt_descr);
10698+ load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706- x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714- struct tss_struct *t = &per_cpu(init_tss, cpu);
10715+ struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719diff -urNp linux-3.0.3/arch/x86/kernel/cpu/intel.c linux-3.0.3/arch/x86/kernel/cpu/intel.c
10720--- linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:44:40.000000000 -0400
10721+++ linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:47:55.000000000 -0400
10722@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731diff -urNp linux-3.0.3/arch/x86/kernel/cpu/Makefile linux-3.0.3/arch/x86/kernel/cpu/Makefile
10732--- linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10733+++ linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10734@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10735 CFLAGS_REMOVE_perf_event.o = -pg
10736 endif
10737
10738-# Make sure load_percpu_segment has no stackprotector
10739-nostackp := $(call cc-option, -fno-stack-protector)
10740-CFLAGS_common.o := $(nostackp)
10741-
10742 obj-y := intel_cacheinfo.o scattered.o topology.o
10743 obj-y += proc.o capflags.o powerflags.o common.o
10744 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10745diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c
10746--- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10747+++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10748@@ -46,6 +46,7 @@
10749 #include <asm/ipi.h>
10750 #include <asm/mce.h>
10751 #include <asm/msr.h>
10752+#include <asm/local.h>
10753
10754 #include "mce-internal.h"
10755
10756@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10757 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10758 m->cs, m->ip);
10759
10760- if (m->cs == __KERNEL_CS)
10761+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10762 print_symbol("{%s}", m->ip);
10763 pr_cont("\n");
10764 }
10765@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10766
10767 #define PANIC_TIMEOUT 5 /* 5 seconds */
10768
10769-static atomic_t mce_paniced;
10770+static atomic_unchecked_t mce_paniced;
10771
10772 static int fake_panic;
10773-static atomic_t mce_fake_paniced;
10774+static atomic_unchecked_t mce_fake_paniced;
10775
10776 /* Panic in progress. Enable interrupts and wait for final IPI */
10777 static void wait_for_panic(void)
10778@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10779 /*
10780 * Make sure only one CPU runs in machine check panic
10781 */
10782- if (atomic_inc_return(&mce_paniced) > 1)
10783+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10784 wait_for_panic();
10785 barrier();
10786
10787@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10788 console_verbose();
10789 } else {
10790 /* Don't log too much for fake panic */
10791- if (atomic_inc_return(&mce_fake_paniced) > 1)
10792+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10793 return;
10794 }
10795 /* First print corrected ones that are still unlogged */
10796@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10797 * might have been modified by someone else.
10798 */
10799 rmb();
10800- if (atomic_read(&mce_paniced))
10801+ if (atomic_read_unchecked(&mce_paniced))
10802 wait_for_panic();
10803 if (!monarch_timeout)
10804 goto out;
10805@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10806 */
10807
10808 static DEFINE_SPINLOCK(mce_state_lock);
10809-static int open_count; /* #times opened */
10810+static local_t open_count; /* #times opened */
10811 static int open_exclu; /* already open exclusive? */
10812
10813 static int mce_open(struct inode *inode, struct file *file)
10814 {
10815 spin_lock(&mce_state_lock);
10816
10817- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10818+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10819 spin_unlock(&mce_state_lock);
10820
10821 return -EBUSY;
10822@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10823
10824 if (file->f_flags & O_EXCL)
10825 open_exclu = 1;
10826- open_count++;
10827+ local_inc(&open_count);
10828
10829 spin_unlock(&mce_state_lock);
10830
10831@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10832 {
10833 spin_lock(&mce_state_lock);
10834
10835- open_count--;
10836+ local_dec(&open_count);
10837 open_exclu = 0;
10838
10839 spin_unlock(&mce_state_lock);
10840@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10841 static void mce_reset(void)
10842 {
10843 cpu_missing = 0;
10844- atomic_set(&mce_fake_paniced, 0);
10845+ atomic_set_unchecked(&mce_fake_paniced, 0);
10846 atomic_set(&mce_executing, 0);
10847 atomic_set(&mce_callin, 0);
10848 atomic_set(&global_nwo, 0);
10849diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c
10850--- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10851+++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10852@@ -215,7 +215,9 @@ static int inject_init(void)
10853 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10854 return -ENOMEM;
10855 printk(KERN_INFO "Machine check injector initialized\n");
10856- mce_chrdev_ops.write = mce_write;
10857+ pax_open_kernel();
10858+ *(void **)&mce_chrdev_ops.write = mce_write;
10859+ pax_close_kernel();
10860 register_die_notifier(&mce_raise_nb);
10861 return 0;
10862 }
10863diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c
10864--- linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-07-21 22:17:23.000000000 -0400
10865+++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-08-23 21:47:55.000000000 -0400
10866@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10867 u64 size_or_mask, size_and_mask;
10868 static bool mtrr_aps_delayed_init;
10869
10870-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10871+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10872
10873 const struct mtrr_ops *mtrr_if;
10874
10875diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h
10876--- linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10877+++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-23 21:47:55.000000000 -0400
10878@@ -12,8 +12,8 @@
10879 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10880
10881 struct mtrr_ops {
10882- u32 vendor;
10883- u32 use_intel_if;
10884+ const u32 vendor;
10885+ const u32 use_intel_if;
10886 void (*set)(unsigned int reg, unsigned long base,
10887 unsigned long size, mtrr_type type);
10888 void (*set_all)(void);
10889diff -urNp linux-3.0.3/arch/x86/kernel/cpu/perf_event.c linux-3.0.3/arch/x86/kernel/cpu/perf_event.c
10890--- linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10891+++ linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10892@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10893 int i, j, w, wmax, num = 0;
10894 struct hw_perf_event *hwc;
10895
10896+ pax_track_stack();
10897+
10898 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10899
10900 for (i = 0; i < n; i++) {
10901@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10902 break;
10903
10904 perf_callchain_store(entry, frame.return_address);
10905- fp = frame.next_frame;
10906+ fp = (__force const void __user *)frame.next_frame;
10907 }
10908 }
10909
10910diff -urNp linux-3.0.3/arch/x86/kernel/crash.c linux-3.0.3/arch/x86/kernel/crash.c
10911--- linux-3.0.3/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10912+++ linux-3.0.3/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10913@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10914 regs = args->regs;
10915
10916 #ifdef CONFIG_X86_32
10917- if (!user_mode_vm(regs)) {
10918+ if (!user_mode(regs)) {
10919 crash_fixup_ss_esp(&fixed_regs, regs);
10920 regs = &fixed_regs;
10921 }
10922diff -urNp linux-3.0.3/arch/x86/kernel/doublefault_32.c linux-3.0.3/arch/x86/kernel/doublefault_32.c
10923--- linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10924+++ linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10925@@ -11,7 +11,7 @@
10926
10927 #define DOUBLEFAULT_STACKSIZE (1024)
10928 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10929-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10930+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10931
10932 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10933
10934@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10935 unsigned long gdt, tss;
10936
10937 store_gdt(&gdt_desc);
10938- gdt = gdt_desc.address;
10939+ gdt = (unsigned long)gdt_desc.address;
10940
10941 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10942
10943@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10944 /* 0x2 bit is always set */
10945 .flags = X86_EFLAGS_SF | 0x2,
10946 .sp = STACK_START,
10947- .es = __USER_DS,
10948+ .es = __KERNEL_DS,
10949 .cs = __KERNEL_CS,
10950 .ss = __KERNEL_DS,
10951- .ds = __USER_DS,
10952+ .ds = __KERNEL_DS,
10953 .fs = __KERNEL_PERCPU,
10954
10955 .__cr3 = __pa_nodebug(swapper_pg_dir),
10956diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_32.c linux-3.0.3/arch/x86/kernel/dumpstack_32.c
10957--- linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10958+++ linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10959@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10960 bp = stack_frame(task, regs);
10961
10962 for (;;) {
10963- struct thread_info *context;
10964+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10965
10966- context = (struct thread_info *)
10967- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10968- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10969+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10970
10971- stack = (unsigned long *)context->previous_esp;
10972- if (!stack)
10973+ if (stack_start == task_stack_page(task))
10974 break;
10975+ stack = *(unsigned long **)stack_start;
10976 if (ops->stack(data, "IRQ") < 0)
10977 break;
10978 touch_nmi_watchdog();
10979@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10980 * When in-kernel, we also print out the stack and code at the
10981 * time of the fault..
10982 */
10983- if (!user_mode_vm(regs)) {
10984+ if (!user_mode(regs)) {
10985 unsigned int code_prologue = code_bytes * 43 / 64;
10986 unsigned int code_len = code_bytes;
10987 unsigned char c;
10988 u8 *ip;
10989+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10990
10991 printk(KERN_EMERG "Stack:\n");
10992 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10993
10994 printk(KERN_EMERG "Code: ");
10995
10996- ip = (u8 *)regs->ip - code_prologue;
10997+ ip = (u8 *)regs->ip - code_prologue + cs_base;
10998 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10999 /* try starting at IP */
11000- ip = (u8 *)regs->ip;
11001+ ip = (u8 *)regs->ip + cs_base;
11002 code_len = code_len - code_prologue + 1;
11003 }
11004 for (i = 0; i < code_len; i++, ip++) {
11005@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11006 printk(" Bad EIP value.");
11007 break;
11008 }
11009- if (ip == (u8 *)regs->ip)
11010+ if (ip == (u8 *)regs->ip + cs_base)
11011 printk("<%02x> ", c);
11012 else
11013 printk("%02x ", c);
11014@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11015 {
11016 unsigned short ud2;
11017
11018+ ip = ktla_ktva(ip);
11019 if (ip < PAGE_OFFSET)
11020 return 0;
11021 if (probe_kernel_address((unsigned short *)ip, ud2))
11022diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_64.c linux-3.0.3/arch/x86/kernel/dumpstack_64.c
11023--- linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11024+++ linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11025@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11026 unsigned long *irq_stack_end =
11027 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11028 unsigned used = 0;
11029- struct thread_info *tinfo;
11030 int graph = 0;
11031 unsigned long dummy;
11032+ void *stack_start;
11033
11034 if (!task)
11035 task = current;
11036@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11037 * current stack address. If the stacks consist of nested
11038 * exceptions
11039 */
11040- tinfo = task_thread_info(task);
11041 for (;;) {
11042 char *id;
11043 unsigned long *estack_end;
11044+
11045 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11046 &used, &id);
11047
11048@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11049 if (ops->stack(data, id) < 0)
11050 break;
11051
11052- bp = ops->walk_stack(tinfo, stack, bp, ops,
11053+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11054 data, estack_end, &graph);
11055 ops->stack(data, "<EOE>");
11056 /*
11057@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11058 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11059 if (ops->stack(data, "IRQ") < 0)
11060 break;
11061- bp = ops->walk_stack(tinfo, stack, bp,
11062+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11063 ops, data, irq_stack_end, &graph);
11064 /*
11065 * We link to the next stack (which would be
11066@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11067 /*
11068 * This handles the process stack:
11069 */
11070- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11071+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11072+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11073 put_cpu();
11074 }
11075 EXPORT_SYMBOL(dump_trace);
11076diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack.c linux-3.0.3/arch/x86/kernel/dumpstack.c
11077--- linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11078+++ linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11079@@ -2,6 +2,9 @@
11080 * Copyright (C) 1991, 1992 Linus Torvalds
11081 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11082 */
11083+#ifdef CONFIG_GRKERNSEC_HIDESYM
11084+#define __INCLUDED_BY_HIDESYM 1
11085+#endif
11086 #include <linux/kallsyms.h>
11087 #include <linux/kprobes.h>
11088 #include <linux/uaccess.h>
11089@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11090 static void
11091 print_ftrace_graph_addr(unsigned long addr, void *data,
11092 const struct stacktrace_ops *ops,
11093- struct thread_info *tinfo, int *graph)
11094+ struct task_struct *task, int *graph)
11095 {
11096- struct task_struct *task = tinfo->task;
11097 unsigned long ret_addr;
11098 int index = task->curr_ret_stack;
11099
11100@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11101 static inline void
11102 print_ftrace_graph_addr(unsigned long addr, void *data,
11103 const struct stacktrace_ops *ops,
11104- struct thread_info *tinfo, int *graph)
11105+ struct task_struct *task, int *graph)
11106 { }
11107 #endif
11108
11109@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11110 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11111 */
11112
11113-static inline int valid_stack_ptr(struct thread_info *tinfo,
11114- void *p, unsigned int size, void *end)
11115+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11116 {
11117- void *t = tinfo;
11118 if (end) {
11119 if (p < end && p >= (end-THREAD_SIZE))
11120 return 1;
11121@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11122 }
11123
11124 unsigned long
11125-print_context_stack(struct thread_info *tinfo,
11126+print_context_stack(struct task_struct *task, void *stack_start,
11127 unsigned long *stack, unsigned long bp,
11128 const struct stacktrace_ops *ops, void *data,
11129 unsigned long *end, int *graph)
11130 {
11131 struct stack_frame *frame = (struct stack_frame *)bp;
11132
11133- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11134+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11135 unsigned long addr;
11136
11137 addr = *stack;
11138@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11139 } else {
11140 ops->address(data, addr, 0);
11141 }
11142- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11143+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11144 }
11145 stack++;
11146 }
11147@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11148 EXPORT_SYMBOL_GPL(print_context_stack);
11149
11150 unsigned long
11151-print_context_stack_bp(struct thread_info *tinfo,
11152+print_context_stack_bp(struct task_struct *task, void *stack_start,
11153 unsigned long *stack, unsigned long bp,
11154 const struct stacktrace_ops *ops, void *data,
11155 unsigned long *end, int *graph)
11156@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11157 struct stack_frame *frame = (struct stack_frame *)bp;
11158 unsigned long *ret_addr = &frame->return_address;
11159
11160- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11161+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11162 unsigned long addr = *ret_addr;
11163
11164 if (!__kernel_text_address(addr))
11165@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11166 ops->address(data, addr, 1);
11167 frame = frame->next_frame;
11168 ret_addr = &frame->return_address;
11169- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11170+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11171 }
11172
11173 return (unsigned long)frame;
11174@@ -186,7 +186,7 @@ void dump_stack(void)
11175
11176 bp = stack_frame(current, NULL);
11177 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11178- current->pid, current->comm, print_tainted(),
11179+ task_pid_nr(current), current->comm, print_tainted(),
11180 init_utsname()->release,
11181 (int)strcspn(init_utsname()->version, " "),
11182 init_utsname()->version);
11183@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11184 }
11185 EXPORT_SYMBOL_GPL(oops_begin);
11186
11187+extern void gr_handle_kernel_exploit(void);
11188+
11189 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11190 {
11191 if (regs && kexec_should_crash(current))
11192@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11193 panic("Fatal exception in interrupt");
11194 if (panic_on_oops)
11195 panic("Fatal exception");
11196- do_exit(signr);
11197+
11198+ gr_handle_kernel_exploit();
11199+
11200+ do_group_exit(signr);
11201 }
11202
11203 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11204@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11205
11206 show_registers(regs);
11207 #ifdef CONFIG_X86_32
11208- if (user_mode_vm(regs)) {
11209+ if (user_mode(regs)) {
11210 sp = regs->sp;
11211 ss = regs->ss & 0xffff;
11212 } else {
11213@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11214 unsigned long flags = oops_begin();
11215 int sig = SIGSEGV;
11216
11217- if (!user_mode_vm(regs))
11218+ if (!user_mode(regs))
11219 report_bug(regs->ip, regs);
11220
11221 if (__die(str, regs, err))
11222diff -urNp linux-3.0.3/arch/x86/kernel/early_printk.c linux-3.0.3/arch/x86/kernel/early_printk.c
11223--- linux-3.0.3/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11224+++ linux-3.0.3/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11225@@ -7,6 +7,7 @@
11226 #include <linux/pci_regs.h>
11227 #include <linux/pci_ids.h>
11228 #include <linux/errno.h>
11229+#include <linux/sched.h>
11230 #include <asm/io.h>
11231 #include <asm/processor.h>
11232 #include <asm/fcntl.h>
11233@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11234 int n;
11235 va_list ap;
11236
11237+ pax_track_stack();
11238+
11239 va_start(ap, fmt);
11240 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11241 early_console->write(early_console, buf, n);
11242diff -urNp linux-3.0.3/arch/x86/kernel/entry_32.S linux-3.0.3/arch/x86/kernel/entry_32.S
11243--- linux-3.0.3/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11244+++ linux-3.0.3/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11245@@ -185,13 +185,146 @@
11246 /*CFI_REL_OFFSET gs, PT_GS*/
11247 .endm
11248 .macro SET_KERNEL_GS reg
11249+
11250+#ifdef CONFIG_CC_STACKPROTECTOR
11251 movl $(__KERNEL_STACK_CANARY), \reg
11252+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11253+ movl $(__USER_DS), \reg
11254+#else
11255+ xorl \reg, \reg
11256+#endif
11257+
11258 movl \reg, %gs
11259 .endm
11260
11261 #endif /* CONFIG_X86_32_LAZY_GS */
11262
11263-.macro SAVE_ALL
11264+.macro pax_enter_kernel
11265+#ifdef CONFIG_PAX_KERNEXEC
11266+ call pax_enter_kernel
11267+#endif
11268+.endm
11269+
11270+.macro pax_exit_kernel
11271+#ifdef CONFIG_PAX_KERNEXEC
11272+ call pax_exit_kernel
11273+#endif
11274+.endm
11275+
11276+#ifdef CONFIG_PAX_KERNEXEC
11277+ENTRY(pax_enter_kernel)
11278+#ifdef CONFIG_PARAVIRT
11279+ pushl %eax
11280+ pushl %ecx
11281+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11282+ mov %eax, %esi
11283+#else
11284+ mov %cr0, %esi
11285+#endif
11286+ bts $16, %esi
11287+ jnc 1f
11288+ mov %cs, %esi
11289+ cmp $__KERNEL_CS, %esi
11290+ jz 3f
11291+ ljmp $__KERNEL_CS, $3f
11292+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11293+2:
11294+#ifdef CONFIG_PARAVIRT
11295+ mov %esi, %eax
11296+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11297+#else
11298+ mov %esi, %cr0
11299+#endif
11300+3:
11301+#ifdef CONFIG_PARAVIRT
11302+ popl %ecx
11303+ popl %eax
11304+#endif
11305+ ret
11306+ENDPROC(pax_enter_kernel)
11307+
11308+ENTRY(pax_exit_kernel)
11309+#ifdef CONFIG_PARAVIRT
11310+ pushl %eax
11311+ pushl %ecx
11312+#endif
11313+ mov %cs, %esi
11314+ cmp $__KERNEXEC_KERNEL_CS, %esi
11315+ jnz 2f
11316+#ifdef CONFIG_PARAVIRT
11317+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11318+ mov %eax, %esi
11319+#else
11320+ mov %cr0, %esi
11321+#endif
11322+ btr $16, %esi
11323+ ljmp $__KERNEL_CS, $1f
11324+1:
11325+#ifdef CONFIG_PARAVIRT
11326+ mov %esi, %eax
11327+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11328+#else
11329+ mov %esi, %cr0
11330+#endif
11331+2:
11332+#ifdef CONFIG_PARAVIRT
11333+ popl %ecx
11334+ popl %eax
11335+#endif
11336+ ret
11337+ENDPROC(pax_exit_kernel)
11338+#endif
11339+
11340+.macro pax_erase_kstack
11341+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11342+ call pax_erase_kstack
11343+#endif
11344+.endm
11345+
11346+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11347+/*
11348+ * ebp: thread_info
11349+ * ecx, edx: can be clobbered
11350+ */
11351+ENTRY(pax_erase_kstack)
11352+ pushl %edi
11353+ pushl %eax
11354+
11355+ mov TI_lowest_stack(%ebp), %edi
11356+ mov $-0xBEEF, %eax
11357+ std
11358+
11359+1: mov %edi, %ecx
11360+ and $THREAD_SIZE_asm - 1, %ecx
11361+ shr $2, %ecx
11362+ repne scasl
11363+ jecxz 2f
11364+
11365+ cmp $2*16, %ecx
11366+ jc 2f
11367+
11368+ mov $2*16, %ecx
11369+ repe scasl
11370+ jecxz 2f
11371+ jne 1b
11372+
11373+2: cld
11374+ mov %esp, %ecx
11375+ sub %edi, %ecx
11376+ shr $2, %ecx
11377+ rep stosl
11378+
11379+ mov TI_task_thread_sp0(%ebp), %edi
11380+ sub $128, %edi
11381+ mov %edi, TI_lowest_stack(%ebp)
11382+
11383+ popl %eax
11384+ popl %edi
11385+ ret
11386+ENDPROC(pax_erase_kstack)
11387+#endif
11388+
11389+.macro __SAVE_ALL _DS
11390 cld
11391 PUSH_GS
11392 pushl_cfi %fs
11393@@ -214,7 +347,7 @@
11394 CFI_REL_OFFSET ecx, 0
11395 pushl_cfi %ebx
11396 CFI_REL_OFFSET ebx, 0
11397- movl $(__USER_DS), %edx
11398+ movl $\_DS, %edx
11399 movl %edx, %ds
11400 movl %edx, %es
11401 movl $(__KERNEL_PERCPU), %edx
11402@@ -222,6 +355,15 @@
11403 SET_KERNEL_GS %edx
11404 .endm
11405
11406+.macro SAVE_ALL
11407+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11408+ __SAVE_ALL __KERNEL_DS
11409+ pax_enter_kernel
11410+#else
11411+ __SAVE_ALL __USER_DS
11412+#endif
11413+.endm
11414+
11415 .macro RESTORE_INT_REGS
11416 popl_cfi %ebx
11417 CFI_RESTORE ebx
11418@@ -332,7 +474,15 @@ check_userspace:
11419 movb PT_CS(%esp), %al
11420 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11421 cmpl $USER_RPL, %eax
11422+
11423+#ifdef CONFIG_PAX_KERNEXEC
11424+ jae resume_userspace
11425+
11426+ PAX_EXIT_KERNEL
11427+ jmp resume_kernel
11428+#else
11429 jb resume_kernel # not returning to v8086 or userspace
11430+#endif
11431
11432 ENTRY(resume_userspace)
11433 LOCKDEP_SYS_EXIT
11434@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11435 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11436 # int/exception return?
11437 jne work_pending
11438- jmp restore_all
11439+ jmp restore_all_pax
11440 END(ret_from_exception)
11441
11442 #ifdef CONFIG_PREEMPT
11443@@ -394,23 +544,34 @@ sysenter_past_esp:
11444 /*CFI_REL_OFFSET cs, 0*/
11445 /*
11446 * Push current_thread_info()->sysenter_return to the stack.
11447- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11448- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11449 */
11450- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11451+ pushl_cfi $0
11452 CFI_REL_OFFSET eip, 0
11453
11454 pushl_cfi %eax
11455 SAVE_ALL
11456+ GET_THREAD_INFO(%ebp)
11457+ movl TI_sysenter_return(%ebp),%ebp
11458+ movl %ebp,PT_EIP(%esp)
11459 ENABLE_INTERRUPTS(CLBR_NONE)
11460
11461 /*
11462 * Load the potential sixth argument from user stack.
11463 * Careful about security.
11464 */
11465+ movl PT_OLDESP(%esp),%ebp
11466+
11467+#ifdef CONFIG_PAX_MEMORY_UDEREF
11468+ mov PT_OLDSS(%esp),%ds
11469+1: movl %ds:(%ebp),%ebp
11470+ push %ss
11471+ pop %ds
11472+#else
11473 cmpl $__PAGE_OFFSET-3,%ebp
11474 jae syscall_fault
11475 1: movl (%ebp),%ebp
11476+#endif
11477+
11478 movl %ebp,PT_EBP(%esp)
11479 .section __ex_table,"a"
11480 .align 4
11481@@ -433,12 +594,23 @@ sysenter_do_call:
11482 testl $_TIF_ALLWORK_MASK, %ecx
11483 jne sysexit_audit
11484 sysenter_exit:
11485+
11486+#ifdef CONFIG_PAX_RANDKSTACK
11487+ pushl_cfi %eax
11488+ call pax_randomize_kstack
11489+ popl_cfi %eax
11490+#endif
11491+
11492+ pax_erase_kstack
11493+
11494 /* if something modifies registers it must also disable sysexit */
11495 movl PT_EIP(%esp), %edx
11496 movl PT_OLDESP(%esp), %ecx
11497 xorl %ebp,%ebp
11498 TRACE_IRQS_ON
11499 1: mov PT_FS(%esp), %fs
11500+2: mov PT_DS(%esp), %ds
11501+3: mov PT_ES(%esp), %es
11502 PTGS_TO_GS
11503 ENABLE_INTERRUPTS_SYSEXIT
11504
11505@@ -455,6 +627,9 @@ sysenter_audit:
11506 movl %eax,%edx /* 2nd arg: syscall number */
11507 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11508 call audit_syscall_entry
11509+
11510+ pax_erase_kstack
11511+
11512 pushl_cfi %ebx
11513 movl PT_EAX(%esp),%eax /* reload syscall number */
11514 jmp sysenter_do_call
11515@@ -481,11 +656,17 @@ sysexit_audit:
11516
11517 CFI_ENDPROC
11518 .pushsection .fixup,"ax"
11519-2: movl $0,PT_FS(%esp)
11520+4: movl $0,PT_FS(%esp)
11521+ jmp 1b
11522+5: movl $0,PT_DS(%esp)
11523+ jmp 1b
11524+6: movl $0,PT_ES(%esp)
11525 jmp 1b
11526 .section __ex_table,"a"
11527 .align 4
11528- .long 1b,2b
11529+ .long 1b,4b
11530+ .long 2b,5b
11531+ .long 3b,6b
11532 .popsection
11533 PTGS_TO_GS_EX
11534 ENDPROC(ia32_sysenter_target)
11535@@ -518,6 +699,14 @@ syscall_exit:
11536 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11537 jne syscall_exit_work
11538
11539+restore_all_pax:
11540+
11541+#ifdef CONFIG_PAX_RANDKSTACK
11542+ call pax_randomize_kstack
11543+#endif
11544+
11545+ pax_erase_kstack
11546+
11547 restore_all:
11548 TRACE_IRQS_IRET
11549 restore_all_notrace:
11550@@ -577,14 +766,34 @@ ldt_ss:
11551 * compensating for the offset by changing to the ESPFIX segment with
11552 * a base address that matches for the difference.
11553 */
11554-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11555+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11556 mov %esp, %edx /* load kernel esp */
11557 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11558 mov %dx, %ax /* eax: new kernel esp */
11559 sub %eax, %edx /* offset (low word is 0) */
11560+#ifdef CONFIG_SMP
11561+ movl PER_CPU_VAR(cpu_number), %ebx
11562+ shll $PAGE_SHIFT_asm, %ebx
11563+ addl $cpu_gdt_table, %ebx
11564+#else
11565+ movl $cpu_gdt_table, %ebx
11566+#endif
11567 shr $16, %edx
11568- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11569- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11570+
11571+#ifdef CONFIG_PAX_KERNEXEC
11572+ mov %cr0, %esi
11573+ btr $16, %esi
11574+ mov %esi, %cr0
11575+#endif
11576+
11577+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11578+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11579+
11580+#ifdef CONFIG_PAX_KERNEXEC
11581+ bts $16, %esi
11582+ mov %esi, %cr0
11583+#endif
11584+
11585 pushl_cfi $__ESPFIX_SS
11586 pushl_cfi %eax /* new kernel esp */
11587 /* Disable interrupts, but do not irqtrace this section: we
11588@@ -613,29 +822,23 @@ work_resched:
11589 movl TI_flags(%ebp), %ecx
11590 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11591 # than syscall tracing?
11592- jz restore_all
11593+ jz restore_all_pax
11594 testb $_TIF_NEED_RESCHED, %cl
11595 jnz work_resched
11596
11597 work_notifysig: # deal with pending signals and
11598 # notify-resume requests
11599+ movl %esp, %eax
11600 #ifdef CONFIG_VM86
11601 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11602- movl %esp, %eax
11603- jne work_notifysig_v86 # returning to kernel-space or
11604+ jz 1f # returning to kernel-space or
11605 # vm86-space
11606- xorl %edx, %edx
11607- call do_notify_resume
11608- jmp resume_userspace_sig
11609
11610- ALIGN
11611-work_notifysig_v86:
11612 pushl_cfi %ecx # save ti_flags for do_notify_resume
11613 call save_v86_state # %eax contains pt_regs pointer
11614 popl_cfi %ecx
11615 movl %eax, %esp
11616-#else
11617- movl %esp, %eax
11618+1:
11619 #endif
11620 xorl %edx, %edx
11621 call do_notify_resume
11622@@ -648,6 +851,9 @@ syscall_trace_entry:
11623 movl $-ENOSYS,PT_EAX(%esp)
11624 movl %esp, %eax
11625 call syscall_trace_enter
11626+
11627+ pax_erase_kstack
11628+
11629 /* What it returned is what we'll actually use. */
11630 cmpl $(nr_syscalls), %eax
11631 jnae syscall_call
11632@@ -670,6 +876,10 @@ END(syscall_exit_work)
11633
11634 RING0_INT_FRAME # can't unwind into user space anyway
11635 syscall_fault:
11636+#ifdef CONFIG_PAX_MEMORY_UDEREF
11637+ push %ss
11638+ pop %ds
11639+#endif
11640 GET_THREAD_INFO(%ebp)
11641 movl $-EFAULT,PT_EAX(%esp)
11642 jmp resume_userspace
11643@@ -752,6 +962,36 @@ ptregs_clone:
11644 CFI_ENDPROC
11645 ENDPROC(ptregs_clone)
11646
11647+ ALIGN;
11648+ENTRY(kernel_execve)
11649+ CFI_STARTPROC
11650+ pushl_cfi %ebp
11651+ sub $PT_OLDSS+4,%esp
11652+ pushl_cfi %edi
11653+ pushl_cfi %ecx
11654+ pushl_cfi %eax
11655+ lea 3*4(%esp),%edi
11656+ mov $PT_OLDSS/4+1,%ecx
11657+ xorl %eax,%eax
11658+ rep stosl
11659+ popl_cfi %eax
11660+ popl_cfi %ecx
11661+ popl_cfi %edi
11662+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11663+ pushl_cfi %esp
11664+ call sys_execve
11665+ add $4,%esp
11666+ CFI_ADJUST_CFA_OFFSET -4
11667+ GET_THREAD_INFO(%ebp)
11668+ test %eax,%eax
11669+ jz syscall_exit
11670+ add $PT_OLDSS+4,%esp
11671+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11672+ popl_cfi %ebp
11673+ ret
11674+ CFI_ENDPROC
11675+ENDPROC(kernel_execve)
11676+
11677 .macro FIXUP_ESPFIX_STACK
11678 /*
11679 * Switch back for ESPFIX stack to the normal zerobased stack
11680@@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11681 * normal stack and adjusts ESP with the matching offset.
11682 */
11683 /* fixup the stack */
11684- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11685- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11686+#ifdef CONFIG_SMP
11687+ movl PER_CPU_VAR(cpu_number), %ebx
11688+ shll $PAGE_SHIFT_asm, %ebx
11689+ addl $cpu_gdt_table, %ebx
11690+#else
11691+ movl $cpu_gdt_table, %ebx
11692+#endif
11693+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11694+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11695 shl $16, %eax
11696 addl %esp, %eax /* the adjusted stack pointer */
11697 pushl_cfi $__KERNEL_DS
11698@@ -1213,7 +1460,6 @@ return_to_handler:
11699 jmp *%ecx
11700 #endif
11701
11702-.section .rodata,"a"
11703 #include "syscall_table_32.S"
11704
11705 syscall_table_size=(.-sys_call_table)
11706@@ -1259,9 +1505,12 @@ error_code:
11707 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11708 REG_TO_PTGS %ecx
11709 SET_KERNEL_GS %ecx
11710- movl $(__USER_DS), %ecx
11711+ movl $(__KERNEL_DS), %ecx
11712 movl %ecx, %ds
11713 movl %ecx, %es
11714+
11715+ pax_enter_kernel
11716+
11717 TRACE_IRQS_OFF
11718 movl %esp,%eax # pt_regs pointer
11719 call *%edi
11720@@ -1346,6 +1595,9 @@ nmi_stack_correct:
11721 xorl %edx,%edx # zero error code
11722 movl %esp,%eax # pt_regs pointer
11723 call do_nmi
11724+
11725+ pax_exit_kernel
11726+
11727 jmp restore_all_notrace
11728 CFI_ENDPROC
11729
11730@@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11731 FIXUP_ESPFIX_STACK # %eax == %esp
11732 xorl %edx,%edx # zero error code
11733 call do_nmi
11734+
11735+ pax_exit_kernel
11736+
11737 RESTORE_REGS
11738 lss 12+4(%esp), %esp # back to espfix stack
11739 CFI_ADJUST_CFA_OFFSET -24
11740diff -urNp linux-3.0.3/arch/x86/kernel/entry_64.S linux-3.0.3/arch/x86/kernel/entry_64.S
11741--- linux-3.0.3/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11742+++ linux-3.0.3/arch/x86/kernel/entry_64.S 2011-08-25 17:38:59.000000000 -0400
11743@@ -53,6 +53,7 @@
11744 #include <asm/paravirt.h>
11745 #include <asm/ftrace.h>
11746 #include <asm/percpu.h>
11747+#include <asm/pgtable.h>
11748
11749 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11750 #include <linux/elf-em.h>
11751@@ -176,6 +177,262 @@ ENTRY(native_usergs_sysret64)
11752 ENDPROC(native_usergs_sysret64)
11753 #endif /* CONFIG_PARAVIRT */
11754
11755+ .macro ljmpq sel, off
11756+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11757+ .byte 0x48; ljmp *1234f(%rip)
11758+ .pushsection .rodata
11759+ .align 16
11760+ 1234: .quad \off; .word \sel
11761+ .popsection
11762+#else
11763+ pushq $\sel
11764+ pushq $\off
11765+ lretq
11766+#endif
11767+ .endm
11768+
11769+ .macro pax_enter_kernel
11770+#ifdef CONFIG_PAX_KERNEXEC
11771+ call pax_enter_kernel
11772+#endif
11773+ .endm
11774+
11775+ .macro pax_exit_kernel
11776+#ifdef CONFIG_PAX_KERNEXEC
11777+ call pax_exit_kernel
11778+#endif
11779+ .endm
11780+
11781+#ifdef CONFIG_PAX_KERNEXEC
11782+ENTRY(pax_enter_kernel)
11783+ pushq %rdi
11784+
11785+#ifdef CONFIG_PARAVIRT
11786+ PV_SAVE_REGS(CLBR_RDI)
11787+#endif
11788+
11789+ GET_CR0_INTO_RDI
11790+ bts $16,%rdi
11791+ jnc 1f
11792+ mov %cs,%edi
11793+ cmp $__KERNEL_CS,%edi
11794+ jz 3f
11795+ ljmpq __KERNEL_CS,3f
11796+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11797+2: SET_RDI_INTO_CR0
11798+3:
11799+
11800+#ifdef CONFIG_PARAVIRT
11801+ PV_RESTORE_REGS(CLBR_RDI)
11802+#endif
11803+
11804+ popq %rdi
11805+ retq
11806+ENDPROC(pax_enter_kernel)
11807+
11808+ENTRY(pax_exit_kernel)
11809+ pushq %rdi
11810+
11811+#ifdef CONFIG_PARAVIRT
11812+ PV_SAVE_REGS(CLBR_RDI)
11813+#endif
11814+
11815+ mov %cs,%rdi
11816+ cmp $__KERNEXEC_KERNEL_CS,%edi
11817+ jnz 2f
11818+ GET_CR0_INTO_RDI
11819+ btr $16,%rdi
11820+ ljmpq __KERNEL_CS,1f
11821+1: SET_RDI_INTO_CR0
11822+2:
11823+
11824+#ifdef CONFIG_PARAVIRT
11825+ PV_RESTORE_REGS(CLBR_RDI);
11826+#endif
11827+
11828+ popq %rdi
11829+ retq
11830+ENDPROC(pax_exit_kernel)
11831+#endif
11832+
11833+ .macro pax_enter_kernel_user
11834+#ifdef CONFIG_PAX_MEMORY_UDEREF
11835+ call pax_enter_kernel_user
11836+#endif
11837+ .endm
11838+
11839+ .macro pax_exit_kernel_user
11840+#ifdef CONFIG_PAX_MEMORY_UDEREF
11841+ call pax_exit_kernel_user
11842+#endif
11843+#ifdef CONFIG_PAX_RANDKSTACK
11844+ push %rax
11845+ call pax_randomize_kstack
11846+ pop %rax
11847+#endif
11848+ .endm
11849+
11850+#ifdef CONFIG_PAX_MEMORY_UDEREF
11851+ENTRY(pax_enter_kernel_user)
11852+ pushq %rdi
11853+ pushq %rbx
11854+
11855+#ifdef CONFIG_PARAVIRT
11856+ PV_SAVE_REGS(CLBR_RDI)
11857+#endif
11858+
11859+ GET_CR3_INTO_RDI
11860+ mov %rdi,%rbx
11861+ add $__START_KERNEL_map,%rbx
11862+ sub phys_base(%rip),%rbx
11863+
11864+#ifdef CONFIG_PARAVIRT
11865+ pushq %rdi
11866+ cmpl $0, pv_info+PARAVIRT_enabled
11867+ jz 1f
11868+ i = 0
11869+ .rept USER_PGD_PTRS
11870+ mov i*8(%rbx),%rsi
11871+ mov $0,%sil
11872+ lea i*8(%rbx),%rdi
11873+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11874+ i = i + 1
11875+ .endr
11876+ jmp 2f
11877+1:
11878+#endif
11879+
11880+ i = 0
11881+ .rept USER_PGD_PTRS
11882+ movb $0,i*8(%rbx)
11883+ i = i + 1
11884+ .endr
11885+
11886+#ifdef CONFIG_PARAVIRT
11887+2: popq %rdi
11888+#endif
11889+ SET_RDI_INTO_CR3
11890+
11891+#ifdef CONFIG_PAX_KERNEXEC
11892+ GET_CR0_INTO_RDI
11893+ bts $16,%rdi
11894+ SET_RDI_INTO_CR0
11895+#endif
11896+
11897+#ifdef CONFIG_PARAVIRT
11898+ PV_RESTORE_REGS(CLBR_RDI)
11899+#endif
11900+
11901+ popq %rbx
11902+ popq %rdi
11903+ retq
11904+ENDPROC(pax_enter_kernel_user)
11905+
11906+ENTRY(pax_exit_kernel_user)
11907+ push %rdi
11908+
11909+#ifdef CONFIG_PARAVIRT
11910+ pushq %rbx
11911+ PV_SAVE_REGS(CLBR_RDI)
11912+#endif
11913+
11914+#ifdef CONFIG_PAX_KERNEXEC
11915+ GET_CR0_INTO_RDI
11916+ btr $16,%rdi
11917+ SET_RDI_INTO_CR0
11918+#endif
11919+
11920+ GET_CR3_INTO_RDI
11921+ add $__START_KERNEL_map,%rdi
11922+ sub phys_base(%rip),%rdi
11923+
11924+#ifdef CONFIG_PARAVIRT
11925+ cmpl $0, pv_info+PARAVIRT_enabled
11926+ jz 1f
11927+ mov %rdi,%rbx
11928+ i = 0
11929+ .rept USER_PGD_PTRS
11930+ mov i*8(%rbx),%rsi
11931+ mov $0x67,%sil
11932+ lea i*8(%rbx),%rdi
11933+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11934+ i = i + 1
11935+ .endr
11936+ jmp 2f
11937+1:
11938+#endif
11939+
11940+ i = 0
11941+ .rept USER_PGD_PTRS
11942+ movb $0x67,i*8(%rdi)
11943+ i = i + 1
11944+ .endr
11945+
11946+#ifdef CONFIG_PARAVIRT
11947+2: PV_RESTORE_REGS(CLBR_RDI)
11948+ popq %rbx
11949+#endif
11950+
11951+ popq %rdi
11952+ retq
11953+ENDPROC(pax_exit_kernel_user)
11954+#endif
11955+
11956+ .macro pax_erase_kstack
11957+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11958+ call pax_erase_kstack
11959+#endif
11960+ .endm
11961+
11962+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11963+/*
11964+ * r10: thread_info
11965+ * rcx, rdx: can be clobbered
11966+ */
11967+ENTRY(pax_erase_kstack)
11968+ pushq %rdi
11969+ pushq %rax
11970+
11971+ GET_THREAD_INFO(%r10)
11972+ mov TI_lowest_stack(%r10), %rdi
11973+ mov $-0xBEEF, %rax
11974+ std
11975+
11976+1: mov %edi, %ecx
11977+ and $THREAD_SIZE_asm - 1, %ecx
11978+ shr $3, %ecx
11979+ repne scasq
11980+ jecxz 2f
11981+
11982+ cmp $2*8, %ecx
11983+ jc 2f
11984+
11985+ mov $2*8, %ecx
11986+ repe scasq
11987+ jecxz 2f
11988+ jne 1b
11989+
11990+2: cld
11991+ mov %esp, %ecx
11992+ sub %edi, %ecx
11993+
11994+ cmp $THREAD_SIZE_asm, %rcx
11995+ jb 3f
11996+ ud2
11997+3:
11998+
11999+ shr $3, %ecx
12000+ rep stosq
12001+
12002+ mov TI_task_thread_sp0(%r10), %rdi
12003+ sub $256, %rdi
12004+ mov %rdi, TI_lowest_stack(%r10)
12005+
12006+ popq %rax
12007+ popq %rdi
12008+ ret
12009+ENDPROC(pax_erase_kstack)
12010+#endif
12011
12012 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12013 #ifdef CONFIG_TRACE_IRQFLAGS
12014@@ -318,7 +575,7 @@ ENTRY(save_args)
12015 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12016 movq_cfi rbp, 8 /* push %rbp */
12017 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12018- testl $3, CS(%rdi)
12019+ testb $3, CS(%rdi)
12020 je 1f
12021 SWAPGS
12022 /*
12023@@ -409,7 +666,7 @@ ENTRY(ret_from_fork)
12024
12025 RESTORE_REST
12026
12027- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12028+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12029 je int_ret_from_sys_call
12030
12031 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12032@@ -455,7 +712,7 @@ END(ret_from_fork)
12033 ENTRY(system_call)
12034 CFI_STARTPROC simple
12035 CFI_SIGNAL_FRAME
12036- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12037+ CFI_DEF_CFA rsp,0
12038 CFI_REGISTER rip,rcx
12039 /*CFI_REGISTER rflags,r11*/
12040 SWAPGS_UNSAFE_STACK
12041@@ -468,12 +725,13 @@ ENTRY(system_call_after_swapgs)
12042
12043 movq %rsp,PER_CPU_VAR(old_rsp)
12044 movq PER_CPU_VAR(kernel_stack),%rsp
12045+ pax_enter_kernel_user
12046 /*
12047 * No need to follow this irqs off/on section - it's straight
12048 * and short:
12049 */
12050 ENABLE_INTERRUPTS(CLBR_NONE)
12051- SAVE_ARGS 8,1
12052+ SAVE_ARGS 8*6,1
12053 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12054 movq %rcx,RIP-ARGOFFSET(%rsp)
12055 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12056@@ -502,6 +760,8 @@ sysret_check:
12057 andl %edi,%edx
12058 jnz sysret_careful
12059 CFI_REMEMBER_STATE
12060+ pax_exit_kernel_user
12061+ pax_erase_kstack
12062 /*
12063 * sysretq will re-enable interrupts:
12064 */
12065@@ -560,6 +820,9 @@ auditsys:
12066 movq %rax,%rsi /* 2nd arg: syscall number */
12067 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12068 call audit_syscall_entry
12069+
12070+ pax_erase_kstack
12071+
12072 LOAD_ARGS 0 /* reload call-clobbered registers */
12073 jmp system_call_fastpath
12074
12075@@ -590,6 +853,9 @@ tracesys:
12076 FIXUP_TOP_OF_STACK %rdi
12077 movq %rsp,%rdi
12078 call syscall_trace_enter
12079+
12080+ pax_erase_kstack
12081+
12082 /*
12083 * Reload arg registers from stack in case ptrace changed them.
12084 * We don't reload %rax because syscall_trace_enter() returned
12085@@ -611,7 +877,7 @@ tracesys:
12086 GLOBAL(int_ret_from_sys_call)
12087 DISABLE_INTERRUPTS(CLBR_NONE)
12088 TRACE_IRQS_OFF
12089- testl $3,CS-ARGOFFSET(%rsp)
12090+ testb $3,CS-ARGOFFSET(%rsp)
12091 je retint_restore_args
12092 movl $_TIF_ALLWORK_MASK,%edi
12093 /* edi: mask to check */
12094@@ -793,6 +1059,16 @@ END(interrupt)
12095 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12096 call save_args
12097 PARTIAL_FRAME 0
12098+#ifdef CONFIG_PAX_MEMORY_UDEREF
12099+ testb $3, CS(%rdi)
12100+ jnz 1f
12101+ pax_enter_kernel
12102+ jmp 2f
12103+1: pax_enter_kernel_user
12104+2:
12105+#else
12106+ pax_enter_kernel
12107+#endif
12108 call \func
12109 .endm
12110
12111@@ -825,7 +1101,7 @@ ret_from_intr:
12112 CFI_ADJUST_CFA_OFFSET -8
12113 exit_intr:
12114 GET_THREAD_INFO(%rcx)
12115- testl $3,CS-ARGOFFSET(%rsp)
12116+ testb $3,CS-ARGOFFSET(%rsp)
12117 je retint_kernel
12118
12119 /* Interrupt came from user space */
12120@@ -847,12 +1123,15 @@ retint_swapgs: /* return to user-space
12121 * The iretq could re-enable interrupts:
12122 */
12123 DISABLE_INTERRUPTS(CLBR_ANY)
12124+ pax_exit_kernel_user
12125+ pax_erase_kstack
12126 TRACE_IRQS_IRETQ
12127 SWAPGS
12128 jmp restore_args
12129
12130 retint_restore_args: /* return to kernel space */
12131 DISABLE_INTERRUPTS(CLBR_ANY)
12132+ pax_exit_kernel
12133 /*
12134 * The iretq could re-enable interrupts:
12135 */
12136@@ -1027,6 +1306,16 @@ ENTRY(\sym)
12137 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12138 call error_entry
12139 DEFAULT_FRAME 0
12140+#ifdef CONFIG_PAX_MEMORY_UDEREF
12141+ testb $3, CS(%rsp)
12142+ jnz 1f
12143+ pax_enter_kernel
12144+ jmp 2f
12145+1: pax_enter_kernel_user
12146+2:
12147+#else
12148+ pax_enter_kernel
12149+#endif
12150 movq %rsp,%rdi /* pt_regs pointer */
12151 xorl %esi,%esi /* no error code */
12152 call \do_sym
12153@@ -1044,6 +1333,16 @@ ENTRY(\sym)
12154 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12155 call save_paranoid
12156 TRACE_IRQS_OFF
12157+#ifdef CONFIG_PAX_MEMORY_UDEREF
12158+ testb $3, CS(%rsp)
12159+ jnz 1f
12160+ pax_enter_kernel
12161+ jmp 2f
12162+1: pax_enter_kernel_user
12163+2:
12164+#else
12165+ pax_enter_kernel
12166+#endif
12167 movq %rsp,%rdi /* pt_regs pointer */
12168 xorl %esi,%esi /* no error code */
12169 call \do_sym
12170@@ -1052,7 +1351,7 @@ ENTRY(\sym)
12171 END(\sym)
12172 .endm
12173
12174-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12175+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12176 .macro paranoidzeroentry_ist sym do_sym ist
12177 ENTRY(\sym)
12178 INTR_FRAME
12179@@ -1062,8 +1361,24 @@ ENTRY(\sym)
12180 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12181 call save_paranoid
12182 TRACE_IRQS_OFF
12183+#ifdef CONFIG_PAX_MEMORY_UDEREF
12184+ testb $3, CS(%rsp)
12185+ jnz 1f
12186+ pax_enter_kernel
12187+ jmp 2f
12188+1: pax_enter_kernel_user
12189+2:
12190+#else
12191+ pax_enter_kernel
12192+#endif
12193 movq %rsp,%rdi /* pt_regs pointer */
12194 xorl %esi,%esi /* no error code */
12195+#ifdef CONFIG_SMP
12196+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12197+ lea init_tss(%r12), %r12
12198+#else
12199+ lea init_tss(%rip), %r12
12200+#endif
12201 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12202 call \do_sym
12203 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12204@@ -1080,6 +1395,16 @@ ENTRY(\sym)
12205 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12206 call error_entry
12207 DEFAULT_FRAME 0
12208+#ifdef CONFIG_PAX_MEMORY_UDEREF
12209+ testb $3, CS(%rsp)
12210+ jnz 1f
12211+ pax_enter_kernel
12212+ jmp 2f
12213+1: pax_enter_kernel_user
12214+2:
12215+#else
12216+ pax_enter_kernel
12217+#endif
12218 movq %rsp,%rdi /* pt_regs pointer */
12219 movq ORIG_RAX(%rsp),%rsi /* get error code */
12220 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12221@@ -1099,6 +1424,16 @@ ENTRY(\sym)
12222 call save_paranoid
12223 DEFAULT_FRAME 0
12224 TRACE_IRQS_OFF
12225+#ifdef CONFIG_PAX_MEMORY_UDEREF
12226+ testb $3, CS(%rsp)
12227+ jnz 1f
12228+ pax_enter_kernel
12229+ jmp 2f
12230+1: pax_enter_kernel_user
12231+2:
12232+#else
12233+ pax_enter_kernel
12234+#endif
12235 movq %rsp,%rdi /* pt_regs pointer */
12236 movq ORIG_RAX(%rsp),%rsi /* get error code */
12237 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12238@@ -1361,14 +1696,27 @@ ENTRY(paranoid_exit)
12239 TRACE_IRQS_OFF
12240 testl %ebx,%ebx /* swapgs needed? */
12241 jnz paranoid_restore
12242- testl $3,CS(%rsp)
12243+ testb $3,CS(%rsp)
12244 jnz paranoid_userspace
12245+#ifdef CONFIG_PAX_MEMORY_UDEREF
12246+ pax_exit_kernel
12247+ TRACE_IRQS_IRETQ 0
12248+ SWAPGS_UNSAFE_STACK
12249+ RESTORE_ALL 8
12250+ jmp irq_return
12251+#endif
12252 paranoid_swapgs:
12253+#ifdef CONFIG_PAX_MEMORY_UDEREF
12254+ pax_exit_kernel_user
12255+#else
12256+ pax_exit_kernel
12257+#endif
12258 TRACE_IRQS_IRETQ 0
12259 SWAPGS_UNSAFE_STACK
12260 RESTORE_ALL 8
12261 jmp irq_return
12262 paranoid_restore:
12263+ pax_exit_kernel
12264 TRACE_IRQS_IRETQ 0
12265 RESTORE_ALL 8
12266 jmp irq_return
12267@@ -1426,7 +1774,7 @@ ENTRY(error_entry)
12268 movq_cfi r14, R14+8
12269 movq_cfi r15, R15+8
12270 xorl %ebx,%ebx
12271- testl $3,CS+8(%rsp)
12272+ testb $3,CS+8(%rsp)
12273 je error_kernelspace
12274 error_swapgs:
12275 SWAPGS
12276@@ -1490,6 +1838,16 @@ ENTRY(nmi)
12277 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12278 call save_paranoid
12279 DEFAULT_FRAME 0
12280+#ifdef CONFIG_PAX_MEMORY_UDEREF
12281+ testb $3, CS(%rsp)
12282+ jnz 1f
12283+ pax_enter_kernel
12284+ jmp 2f
12285+1: pax_enter_kernel_user
12286+2:
12287+#else
12288+ pax_enter_kernel
12289+#endif
12290 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12291 movq %rsp,%rdi
12292 movq $-1,%rsi
12293@@ -1500,11 +1858,25 @@ ENTRY(nmi)
12294 DISABLE_INTERRUPTS(CLBR_NONE)
12295 testl %ebx,%ebx /* swapgs needed? */
12296 jnz nmi_restore
12297- testl $3,CS(%rsp)
12298+ testb $3,CS(%rsp)
12299 jnz nmi_userspace
12300+#ifdef CONFIG_PAX_MEMORY_UDEREF
12301+ pax_exit_kernel
12302+ SWAPGS_UNSAFE_STACK
12303+ RESTORE_ALL 8
12304+ jmp irq_return
12305+#endif
12306 nmi_swapgs:
12307+#ifdef CONFIG_PAX_MEMORY_UDEREF
12308+ pax_exit_kernel_user
12309+#else
12310+ pax_exit_kernel
12311+#endif
12312 SWAPGS_UNSAFE_STACK
12313+ RESTORE_ALL 8
12314+ jmp irq_return
12315 nmi_restore:
12316+ pax_exit_kernel
12317 RESTORE_ALL 8
12318 jmp irq_return
12319 nmi_userspace:
12320diff -urNp linux-3.0.3/arch/x86/kernel/ftrace.c linux-3.0.3/arch/x86/kernel/ftrace.c
12321--- linux-3.0.3/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12322+++ linux-3.0.3/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12323@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12324 static const void *mod_code_newcode; /* holds the text to write to the IP */
12325
12326 static unsigned nmi_wait_count;
12327-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12328+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12329
12330 int ftrace_arch_read_dyn_info(char *buf, int size)
12331 {
12332@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12333
12334 r = snprintf(buf, size, "%u %u",
12335 nmi_wait_count,
12336- atomic_read(&nmi_update_count));
12337+ atomic_read_unchecked(&nmi_update_count));
12338 return r;
12339 }
12340
12341@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12342
12343 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12344 smp_rmb();
12345+ pax_open_kernel();
12346 ftrace_mod_code();
12347- atomic_inc(&nmi_update_count);
12348+ pax_close_kernel();
12349+ atomic_inc_unchecked(&nmi_update_count);
12350 }
12351 /* Must have previous changes seen before executions */
12352 smp_mb();
12353@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12354 {
12355 unsigned char replaced[MCOUNT_INSN_SIZE];
12356
12357+ ip = ktla_ktva(ip);
12358+
12359 /*
12360 * Note: Due to modules and __init, code can
12361 * disappear and change, we need to protect against faulting
12362@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12363 unsigned char old[MCOUNT_INSN_SIZE], *new;
12364 int ret;
12365
12366- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12367+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12368 new = ftrace_call_replace(ip, (unsigned long)func);
12369 ret = ftrace_modify_code(ip, old, new);
12370
12371@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12372 {
12373 unsigned char code[MCOUNT_INSN_SIZE];
12374
12375+ ip = ktla_ktva(ip);
12376+
12377 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12378 return -EFAULT;
12379
12380diff -urNp linux-3.0.3/arch/x86/kernel/head32.c linux-3.0.3/arch/x86/kernel/head32.c
12381--- linux-3.0.3/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12382+++ linux-3.0.3/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12383@@ -19,6 +19,7 @@
12384 #include <asm/io_apic.h>
12385 #include <asm/bios_ebda.h>
12386 #include <asm/tlbflush.h>
12387+#include <asm/boot.h>
12388
12389 static void __init i386_default_early_setup(void)
12390 {
12391@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12392 {
12393 memblock_init();
12394
12395- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12396+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12397
12398 #ifdef CONFIG_BLK_DEV_INITRD
12399 /* Reserve INITRD */
12400diff -urNp linux-3.0.3/arch/x86/kernel/head_32.S linux-3.0.3/arch/x86/kernel/head_32.S
12401--- linux-3.0.3/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12402+++ linux-3.0.3/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12403@@ -25,6 +25,12 @@
12404 /* Physical address */
12405 #define pa(X) ((X) - __PAGE_OFFSET)
12406
12407+#ifdef CONFIG_PAX_KERNEXEC
12408+#define ta(X) (X)
12409+#else
12410+#define ta(X) ((X) - __PAGE_OFFSET)
12411+#endif
12412+
12413 /*
12414 * References to members of the new_cpu_data structure.
12415 */
12416@@ -54,11 +60,7 @@
12417 * and small than max_low_pfn, otherwise will waste some page table entries
12418 */
12419
12420-#if PTRS_PER_PMD > 1
12421-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12422-#else
12423-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12424-#endif
12425+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12426
12427 /* Number of possible pages in the lowmem region */
12428 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12429@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12430 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12431
12432 /*
12433+ * Real beginning of normal "text" segment
12434+ */
12435+ENTRY(stext)
12436+ENTRY(_stext)
12437+
12438+/*
12439 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12440 * %esi points to the real-mode code as a 32-bit pointer.
12441 * CS and DS must be 4 GB flat segments, but we don't depend on
12442@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12443 * can.
12444 */
12445 __HEAD
12446+
12447+#ifdef CONFIG_PAX_KERNEXEC
12448+ jmp startup_32
12449+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12450+.fill PAGE_SIZE-5,1,0xcc
12451+#endif
12452+
12453 ENTRY(startup_32)
12454 movl pa(stack_start),%ecx
12455
12456@@ -105,6 +120,57 @@ ENTRY(startup_32)
12457 2:
12458 leal -__PAGE_OFFSET(%ecx),%esp
12459
12460+#ifdef CONFIG_SMP
12461+ movl $pa(cpu_gdt_table),%edi
12462+ movl $__per_cpu_load,%eax
12463+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12464+ rorl $16,%eax
12465+ movb %al,__KERNEL_PERCPU + 4(%edi)
12466+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12467+ movl $__per_cpu_end - 1,%eax
12468+ subl $__per_cpu_start,%eax
12469+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12470+#endif
12471+
12472+#ifdef CONFIG_PAX_MEMORY_UDEREF
12473+ movl $NR_CPUS,%ecx
12474+ movl $pa(cpu_gdt_table),%edi
12475+1:
12476+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12477+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12478+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12479+ addl $PAGE_SIZE_asm,%edi
12480+ loop 1b
12481+#endif
12482+
12483+#ifdef CONFIG_PAX_KERNEXEC
12484+ movl $pa(boot_gdt),%edi
12485+ movl $__LOAD_PHYSICAL_ADDR,%eax
12486+ movw %ax,__BOOT_CS + 2(%edi)
12487+ rorl $16,%eax
12488+ movb %al,__BOOT_CS + 4(%edi)
12489+ movb %ah,__BOOT_CS + 7(%edi)
12490+ rorl $16,%eax
12491+
12492+ ljmp $(__BOOT_CS),$1f
12493+1:
12494+
12495+ movl $NR_CPUS,%ecx
12496+ movl $pa(cpu_gdt_table),%edi
12497+ addl $__PAGE_OFFSET,%eax
12498+1:
12499+ movw %ax,__KERNEL_CS + 2(%edi)
12500+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12501+ rorl $16,%eax
12502+ movb %al,__KERNEL_CS + 4(%edi)
12503+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12504+ movb %ah,__KERNEL_CS + 7(%edi)
12505+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12506+ rorl $16,%eax
12507+ addl $PAGE_SIZE_asm,%edi
12508+ loop 1b
12509+#endif
12510+
12511 /*
12512 * Clear BSS first so that there are no surprises...
12513 */
12514@@ -195,8 +261,11 @@ ENTRY(startup_32)
12515 movl %eax, pa(max_pfn_mapped)
12516
12517 /* Do early initialization of the fixmap area */
12518- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12519- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520+#ifdef CONFIG_COMPAT_VDSO
12521+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12522+#else
12523+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12524+#endif
12525 #else /* Not PAE */
12526
12527 page_pde_offset = (__PAGE_OFFSET >> 20);
12528@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12529 movl %eax, pa(max_pfn_mapped)
12530
12531 /* Do early initialization of the fixmap area */
12532- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12533- movl %eax,pa(initial_page_table+0xffc)
12534+#ifdef CONFIG_COMPAT_VDSO
12535+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12536+#else
12537+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12538+#endif
12539 #endif
12540
12541 #ifdef CONFIG_PARAVIRT
12542@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12543 cmpl $num_subarch_entries, %eax
12544 jae bad_subarch
12545
12546- movl pa(subarch_entries)(,%eax,4), %eax
12547- subl $__PAGE_OFFSET, %eax
12548- jmp *%eax
12549+ jmp *pa(subarch_entries)(,%eax,4)
12550
12551 bad_subarch:
12552 WEAK(lguest_entry)
12553@@ -255,10 +325,10 @@ WEAK(xen_entry)
12554 __INITDATA
12555
12556 subarch_entries:
12557- .long default_entry /* normal x86/PC */
12558- .long lguest_entry /* lguest hypervisor */
12559- .long xen_entry /* Xen hypervisor */
12560- .long default_entry /* Moorestown MID */
12561+ .long ta(default_entry) /* normal x86/PC */
12562+ .long ta(lguest_entry) /* lguest hypervisor */
12563+ .long ta(xen_entry) /* Xen hypervisor */
12564+ .long ta(default_entry) /* Moorestown MID */
12565 num_subarch_entries = (. - subarch_entries) / 4
12566 .previous
12567 #else
12568@@ -312,6 +382,7 @@ default_entry:
12569 orl %edx,%eax
12570 movl %eax,%cr4
12571
12572+#ifdef CONFIG_X86_PAE
12573 testb $X86_CR4_PAE, %al # check if PAE is enabled
12574 jz 6f
12575
12576@@ -340,6 +411,9 @@ default_entry:
12577 /* Make changes effective */
12578 wrmsr
12579
12580+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12581+#endif
12582+
12583 6:
12584
12585 /*
12586@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12587 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12588 movl %eax,%ss # after changing gdt.
12589
12590- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12591+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12592 movl %eax,%ds
12593 movl %eax,%es
12594
12595@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12596 */
12597 cmpb $0,ready
12598 jne 1f
12599- movl $gdt_page,%eax
12600+ movl $cpu_gdt_table,%eax
12601 movl $stack_canary,%ecx
12602+#ifdef CONFIG_SMP
12603+ addl $__per_cpu_load,%ecx
12604+#endif
12605 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12606 shrl $16, %ecx
12607 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12608 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12609 1:
12610-#endif
12611 movl $(__KERNEL_STACK_CANARY),%eax
12612+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12613+ movl $(__USER_DS),%eax
12614+#else
12615+ xorl %eax,%eax
12616+#endif
12617 movl %eax,%gs
12618
12619 xorl %eax,%eax # Clear LDT
12620@@ -558,22 +639,22 @@ early_page_fault:
12621 jmp early_fault
12622
12623 early_fault:
12624- cld
12625 #ifdef CONFIG_PRINTK
12626+ cmpl $1,%ss:early_recursion_flag
12627+ je hlt_loop
12628+ incl %ss:early_recursion_flag
12629+ cld
12630 pusha
12631 movl $(__KERNEL_DS),%eax
12632 movl %eax,%ds
12633 movl %eax,%es
12634- cmpl $2,early_recursion_flag
12635- je hlt_loop
12636- incl early_recursion_flag
12637 movl %cr2,%eax
12638 pushl %eax
12639 pushl %edx /* trapno */
12640 pushl $fault_msg
12641 call printk
12642+; call dump_stack
12643 #endif
12644- call dump_stack
12645 hlt_loop:
12646 hlt
12647 jmp hlt_loop
12648@@ -581,8 +662,11 @@ hlt_loop:
12649 /* This is the default interrupt "handler" :-) */
12650 ALIGN
12651 ignore_int:
12652- cld
12653 #ifdef CONFIG_PRINTK
12654+ cmpl $2,%ss:early_recursion_flag
12655+ je hlt_loop
12656+ incl %ss:early_recursion_flag
12657+ cld
12658 pushl %eax
12659 pushl %ecx
12660 pushl %edx
12661@@ -591,9 +675,6 @@ ignore_int:
12662 movl $(__KERNEL_DS),%eax
12663 movl %eax,%ds
12664 movl %eax,%es
12665- cmpl $2,early_recursion_flag
12666- je hlt_loop
12667- incl early_recursion_flag
12668 pushl 16(%esp)
12669 pushl 24(%esp)
12670 pushl 32(%esp)
12671@@ -622,29 +703,43 @@ ENTRY(initial_code)
12672 /*
12673 * BSS section
12674 */
12675-__PAGE_ALIGNED_BSS
12676- .align PAGE_SIZE
12677 #ifdef CONFIG_X86_PAE
12678+.section .initial_pg_pmd,"a",@progbits
12679 initial_pg_pmd:
12680 .fill 1024*KPMDS,4,0
12681 #else
12682+.section .initial_page_table,"a",@progbits
12683 ENTRY(initial_page_table)
12684 .fill 1024,4,0
12685 #endif
12686+.section .initial_pg_fixmap,"a",@progbits
12687 initial_pg_fixmap:
12688 .fill 1024,4,0
12689+.section .empty_zero_page,"a",@progbits
12690 ENTRY(empty_zero_page)
12691 .fill 4096,1,0
12692+.section .swapper_pg_dir,"a",@progbits
12693 ENTRY(swapper_pg_dir)
12694+#ifdef CONFIG_X86_PAE
12695+ .fill 4,8,0
12696+#else
12697 .fill 1024,4,0
12698+#endif
12699+
12700+/*
12701+ * The IDT has to be page-aligned to simplify the Pentium
12702+ * F0 0F bug workaround.. We have a special link segment
12703+ * for this.
12704+ */
12705+.section .idt,"a",@progbits
12706+ENTRY(idt_table)
12707+ .fill 256,8,0
12708
12709 /*
12710 * This starts the data section.
12711 */
12712 #ifdef CONFIG_X86_PAE
12713-__PAGE_ALIGNED_DATA
12714- /* Page-aligned for the benefit of paravirt? */
12715- .align PAGE_SIZE
12716+.section .initial_page_table,"a",@progbits
12717 ENTRY(initial_page_table)
12718 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12719 # if KPMDS == 3
12720@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12721 # error "Kernel PMDs should be 1, 2 or 3"
12722 # endif
12723 .align PAGE_SIZE /* needs to be page-sized too */
12724+
12725+#ifdef CONFIG_PAX_PER_CPU_PGD
12726+ENTRY(cpu_pgd)
12727+ .rept NR_CPUS
12728+ .fill 4,8,0
12729+ .endr
12730+#endif
12731+
12732 #endif
12733
12734 .data
12735 .balign 4
12736 ENTRY(stack_start)
12737- .long init_thread_union+THREAD_SIZE
12738+ .long init_thread_union+THREAD_SIZE-8
12739+
12740+ready: .byte 0
12741
12742+.section .rodata,"a",@progbits
12743 early_recursion_flag:
12744 .long 0
12745
12746-ready: .byte 0
12747-
12748 int_msg:
12749 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12750
12751@@ -707,7 +811,7 @@ fault_msg:
12752 .word 0 # 32 bit align gdt_desc.address
12753 boot_gdt_descr:
12754 .word __BOOT_DS+7
12755- .long boot_gdt - __PAGE_OFFSET
12756+ .long pa(boot_gdt)
12757
12758 .word 0 # 32-bit align idt_desc.address
12759 idt_descr:
12760@@ -718,7 +822,7 @@ idt_descr:
12761 .word 0 # 32 bit align gdt_desc.address
12762 ENTRY(early_gdt_descr)
12763 .word GDT_ENTRIES*8-1
12764- .long gdt_page /* Overwritten for secondary CPUs */
12765+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12766
12767 /*
12768 * The boot_gdt must mirror the equivalent in setup.S and is
12769@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12770 .align L1_CACHE_BYTES
12771 ENTRY(boot_gdt)
12772 .fill GDT_ENTRY_BOOT_CS,8,0
12773- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12774- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12775+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12776+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12777+
12778+ .align PAGE_SIZE_asm
12779+ENTRY(cpu_gdt_table)
12780+ .rept NR_CPUS
12781+ .quad 0x0000000000000000 /* NULL descriptor */
12782+ .quad 0x0000000000000000 /* 0x0b reserved */
12783+ .quad 0x0000000000000000 /* 0x13 reserved */
12784+ .quad 0x0000000000000000 /* 0x1b reserved */
12785+
12786+#ifdef CONFIG_PAX_KERNEXEC
12787+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12788+#else
12789+ .quad 0x0000000000000000 /* 0x20 unused */
12790+#endif
12791+
12792+ .quad 0x0000000000000000 /* 0x28 unused */
12793+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12794+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12795+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12796+ .quad 0x0000000000000000 /* 0x4b reserved */
12797+ .quad 0x0000000000000000 /* 0x53 reserved */
12798+ .quad 0x0000000000000000 /* 0x5b reserved */
12799+
12800+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12801+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12802+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12803+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12804+
12805+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12806+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12807+
12808+ /*
12809+ * Segments used for calling PnP BIOS have byte granularity.
12810+ * The code segments and data segments have fixed 64k limits,
12811+ * the transfer segment sizes are set at run time.
12812+ */
12813+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12814+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12815+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12816+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12817+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12818+
12819+ /*
12820+ * The APM segments have byte granularity and their bases
12821+ * are set at run time. All have 64k limits.
12822+ */
12823+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12824+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12825+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12826+
12827+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12828+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12829+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12830+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12831+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12832+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12833+
12834+ /* Be sure this is zeroed to avoid false validations in Xen */
12835+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12836+ .endr
12837diff -urNp linux-3.0.3/arch/x86/kernel/head_64.S linux-3.0.3/arch/x86/kernel/head_64.S
12838--- linux-3.0.3/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12839+++ linux-3.0.3/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12840@@ -19,6 +19,7 @@
12841 #include <asm/cache.h>
12842 #include <asm/processor-flags.h>
12843 #include <asm/percpu.h>
12844+#include <asm/cpufeature.h>
12845
12846 #ifdef CONFIG_PARAVIRT
12847 #include <asm/asm-offsets.h>
12848@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12849 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12850 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12851 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12852+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12853+L3_VMALLOC_START = pud_index(VMALLOC_START)
12854+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12855+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12856
12857 .text
12858 __HEAD
12859@@ -85,35 +90,22 @@ startup_64:
12860 */
12861 addq %rbp, init_level4_pgt + 0(%rip)
12862 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12863+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12864+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12865 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12866
12867 addq %rbp, level3_ident_pgt + 0(%rip)
12868+#ifndef CONFIG_XEN
12869+ addq %rbp, level3_ident_pgt + 8(%rip)
12870+#endif
12871
12872- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12873- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12874+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12875
12876- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12877+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12878+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12879
12880- /* Add an Identity mapping if I am above 1G */
12881- leaq _text(%rip), %rdi
12882- andq $PMD_PAGE_MASK, %rdi
12883-
12884- movq %rdi, %rax
12885- shrq $PUD_SHIFT, %rax
12886- andq $(PTRS_PER_PUD - 1), %rax
12887- jz ident_complete
12888-
12889- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12890- leaq level3_ident_pgt(%rip), %rbx
12891- movq %rdx, 0(%rbx, %rax, 8)
12892-
12893- movq %rdi, %rax
12894- shrq $PMD_SHIFT, %rax
12895- andq $(PTRS_PER_PMD - 1), %rax
12896- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12897- leaq level2_spare_pgt(%rip), %rbx
12898- movq %rdx, 0(%rbx, %rax, 8)
12899-ident_complete:
12900+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12901+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12902
12903 /*
12904 * Fixup the kernel text+data virtual addresses. Note that
12905@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12906 * after the boot processor executes this code.
12907 */
12908
12909- /* Enable PAE mode and PGE */
12910- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12911+ /* Enable PAE mode and PSE/PGE */
12912+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12913 movq %rax, %cr4
12914
12915 /* Setup early boot stage 4 level pagetables. */
12916@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12917 movl $MSR_EFER, %ecx
12918 rdmsr
12919 btsl $_EFER_SCE, %eax /* Enable System Call */
12920- btl $20,%edi /* No Execute supported? */
12921+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12922 jnc 1f
12923 btsl $_EFER_NX, %eax
12924+ leaq init_level4_pgt(%rip), %rdi
12925+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12926+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12927+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12928+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12929 1: wrmsr /* Make changes effective */
12930
12931 /* Setup cr0 */
12932@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12933 bad_address:
12934 jmp bad_address
12935
12936- .section ".init.text","ax"
12937+ __INIT
12938 #ifdef CONFIG_EARLY_PRINTK
12939 .globl early_idt_handlers
12940 early_idt_handlers:
12941@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12942 #endif /* EARLY_PRINTK */
12943 1: hlt
12944 jmp 1b
12945+ .previous
12946
12947 #ifdef CONFIG_EARLY_PRINTK
12948+ __INITDATA
12949 early_recursion_flag:
12950 .long 0
12951+ .previous
12952
12953+ .section .rodata,"a",@progbits
12954 early_idt_msg:
12955 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12956 early_idt_ripmsg:
12957 .asciz "RIP %s\n"
12958-#endif /* CONFIG_EARLY_PRINTK */
12959 .previous
12960+#endif /* CONFIG_EARLY_PRINTK */
12961
12962+ .section .rodata,"a",@progbits
12963 #define NEXT_PAGE(name) \
12964 .balign PAGE_SIZE; \
12965 ENTRY(name)
12966@@ -338,7 +340,6 @@ ENTRY(name)
12967 i = i + 1 ; \
12968 .endr
12969
12970- .data
12971 /*
12972 * This default setting generates an ident mapping at address 0x100000
12973 * and a mapping for the kernel that precisely maps virtual address
12974@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12975 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12977 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
12979+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12980+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12981+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12982 .org init_level4_pgt + L4_START_KERNEL*8, 0
12983 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12984 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12985
12986+#ifdef CONFIG_PAX_PER_CPU_PGD
12987+NEXT_PAGE(cpu_pgd)
12988+ .rept NR_CPUS
12989+ .fill 512,8,0
12990+ .endr
12991+#endif
12992+
12993 NEXT_PAGE(level3_ident_pgt)
12994 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12995+#ifdef CONFIG_XEN
12996 .fill 511,8,0
12997+#else
12998+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12999+ .fill 510,8,0
13000+#endif
13001+
13002+NEXT_PAGE(level3_vmalloc_pgt)
13003+ .fill 512,8,0
13004+
13005+NEXT_PAGE(level3_vmemmap_pgt)
13006+ .fill L3_VMEMMAP_START,8,0
13007+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13008
13009 NEXT_PAGE(level3_kernel_pgt)
13010 .fill L3_START_KERNEL,8,0
13011@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13012 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13013 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13014
13015+NEXT_PAGE(level2_vmemmap_pgt)
13016+ .fill 512,8,0
13017+
13018 NEXT_PAGE(level2_fixmap_pgt)
13019- .fill 506,8,0
13020- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13021- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13022- .fill 5,8,0
13023+ .fill 507,8,0
13024+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13025+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13026+ .fill 4,8,0
13027
13028-NEXT_PAGE(level1_fixmap_pgt)
13029+NEXT_PAGE(level1_vsyscall_pgt)
13030 .fill 512,8,0
13031
13032-NEXT_PAGE(level2_ident_pgt)
13033- /* Since I easily can, map the first 1G.
13034+ /* Since I easily can, map the first 2G.
13035 * Don't set NX because code runs from these pages.
13036 */
13037- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13038+NEXT_PAGE(level2_ident_pgt)
13039+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13040
13041 NEXT_PAGE(level2_kernel_pgt)
13042 /*
13043@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13044 * If you want to increase this then increase MODULES_VADDR
13045 * too.)
13046 */
13047- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13048- KERNEL_IMAGE_SIZE/PMD_SIZE)
13049-
13050-NEXT_PAGE(level2_spare_pgt)
13051- .fill 512, 8, 0
13052+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13053
13054 #undef PMDS
13055 #undef NEXT_PAGE
13056
13057- .data
13058+ .align PAGE_SIZE
13059+ENTRY(cpu_gdt_table)
13060+ .rept NR_CPUS
13061+ .quad 0x0000000000000000 /* NULL descriptor */
13062+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13063+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13064+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13065+ .quad 0x00cffb000000ffff /* __USER32_CS */
13066+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13067+ .quad 0x00affb000000ffff /* __USER_CS */
13068+
13069+#ifdef CONFIG_PAX_KERNEXEC
13070+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13071+#else
13072+ .quad 0x0 /* unused */
13073+#endif
13074+
13075+ .quad 0,0 /* TSS */
13076+ .quad 0,0 /* LDT */
13077+ .quad 0,0,0 /* three TLS descriptors */
13078+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13079+ /* asm/segment.h:GDT_ENTRIES must match this */
13080+
13081+ /* zero the remaining page */
13082+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13083+ .endr
13084+
13085 .align 16
13086 .globl early_gdt_descr
13087 early_gdt_descr:
13088 .word GDT_ENTRIES*8-1
13089 early_gdt_descr_base:
13090- .quad INIT_PER_CPU_VAR(gdt_page)
13091+ .quad cpu_gdt_table
13092
13093 ENTRY(phys_base)
13094 /* This must match the first entry in level2_kernel_pgt */
13095 .quad 0x0000000000000000
13096
13097 #include "../../x86/xen/xen-head.S"
13098-
13099- .section .bss, "aw", @nobits
13100+
13101+ .section .rodata,"a",@progbits
13102 .align L1_CACHE_BYTES
13103 ENTRY(idt_table)
13104- .skip IDT_ENTRIES * 16
13105+ .fill 512,8,0
13106
13107 __PAGE_ALIGNED_BSS
13108 .align PAGE_SIZE
13109diff -urNp linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c
13110--- linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13111+++ linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13112@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13113 EXPORT_SYMBOL(cmpxchg8b_emu);
13114 #endif
13115
13116+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13117+
13118 /* Networking helper routines. */
13119 EXPORT_SYMBOL(csum_partial_copy_generic);
13120+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13121+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13122
13123 EXPORT_SYMBOL(__get_user_1);
13124 EXPORT_SYMBOL(__get_user_2);
13125@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13126
13127 EXPORT_SYMBOL(csum_partial);
13128 EXPORT_SYMBOL(empty_zero_page);
13129+
13130+#ifdef CONFIG_PAX_KERNEXEC
13131+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13132+#endif
13133diff -urNp linux-3.0.3/arch/x86/kernel/i8259.c linux-3.0.3/arch/x86/kernel/i8259.c
13134--- linux-3.0.3/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13135+++ linux-3.0.3/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13136@@ -210,7 +210,7 @@ spurious_8259A_irq:
13137 "spurious 8259A interrupt: IRQ%d.\n", irq);
13138 spurious_irq_mask |= irqmask;
13139 }
13140- atomic_inc(&irq_err_count);
13141+ atomic_inc_unchecked(&irq_err_count);
13142 /*
13143 * Theoretically we do not have to handle this IRQ,
13144 * but in Linux this does not cause problems and is
13145diff -urNp linux-3.0.3/arch/x86/kernel/init_task.c linux-3.0.3/arch/x86/kernel/init_task.c
13146--- linux-3.0.3/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13147+++ linux-3.0.3/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13148@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13149 * way process stacks are handled. This is done by having a special
13150 * "init_task" linker map entry..
13151 */
13152-union thread_union init_thread_union __init_task_data =
13153- { INIT_THREAD_INFO(init_task) };
13154+union thread_union init_thread_union __init_task_data;
13155
13156 /*
13157 * Initial task structure.
13158@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13159 * section. Since TSS's are completely CPU-local, we want them
13160 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13161 */
13162-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13163-
13164+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13165+EXPORT_SYMBOL(init_tss);
13166diff -urNp linux-3.0.3/arch/x86/kernel/ioport.c linux-3.0.3/arch/x86/kernel/ioport.c
13167--- linux-3.0.3/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13168+++ linux-3.0.3/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13169@@ -6,6 +6,7 @@
13170 #include <linux/sched.h>
13171 #include <linux/kernel.h>
13172 #include <linux/capability.h>
13173+#include <linux/security.h>
13174 #include <linux/errno.h>
13175 #include <linux/types.h>
13176 #include <linux/ioport.h>
13177@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13178
13179 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13180 return -EINVAL;
13181+#ifdef CONFIG_GRKERNSEC_IO
13182+ if (turn_on && grsec_disable_privio) {
13183+ gr_handle_ioperm();
13184+ return -EPERM;
13185+ }
13186+#endif
13187 if (turn_on && !capable(CAP_SYS_RAWIO))
13188 return -EPERM;
13189
13190@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13191 * because the ->io_bitmap_max value must match the bitmap
13192 * contents:
13193 */
13194- tss = &per_cpu(init_tss, get_cpu());
13195+ tss = init_tss + get_cpu();
13196
13197 if (turn_on)
13198 bitmap_clear(t->io_bitmap_ptr, from, num);
13199@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13200 return -EINVAL;
13201 /* Trying to gain more privileges? */
13202 if (level > old) {
13203+#ifdef CONFIG_GRKERNSEC_IO
13204+ if (grsec_disable_privio) {
13205+ gr_handle_iopl();
13206+ return -EPERM;
13207+ }
13208+#endif
13209 if (!capable(CAP_SYS_RAWIO))
13210 return -EPERM;
13211 }
13212diff -urNp linux-3.0.3/arch/x86/kernel/irq_32.c linux-3.0.3/arch/x86/kernel/irq_32.c
13213--- linux-3.0.3/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13214+++ linux-3.0.3/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13215@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13216 __asm__ __volatile__("andl %%esp,%0" :
13217 "=r" (sp) : "0" (THREAD_SIZE - 1));
13218
13219- return sp < (sizeof(struct thread_info) + STACK_WARN);
13220+ return sp < STACK_WARN;
13221 }
13222
13223 static void print_stack_overflow(void)
13224@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13225 * per-CPU IRQ handling contexts (thread information and stack)
13226 */
13227 union irq_ctx {
13228- struct thread_info tinfo;
13229- u32 stack[THREAD_SIZE/sizeof(u32)];
13230+ unsigned long previous_esp;
13231+ u32 stack[THREAD_SIZE/sizeof(u32)];
13232 } __attribute__((aligned(THREAD_SIZE)));
13233
13234 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13235@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13236 static inline int
13237 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13238 {
13239- union irq_ctx *curctx, *irqctx;
13240+ union irq_ctx *irqctx;
13241 u32 *isp, arg1, arg2;
13242
13243- curctx = (union irq_ctx *) current_thread_info();
13244 irqctx = __this_cpu_read(hardirq_ctx);
13245
13246 /*
13247@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13248 * handler) we can't do that and just have to keep using the
13249 * current stack (which is the irq stack already after all)
13250 */
13251- if (unlikely(curctx == irqctx))
13252+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13253 return 0;
13254
13255 /* build the stack frame on the IRQ stack */
13256- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13257- irqctx->tinfo.task = curctx->tinfo.task;
13258- irqctx->tinfo.previous_esp = current_stack_pointer;
13259+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13260+ irqctx->previous_esp = current_stack_pointer;
13261
13262- /*
13263- * Copy the softirq bits in preempt_count so that the
13264- * softirq checks work in the hardirq context.
13265- */
13266- irqctx->tinfo.preempt_count =
13267- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13268- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13269+#ifdef CONFIG_PAX_MEMORY_UDEREF
13270+ __set_fs(MAKE_MM_SEG(0));
13271+#endif
13272
13273 if (unlikely(overflow))
13274 call_on_stack(print_stack_overflow, isp);
13275@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13276 : "0" (irq), "1" (desc), "2" (isp),
13277 "D" (desc->handle_irq)
13278 : "memory", "cc", "ecx");
13279+
13280+#ifdef CONFIG_PAX_MEMORY_UDEREF
13281+ __set_fs(current_thread_info()->addr_limit);
13282+#endif
13283+
13284 return 1;
13285 }
13286
13287@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13288 */
13289 void __cpuinit irq_ctx_init(int cpu)
13290 {
13291- union irq_ctx *irqctx;
13292-
13293 if (per_cpu(hardirq_ctx, cpu))
13294 return;
13295
13296- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13297- THREAD_FLAGS,
13298- THREAD_ORDER));
13299- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13300- irqctx->tinfo.cpu = cpu;
13301- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13302- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13303-
13304- per_cpu(hardirq_ctx, cpu) = irqctx;
13305-
13306- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13307- THREAD_FLAGS,
13308- THREAD_ORDER));
13309- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13310- irqctx->tinfo.cpu = cpu;
13311- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13312-
13313- per_cpu(softirq_ctx, cpu) = irqctx;
13314+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13315+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13316
13317 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13318 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13319@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13320 asmlinkage void do_softirq(void)
13321 {
13322 unsigned long flags;
13323- struct thread_info *curctx;
13324 union irq_ctx *irqctx;
13325 u32 *isp;
13326
13327@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13328 local_irq_save(flags);
13329
13330 if (local_softirq_pending()) {
13331- curctx = current_thread_info();
13332 irqctx = __this_cpu_read(softirq_ctx);
13333- irqctx->tinfo.task = curctx->task;
13334- irqctx->tinfo.previous_esp = current_stack_pointer;
13335+ irqctx->previous_esp = current_stack_pointer;
13336
13337 /* build the stack frame on the softirq stack */
13338- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13339+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13340+
13341+#ifdef CONFIG_PAX_MEMORY_UDEREF
13342+ __set_fs(MAKE_MM_SEG(0));
13343+#endif
13344
13345 call_on_stack(__do_softirq, isp);
13346+
13347+#ifdef CONFIG_PAX_MEMORY_UDEREF
13348+ __set_fs(current_thread_info()->addr_limit);
13349+#endif
13350+
13351 /*
13352 * Shouldn't happen, we returned above if in_interrupt():
13353 */
13354diff -urNp linux-3.0.3/arch/x86/kernel/irq.c linux-3.0.3/arch/x86/kernel/irq.c
13355--- linux-3.0.3/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13356+++ linux-3.0.3/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13357@@ -17,7 +17,7 @@
13358 #include <asm/mce.h>
13359 #include <asm/hw_irq.h>
13360
13361-atomic_t irq_err_count;
13362+atomic_unchecked_t irq_err_count;
13363
13364 /* Function pointer for generic interrupt vector handling */
13365 void (*x86_platform_ipi_callback)(void) = NULL;
13366@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13367 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13368 seq_printf(p, " Machine check polls\n");
13369 #endif
13370- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13371+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13372 #if defined(CONFIG_X86_IO_APIC)
13373- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13374+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13375 #endif
13376 return 0;
13377 }
13378@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13379
13380 u64 arch_irq_stat(void)
13381 {
13382- u64 sum = atomic_read(&irq_err_count);
13383+ u64 sum = atomic_read_unchecked(&irq_err_count);
13384
13385 #ifdef CONFIG_X86_IO_APIC
13386- sum += atomic_read(&irq_mis_count);
13387+ sum += atomic_read_unchecked(&irq_mis_count);
13388 #endif
13389 return sum;
13390 }
13391diff -urNp linux-3.0.3/arch/x86/kernel/kgdb.c linux-3.0.3/arch/x86/kernel/kgdb.c
13392--- linux-3.0.3/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13393+++ linux-3.0.3/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13394@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13395 #ifdef CONFIG_X86_32
13396 switch (regno) {
13397 case GDB_SS:
13398- if (!user_mode_vm(regs))
13399+ if (!user_mode(regs))
13400 *(unsigned long *)mem = __KERNEL_DS;
13401 break;
13402 case GDB_SP:
13403- if (!user_mode_vm(regs))
13404+ if (!user_mode(regs))
13405 *(unsigned long *)mem = kernel_stack_pointer(regs);
13406 break;
13407 case GDB_GS:
13408@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13409 case 'k':
13410 /* clear the trace bit */
13411 linux_regs->flags &= ~X86_EFLAGS_TF;
13412- atomic_set(&kgdb_cpu_doing_single_step, -1);
13413+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13414
13415 /* set the trace bit if we're stepping */
13416 if (remcomInBuffer[0] == 's') {
13417 linux_regs->flags |= X86_EFLAGS_TF;
13418- atomic_set(&kgdb_cpu_doing_single_step,
13419+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13420 raw_smp_processor_id());
13421 }
13422
13423@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13424 return NOTIFY_DONE;
13425
13426 case DIE_DEBUG:
13427- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13428+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13429 if (user_mode(regs))
13430 return single_step_cont(regs, args);
13431 break;
13432diff -urNp linux-3.0.3/arch/x86/kernel/kprobes.c linux-3.0.3/arch/x86/kernel/kprobes.c
13433--- linux-3.0.3/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13434+++ linux-3.0.3/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13435@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13436 } __attribute__((packed)) *insn;
13437
13438 insn = (struct __arch_relative_insn *)from;
13439+
13440+ pax_open_kernel();
13441 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13442 insn->op = op;
13443+ pax_close_kernel();
13444 }
13445
13446 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13447@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13448 kprobe_opcode_t opcode;
13449 kprobe_opcode_t *orig_opcodes = opcodes;
13450
13451- if (search_exception_tables((unsigned long)opcodes))
13452+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13453 return 0; /* Page fault may occur on this address. */
13454
13455 retry:
13456@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13457 }
13458 }
13459 insn_get_length(&insn);
13460+ pax_open_kernel();
13461 memcpy(dest, insn.kaddr, insn.length);
13462+ pax_close_kernel();
13463
13464 #ifdef CONFIG_X86_64
13465 if (insn_rip_relative(&insn)) {
13466@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13467 (u8 *) dest;
13468 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13469 disp = (u8 *) dest + insn_offset_displacement(&insn);
13470+ pax_open_kernel();
13471 *(s32 *) disp = (s32) newdisp;
13472+ pax_close_kernel();
13473 }
13474 #endif
13475 return insn.length;
13476@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13477 */
13478 __copy_instruction(p->ainsn.insn, p->addr, 0);
13479
13480- if (can_boost(p->addr))
13481+ if (can_boost(ktla_ktva(p->addr)))
13482 p->ainsn.boostable = 0;
13483 else
13484 p->ainsn.boostable = -1;
13485
13486- p->opcode = *p->addr;
13487+ p->opcode = *(ktla_ktva(p->addr));
13488 }
13489
13490 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13491@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13492 * nor set current_kprobe, because it doesn't use single
13493 * stepping.
13494 */
13495- regs->ip = (unsigned long)p->ainsn.insn;
13496+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13497 preempt_enable_no_resched();
13498 return;
13499 }
13500@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13501 if (p->opcode == BREAKPOINT_INSTRUCTION)
13502 regs->ip = (unsigned long)p->addr;
13503 else
13504- regs->ip = (unsigned long)p->ainsn.insn;
13505+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506 }
13507
13508 /*
13509@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13510 setup_singlestep(p, regs, kcb, 0);
13511 return 1;
13512 }
13513- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13514+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13515 /*
13516 * The breakpoint instruction was removed right
13517 * after we hit it. Another cpu has removed
13518@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13519 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13520 {
13521 unsigned long *tos = stack_addr(regs);
13522- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13523+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13524 unsigned long orig_ip = (unsigned long)p->addr;
13525 kprobe_opcode_t *insn = p->ainsn.insn;
13526
13527@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13528 struct die_args *args = data;
13529 int ret = NOTIFY_DONE;
13530
13531- if (args->regs && user_mode_vm(args->regs))
13532+ if (args->regs && user_mode(args->regs))
13533 return ret;
13534
13535 switch (val) {
13536@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13537 * Verify if the address gap is in 2GB range, because this uses
13538 * a relative jump.
13539 */
13540- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13541+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13542 if (abs(rel) > 0x7fffffff)
13543 return -ERANGE;
13544
13545@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13546 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13547
13548 /* Set probe function call */
13549- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13550+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13551
13552 /* Set returning jmp instruction at the tail of out-of-line buffer */
13553 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13554- (u8 *)op->kp.addr + op->optinsn.size);
13555+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13556
13557 flush_icache_range((unsigned long) buf,
13558 (unsigned long) buf + TMPL_END_IDX +
13559@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13560 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13561
13562 /* Backup instructions which will be replaced by jump address */
13563- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13564+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13565 RELATIVE_ADDR_SIZE);
13566
13567 insn_buf[0] = RELATIVEJUMP_OPCODE;
13568diff -urNp linux-3.0.3/arch/x86/kernel/kvm.c linux-3.0.3/arch/x86/kernel/kvm.c
13569--- linux-3.0.3/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13570+++ linux-3.0.3/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13571@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13572 pv_mmu_ops.set_pud = kvm_set_pud;
13573 #if PAGETABLE_LEVELS == 4
13574 pv_mmu_ops.set_pgd = kvm_set_pgd;
13575+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13576 #endif
13577 #endif
13578 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13579diff -urNp linux-3.0.3/arch/x86/kernel/ldt.c linux-3.0.3/arch/x86/kernel/ldt.c
13580--- linux-3.0.3/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13581+++ linux-3.0.3/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13582@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13583 if (reload) {
13584 #ifdef CONFIG_SMP
13585 preempt_disable();
13586- load_LDT(pc);
13587+ load_LDT_nolock(pc);
13588 if (!cpumask_equal(mm_cpumask(current->mm),
13589 cpumask_of(smp_processor_id())))
13590 smp_call_function(flush_ldt, current->mm, 1);
13591 preempt_enable();
13592 #else
13593- load_LDT(pc);
13594+ load_LDT_nolock(pc);
13595 #endif
13596 }
13597 if (oldsize) {
13598@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13599 return err;
13600
13601 for (i = 0; i < old->size; i++)
13602- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13603+ write_ldt_entry(new->ldt, i, old->ldt + i);
13604 return 0;
13605 }
13606
13607@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13608 retval = copy_ldt(&mm->context, &old_mm->context);
13609 mutex_unlock(&old_mm->context.lock);
13610 }
13611+
13612+ if (tsk == current) {
13613+ mm->context.vdso = 0;
13614+
13615+#ifdef CONFIG_X86_32
13616+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13617+ mm->context.user_cs_base = 0UL;
13618+ mm->context.user_cs_limit = ~0UL;
13619+
13620+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13621+ cpus_clear(mm->context.cpu_user_cs_mask);
13622+#endif
13623+
13624+#endif
13625+#endif
13626+
13627+ }
13628+
13629 return retval;
13630 }
13631
13632@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13633 }
13634 }
13635
13636+#ifdef CONFIG_PAX_SEGMEXEC
13637+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13638+ error = -EINVAL;
13639+ goto out_unlock;
13640+ }
13641+#endif
13642+
13643 fill_ldt(&ldt, &ldt_info);
13644 if (oldmode)
13645 ldt.avl = 0;
13646diff -urNp linux-3.0.3/arch/x86/kernel/machine_kexec_32.c linux-3.0.3/arch/x86/kernel/machine_kexec_32.c
13647--- linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13648+++ linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13649@@ -27,7 +27,7 @@
13650 #include <asm/cacheflush.h>
13651 #include <asm/debugreg.h>
13652
13653-static void set_idt(void *newidt, __u16 limit)
13654+static void set_idt(struct desc_struct *newidt, __u16 limit)
13655 {
13656 struct desc_ptr curidt;
13657
13658@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13659 }
13660
13661
13662-static void set_gdt(void *newgdt, __u16 limit)
13663+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13664 {
13665 struct desc_ptr curgdt;
13666
13667@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13668 }
13669
13670 control_page = page_address(image->control_code_page);
13671- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13672+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13673
13674 relocate_kernel_ptr = control_page;
13675 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13676diff -urNp linux-3.0.3/arch/x86/kernel/microcode_intel.c linux-3.0.3/arch/x86/kernel/microcode_intel.c
13677--- linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13678+++ linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13679@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13680
13681 static int get_ucode_user(void *to, const void *from, size_t n)
13682 {
13683- return copy_from_user(to, from, n);
13684+ return copy_from_user(to, (__force const void __user *)from, n);
13685 }
13686
13687 static enum ucode_state
13688 request_microcode_user(int cpu, const void __user *buf, size_t size)
13689 {
13690- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13691+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13692 }
13693
13694 static void microcode_fini_cpu(int cpu)
13695diff -urNp linux-3.0.3/arch/x86/kernel/module.c linux-3.0.3/arch/x86/kernel/module.c
13696--- linux-3.0.3/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13697+++ linux-3.0.3/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13698@@ -36,21 +36,66 @@
13699 #define DEBUGP(fmt...)
13700 #endif
13701
13702-void *module_alloc(unsigned long size)
13703+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13704 {
13705 if (PAGE_ALIGN(size) > MODULES_LEN)
13706 return NULL;
13707 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13708- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13709+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13710 -1, __builtin_return_address(0));
13711 }
13712
13713+void *module_alloc(unsigned long size)
13714+{
13715+
13716+#ifdef CONFIG_PAX_KERNEXEC
13717+ return __module_alloc(size, PAGE_KERNEL);
13718+#else
13719+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13720+#endif
13721+
13722+}
13723+
13724 /* Free memory returned from module_alloc */
13725 void module_free(struct module *mod, void *module_region)
13726 {
13727 vfree(module_region);
13728 }
13729
13730+#ifdef CONFIG_PAX_KERNEXEC
13731+#ifdef CONFIG_X86_32
13732+void *module_alloc_exec(unsigned long size)
13733+{
13734+ struct vm_struct *area;
13735+
13736+ if (size == 0)
13737+ return NULL;
13738+
13739+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13740+ return area ? area->addr : NULL;
13741+}
13742+EXPORT_SYMBOL(module_alloc_exec);
13743+
13744+void module_free_exec(struct module *mod, void *module_region)
13745+{
13746+ vunmap(module_region);
13747+}
13748+EXPORT_SYMBOL(module_free_exec);
13749+#else
13750+void module_free_exec(struct module *mod, void *module_region)
13751+{
13752+ module_free(mod, module_region);
13753+}
13754+EXPORT_SYMBOL(module_free_exec);
13755+
13756+void *module_alloc_exec(unsigned long size)
13757+{
13758+ return __module_alloc(size, PAGE_KERNEL_RX);
13759+}
13760+EXPORT_SYMBOL(module_alloc_exec);
13761+#endif
13762+#endif
13763+
13764 /* We don't need anything special. */
13765 int module_frob_arch_sections(Elf_Ehdr *hdr,
13766 Elf_Shdr *sechdrs,
13767@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13768 unsigned int i;
13769 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13770 Elf32_Sym *sym;
13771- uint32_t *location;
13772+ uint32_t *plocation, location;
13773
13774 DEBUGP("Applying relocate section %u to %u\n", relsec,
13775 sechdrs[relsec].sh_info);
13776 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13777 /* This is where to make the change */
13778- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13779- + rel[i].r_offset;
13780+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13781+ location = (uint32_t)plocation;
13782+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13783+ plocation = ktla_ktva((void *)plocation);
13784 /* This is the symbol it is referring to. Note that all
13785 undefined symbols have been resolved. */
13786 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13787@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13788 switch (ELF32_R_TYPE(rel[i].r_info)) {
13789 case R_386_32:
13790 /* We add the value into the location given */
13791- *location += sym->st_value;
13792+ pax_open_kernel();
13793+ *plocation += sym->st_value;
13794+ pax_close_kernel();
13795 break;
13796 case R_386_PC32:
13797 /* Add the value, subtract its postition */
13798- *location += sym->st_value - (uint32_t)location;
13799+ pax_open_kernel();
13800+ *plocation += sym->st_value - location;
13801+ pax_close_kernel();
13802 break;
13803 default:
13804 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13805@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13806 case R_X86_64_NONE:
13807 break;
13808 case R_X86_64_64:
13809+ pax_open_kernel();
13810 *(u64 *)loc = val;
13811+ pax_close_kernel();
13812 break;
13813 case R_X86_64_32:
13814+ pax_open_kernel();
13815 *(u32 *)loc = val;
13816+ pax_close_kernel();
13817 if (val != *(u32 *)loc)
13818 goto overflow;
13819 break;
13820 case R_X86_64_32S:
13821+ pax_open_kernel();
13822 *(s32 *)loc = val;
13823+ pax_close_kernel();
13824 if ((s64)val != *(s32 *)loc)
13825 goto overflow;
13826 break;
13827 case R_X86_64_PC32:
13828 val -= (u64)loc;
13829+ pax_open_kernel();
13830 *(u32 *)loc = val;
13831+ pax_close_kernel();
13832+
13833 #if 0
13834 if ((s64)val != *(s32 *)loc)
13835 goto overflow;
13836diff -urNp linux-3.0.3/arch/x86/kernel/paravirt.c linux-3.0.3/arch/x86/kernel/paravirt.c
13837--- linux-3.0.3/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13838+++ linux-3.0.3/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13839@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13840 {
13841 return x;
13842 }
13843+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13844+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13845+#endif
13846
13847 void __init default_banner(void)
13848 {
13849@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13850 * corresponding structure. */
13851 static void *get_call_destination(u8 type)
13852 {
13853- struct paravirt_patch_template tmpl = {
13854+ const struct paravirt_patch_template tmpl = {
13855 .pv_init_ops = pv_init_ops,
13856 .pv_time_ops = pv_time_ops,
13857 .pv_cpu_ops = pv_cpu_ops,
13858@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13859 .pv_lock_ops = pv_lock_ops,
13860 #endif
13861 };
13862+
13863+ pax_track_stack();
13864+
13865 return *((void **)&tmpl + type);
13866 }
13867
13868@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13869 if (opfunc == NULL)
13870 /* If there's no function, patch it with a ud2a (BUG) */
13871 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13872- else if (opfunc == _paravirt_nop)
13873+ else if (opfunc == (void *)_paravirt_nop)
13874 /* If the operation is a nop, then nop the callsite */
13875 ret = paravirt_patch_nop();
13876
13877 /* identity functions just return their single argument */
13878- else if (opfunc == _paravirt_ident_32)
13879+ else if (opfunc == (void *)_paravirt_ident_32)
13880 ret = paravirt_patch_ident_32(insnbuf, len);
13881- else if (opfunc == _paravirt_ident_64)
13882+ else if (opfunc == (void *)_paravirt_ident_64)
13883 ret = paravirt_patch_ident_64(insnbuf, len);
13884+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13885+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13886+ ret = paravirt_patch_ident_64(insnbuf, len);
13887+#endif
13888
13889 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13890 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13891@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13892 if (insn_len > len || start == NULL)
13893 insn_len = len;
13894 else
13895- memcpy(insnbuf, start, insn_len);
13896+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13897
13898 return insn_len;
13899 }
13900@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13901 preempt_enable();
13902 }
13903
13904-struct pv_info pv_info = {
13905+struct pv_info pv_info __read_only = {
13906 .name = "bare hardware",
13907 .paravirt_enabled = 0,
13908 .kernel_rpl = 0,
13909 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13910 };
13911
13912-struct pv_init_ops pv_init_ops = {
13913+struct pv_init_ops pv_init_ops __read_only = {
13914 .patch = native_patch,
13915 };
13916
13917-struct pv_time_ops pv_time_ops = {
13918+struct pv_time_ops pv_time_ops __read_only = {
13919 .sched_clock = native_sched_clock,
13920 };
13921
13922-struct pv_irq_ops pv_irq_ops = {
13923+struct pv_irq_ops pv_irq_ops __read_only = {
13924 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13925 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13926 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13927@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13928 #endif
13929 };
13930
13931-struct pv_cpu_ops pv_cpu_ops = {
13932+struct pv_cpu_ops pv_cpu_ops __read_only = {
13933 .cpuid = native_cpuid,
13934 .get_debugreg = native_get_debugreg,
13935 .set_debugreg = native_set_debugreg,
13936@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13937 .end_context_switch = paravirt_nop,
13938 };
13939
13940-struct pv_apic_ops pv_apic_ops = {
13941+struct pv_apic_ops pv_apic_ops __read_only = {
13942 #ifdef CONFIG_X86_LOCAL_APIC
13943 .startup_ipi_hook = paravirt_nop,
13944 #endif
13945 };
13946
13947-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13948+#ifdef CONFIG_X86_32
13949+#ifdef CONFIG_X86_PAE
13950+/* 64-bit pagetable entries */
13951+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13952+#else
13953 /* 32-bit pagetable entries */
13954 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13955+#endif
13956 #else
13957 /* 64-bit pagetable entries */
13958 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13959 #endif
13960
13961-struct pv_mmu_ops pv_mmu_ops = {
13962+struct pv_mmu_ops pv_mmu_ops __read_only = {
13963
13964 .read_cr2 = native_read_cr2,
13965 .write_cr2 = native_write_cr2,
13966@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13967 .make_pud = PTE_IDENT,
13968
13969 .set_pgd = native_set_pgd,
13970+ .set_pgd_batched = native_set_pgd_batched,
13971 #endif
13972 #endif /* PAGETABLE_LEVELS >= 3 */
13973
13974@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13975 },
13976
13977 .set_fixmap = native_set_fixmap,
13978+
13979+#ifdef CONFIG_PAX_KERNEXEC
13980+ .pax_open_kernel = native_pax_open_kernel,
13981+ .pax_close_kernel = native_pax_close_kernel,
13982+#endif
13983+
13984 };
13985
13986 EXPORT_SYMBOL_GPL(pv_time_ops);
13987diff -urNp linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c
13988--- linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13989+++ linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13990@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13991 arch_spin_lock(lock);
13992 }
13993
13994-struct pv_lock_ops pv_lock_ops = {
13995+struct pv_lock_ops pv_lock_ops __read_only = {
13996 #ifdef CONFIG_SMP
13997 .spin_is_locked = __ticket_spin_is_locked,
13998 .spin_is_contended = __ticket_spin_is_contended,
13999diff -urNp linux-3.0.3/arch/x86/kernel/pci-iommu_table.c linux-3.0.3/arch/x86/kernel/pci-iommu_table.c
14000--- linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14001+++ linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14002@@ -2,7 +2,7 @@
14003 #include <asm/iommu_table.h>
14004 #include <linux/string.h>
14005 #include <linux/kallsyms.h>
14006-
14007+#include <linux/sched.h>
14008
14009 #define DEBUG 1
14010
14011@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14012 {
14013 struct iommu_table_entry *p, *q, *x;
14014
14015+ pax_track_stack();
14016+
14017 /* Simple cyclic dependency checker. */
14018 for (p = start; p < finish; p++) {
14019 q = find_dependents_of(start, finish, p);
14020diff -urNp linux-3.0.3/arch/x86/kernel/process_32.c linux-3.0.3/arch/x86/kernel/process_32.c
14021--- linux-3.0.3/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14022+++ linux-3.0.3/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14023@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14024 unsigned long thread_saved_pc(struct task_struct *tsk)
14025 {
14026 return ((unsigned long *)tsk->thread.sp)[3];
14027+//XXX return tsk->thread.eip;
14028 }
14029
14030 #ifndef CONFIG_SMP
14031@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14032 unsigned long sp;
14033 unsigned short ss, gs;
14034
14035- if (user_mode_vm(regs)) {
14036+ if (user_mode(regs)) {
14037 sp = regs->sp;
14038 ss = regs->ss & 0xffff;
14039- gs = get_user_gs(regs);
14040 } else {
14041 sp = kernel_stack_pointer(regs);
14042 savesegment(ss, ss);
14043- savesegment(gs, gs);
14044 }
14045+ gs = get_user_gs(regs);
14046
14047 show_regs_common();
14048
14049@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14050 struct task_struct *tsk;
14051 int err;
14052
14053- childregs = task_pt_regs(p);
14054+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14055 *childregs = *regs;
14056 childregs->ax = 0;
14057 childregs->sp = sp;
14058
14059 p->thread.sp = (unsigned long) childregs;
14060 p->thread.sp0 = (unsigned long) (childregs+1);
14061+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14062
14063 p->thread.ip = (unsigned long) ret_from_fork;
14064
14065@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14066 struct thread_struct *prev = &prev_p->thread,
14067 *next = &next_p->thread;
14068 int cpu = smp_processor_id();
14069- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14070+ struct tss_struct *tss = init_tss + cpu;
14071 bool preload_fpu;
14072
14073 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14074@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14075 */
14076 lazy_save_gs(prev->gs);
14077
14078+#ifdef CONFIG_PAX_MEMORY_UDEREF
14079+ __set_fs(task_thread_info(next_p)->addr_limit);
14080+#endif
14081+
14082 /*
14083 * Load the per-thread Thread-Local Storage descriptor.
14084 */
14085@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14086 */
14087 arch_end_context_switch(next_p);
14088
14089+ percpu_write(current_task, next_p);
14090+ percpu_write(current_tinfo, &next_p->tinfo);
14091+
14092 if (preload_fpu)
14093 __math_state_restore();
14094
14095@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14096 if (prev->gs | next->gs)
14097 lazy_load_gs(next->gs);
14098
14099- percpu_write(current_task, next_p);
14100-
14101 return prev_p;
14102 }
14103
14104@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14105 } while (count++ < 16);
14106 return 0;
14107 }
14108-
14109diff -urNp linux-3.0.3/arch/x86/kernel/process_64.c linux-3.0.3/arch/x86/kernel/process_64.c
14110--- linux-3.0.3/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14111+++ linux-3.0.3/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14112@@ -87,7 +87,7 @@ static void __exit_idle(void)
14113 void exit_idle(void)
14114 {
14115 /* idle loop has pid 0 */
14116- if (current->pid)
14117+ if (task_pid_nr(current))
14118 return;
14119 __exit_idle();
14120 }
14121@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14122 struct pt_regs *childregs;
14123 struct task_struct *me = current;
14124
14125- childregs = ((struct pt_regs *)
14126- (THREAD_SIZE + task_stack_page(p))) - 1;
14127+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14128 *childregs = *regs;
14129
14130 childregs->ax = 0;
14131@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14132 p->thread.sp = (unsigned long) childregs;
14133 p->thread.sp0 = (unsigned long) (childregs+1);
14134 p->thread.usersp = me->thread.usersp;
14135+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14136
14137 set_tsk_thread_flag(p, TIF_FORK);
14138
14139@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14140 struct thread_struct *prev = &prev_p->thread;
14141 struct thread_struct *next = &next_p->thread;
14142 int cpu = smp_processor_id();
14143- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14144+ struct tss_struct *tss = init_tss + cpu;
14145 unsigned fsindex, gsindex;
14146 bool preload_fpu;
14147
14148@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14149 prev->usersp = percpu_read(old_rsp);
14150 percpu_write(old_rsp, next->usersp);
14151 percpu_write(current_task, next_p);
14152+ percpu_write(current_tinfo, &next_p->tinfo);
14153
14154- percpu_write(kernel_stack,
14155- (unsigned long)task_stack_page(next_p) +
14156- THREAD_SIZE - KERNEL_STACK_OFFSET);
14157+ percpu_write(kernel_stack, next->sp0);
14158
14159 /*
14160 * Now maybe reload the debug registers and handle I/O bitmaps
14161@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14162 if (!p || p == current || p->state == TASK_RUNNING)
14163 return 0;
14164 stack = (unsigned long)task_stack_page(p);
14165- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14166+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14167 return 0;
14168 fp = *(u64 *)(p->thread.sp);
14169 do {
14170- if (fp < (unsigned long)stack ||
14171- fp >= (unsigned long)stack+THREAD_SIZE)
14172+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14173 return 0;
14174 ip = *(u64 *)(fp+8);
14175 if (!in_sched_functions(ip))
14176diff -urNp linux-3.0.3/arch/x86/kernel/process.c linux-3.0.3/arch/x86/kernel/process.c
14177--- linux-3.0.3/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14178+++ linux-3.0.3/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14179@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14180
14181 void free_thread_info(struct thread_info *ti)
14182 {
14183- free_thread_xstate(ti->task);
14184 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14185 }
14186
14187+static struct kmem_cache *task_struct_cachep;
14188+
14189 void arch_task_cache_init(void)
14190 {
14191- task_xstate_cachep =
14192- kmem_cache_create("task_xstate", xstate_size,
14193+ /* create a slab on which task_structs can be allocated */
14194+ task_struct_cachep =
14195+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14196+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14197+
14198+ task_xstate_cachep =
14199+ kmem_cache_create("task_xstate", xstate_size,
14200 __alignof__(union thread_xstate),
14201- SLAB_PANIC | SLAB_NOTRACK, NULL);
14202+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14203+}
14204+
14205+struct task_struct *alloc_task_struct_node(int node)
14206+{
14207+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14208+}
14209+
14210+void free_task_struct(struct task_struct *task)
14211+{
14212+ free_thread_xstate(task);
14213+ kmem_cache_free(task_struct_cachep, task);
14214 }
14215
14216 /*
14217@@ -70,7 +87,7 @@ void exit_thread(void)
14218 unsigned long *bp = t->io_bitmap_ptr;
14219
14220 if (bp) {
14221- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14222+ struct tss_struct *tss = init_tss + get_cpu();
14223
14224 t->io_bitmap_ptr = NULL;
14225 clear_thread_flag(TIF_IO_BITMAP);
14226@@ -106,7 +123,7 @@ void show_regs_common(void)
14227
14228 printk(KERN_CONT "\n");
14229 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14230- current->pid, current->comm, print_tainted(),
14231+ task_pid_nr(current), current->comm, print_tainted(),
14232 init_utsname()->release,
14233 (int)strcspn(init_utsname()->version, " "),
14234 init_utsname()->version);
14235@@ -120,6 +137,9 @@ void flush_thread(void)
14236 {
14237 struct task_struct *tsk = current;
14238
14239+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14240+ loadsegment(gs, 0);
14241+#endif
14242 flush_ptrace_hw_breakpoint(tsk);
14243 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14244 /*
14245@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14246 regs.di = (unsigned long) arg;
14247
14248 #ifdef CONFIG_X86_32
14249- regs.ds = __USER_DS;
14250- regs.es = __USER_DS;
14251+ regs.ds = __KERNEL_DS;
14252+ regs.es = __KERNEL_DS;
14253 regs.fs = __KERNEL_PERCPU;
14254- regs.gs = __KERNEL_STACK_CANARY;
14255+ savesegment(gs, regs.gs);
14256 #else
14257 regs.ss = __KERNEL_DS;
14258 #endif
14259@@ -403,7 +423,7 @@ void default_idle(void)
14260 EXPORT_SYMBOL(default_idle);
14261 #endif
14262
14263-void stop_this_cpu(void *dummy)
14264+__noreturn void stop_this_cpu(void *dummy)
14265 {
14266 local_irq_disable();
14267 /*
14268@@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14269 }
14270 early_param("idle", idle_setup);
14271
14272-unsigned long arch_align_stack(unsigned long sp)
14273+#ifdef CONFIG_PAX_RANDKSTACK
14274+asmlinkage void pax_randomize_kstack(void)
14275 {
14276- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14277- sp -= get_random_int() % 8192;
14278- return sp & ~0xf;
14279-}
14280+ struct thread_struct *thread = &current->thread;
14281+ unsigned long time;
14282
14283-unsigned long arch_randomize_brk(struct mm_struct *mm)
14284-{
14285- unsigned long range_end = mm->brk + 0x02000000;
14286- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14287-}
14288+ if (!randomize_va_space)
14289+ return;
14290+
14291+ rdtscl(time);
14292+
14293+ /* P4 seems to return a 0 LSB, ignore it */
14294+#ifdef CONFIG_MPENTIUM4
14295+ time &= 0x3EUL;
14296+ time <<= 2;
14297+#elif defined(CONFIG_X86_64)
14298+ time &= 0xFUL;
14299+ time <<= 4;
14300+#else
14301+ time &= 0x1FUL;
14302+ time <<= 3;
14303+#endif
14304+
14305+ thread->sp0 ^= time;
14306+ load_sp0(init_tss + smp_processor_id(), thread);
14307
14308+#ifdef CONFIG_X86_64
14309+ percpu_write(kernel_stack, thread->sp0);
14310+#endif
14311+}
14312+#endif
14313diff -urNp linux-3.0.3/arch/x86/kernel/ptrace.c linux-3.0.3/arch/x86/kernel/ptrace.c
14314--- linux-3.0.3/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14315+++ linux-3.0.3/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14316@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14317 unsigned long addr, unsigned long data)
14318 {
14319 int ret;
14320- unsigned long __user *datap = (unsigned long __user *)data;
14321+ unsigned long __user *datap = (__force unsigned long __user *)data;
14322
14323 switch (request) {
14324 /* read the word at location addr in the USER area. */
14325@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14326 if ((int) addr < 0)
14327 return -EIO;
14328 ret = do_get_thread_area(child, addr,
14329- (struct user_desc __user *)data);
14330+ (__force struct user_desc __user *) data);
14331 break;
14332
14333 case PTRACE_SET_THREAD_AREA:
14334 if ((int) addr < 0)
14335 return -EIO;
14336 ret = do_set_thread_area(child, addr,
14337- (struct user_desc __user *)data, 0);
14338+ (__force struct user_desc __user *) data, 0);
14339 break;
14340 #endif
14341
14342@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14343 memset(info, 0, sizeof(*info));
14344 info->si_signo = SIGTRAP;
14345 info->si_code = si_code;
14346- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14347+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14348 }
14349
14350 void user_single_step_siginfo(struct task_struct *tsk,
14351diff -urNp linux-3.0.3/arch/x86/kernel/pvclock.c linux-3.0.3/arch/x86/kernel/pvclock.c
14352--- linux-3.0.3/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14353+++ linux-3.0.3/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14354@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14355 return pv_tsc_khz;
14356 }
14357
14358-static atomic64_t last_value = ATOMIC64_INIT(0);
14359+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14360
14361 void pvclock_resume(void)
14362 {
14363- atomic64_set(&last_value, 0);
14364+ atomic64_set_unchecked(&last_value, 0);
14365 }
14366
14367 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14368@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14369 * updating at the same time, and one of them could be slightly behind,
14370 * making the assumption that last_value always go forward fail to hold.
14371 */
14372- last = atomic64_read(&last_value);
14373+ last = atomic64_read_unchecked(&last_value);
14374 do {
14375 if (ret < last)
14376 return last;
14377- last = atomic64_cmpxchg(&last_value, last, ret);
14378+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14379 } while (unlikely(last != ret));
14380
14381 return ret;
14382diff -urNp linux-3.0.3/arch/x86/kernel/reboot.c linux-3.0.3/arch/x86/kernel/reboot.c
14383--- linux-3.0.3/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14384+++ linux-3.0.3/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14385@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14386 EXPORT_SYMBOL(pm_power_off);
14387
14388 static const struct desc_ptr no_idt = {};
14389-static int reboot_mode;
14390+static unsigned short reboot_mode;
14391 enum reboot_type reboot_type = BOOT_ACPI;
14392 int reboot_force;
14393
14394@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14395 extern const unsigned char machine_real_restart_asm[];
14396 extern const u64 machine_real_restart_gdt[3];
14397
14398-void machine_real_restart(unsigned int type)
14399+__noreturn void machine_real_restart(unsigned int type)
14400 {
14401 void *restart_va;
14402 unsigned long restart_pa;
14403- void (*restart_lowmem)(unsigned int);
14404+ void (* __noreturn restart_lowmem)(unsigned int);
14405 u64 *lowmem_gdt;
14406
14407+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14408+ struct desc_struct *gdt;
14409+#endif
14410+
14411 local_irq_disable();
14412
14413 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14414@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14415 boot)". This seems like a fairly standard thing that gets set by
14416 REBOOT.COM programs, and the previous reset routine did this
14417 too. */
14418- *((unsigned short *)0x472) = reboot_mode;
14419+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14420
14421 /* Patch the GDT in the low memory trampoline */
14422 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14423
14424 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14425 restart_pa = virt_to_phys(restart_va);
14426- restart_lowmem = (void (*)(unsigned int))restart_pa;
14427+ restart_lowmem = (void *)restart_pa;
14428
14429 /* GDT[0]: GDT self-pointer */
14430 lowmem_gdt[0] =
14431@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14432 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14433
14434 /* Jump to the identity-mapped low memory code */
14435+
14436+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14437+ gdt = get_cpu_gdt_table(smp_processor_id());
14438+ pax_open_kernel();
14439+#ifdef CONFIG_PAX_MEMORY_UDEREF
14440+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14441+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14442+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14443+#endif
14444+#ifdef CONFIG_PAX_KERNEXEC
14445+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14446+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14447+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14448+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14449+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14450+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14451+#endif
14452+ pax_close_kernel();
14453+#endif
14454+
14455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14456+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14457+ unreachable();
14458+#else
14459 restart_lowmem(type);
14460+#endif
14461+
14462 }
14463 #ifdef CONFIG_APM_MODULE
14464 EXPORT_SYMBOL(machine_real_restart);
14465@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14466 * try to force a triple fault and then cycle between hitting the keyboard
14467 * controller and doing that
14468 */
14469-static void native_machine_emergency_restart(void)
14470+__noreturn static void native_machine_emergency_restart(void)
14471 {
14472 int i;
14473 int attempt = 0;
14474@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14475 #endif
14476 }
14477
14478-static void __machine_emergency_restart(int emergency)
14479+static __noreturn void __machine_emergency_restart(int emergency)
14480 {
14481 reboot_emergency = emergency;
14482 machine_ops.emergency_restart();
14483 }
14484
14485-static void native_machine_restart(char *__unused)
14486+static __noreturn void native_machine_restart(char *__unused)
14487 {
14488 printk("machine restart\n");
14489
14490@@ -662,7 +692,7 @@ static void native_machine_restart(char
14491 __machine_emergency_restart(0);
14492 }
14493
14494-static void native_machine_halt(void)
14495+static __noreturn void native_machine_halt(void)
14496 {
14497 /* stop other cpus and apics */
14498 machine_shutdown();
14499@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14500 stop_this_cpu(NULL);
14501 }
14502
14503-static void native_machine_power_off(void)
14504+__noreturn static void native_machine_power_off(void)
14505 {
14506 if (pm_power_off) {
14507 if (!reboot_force)
14508@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14509 }
14510 /* a fallback in case there is no PM info available */
14511 tboot_shutdown(TB_SHUTDOWN_HALT);
14512+ unreachable();
14513 }
14514
14515 struct machine_ops machine_ops = {
14516diff -urNp linux-3.0.3/arch/x86/kernel/setup.c linux-3.0.3/arch/x86/kernel/setup.c
14517--- linux-3.0.3/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14518+++ linux-3.0.3/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14519@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14520 * area (640->1Mb) as ram even though it is not.
14521 * take them out.
14522 */
14523- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14524+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14525 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14526 }
14527
14528@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14529
14530 if (!boot_params.hdr.root_flags)
14531 root_mountflags &= ~MS_RDONLY;
14532- init_mm.start_code = (unsigned long) _text;
14533- init_mm.end_code = (unsigned long) _etext;
14534+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14535+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14536 init_mm.end_data = (unsigned long) _edata;
14537 init_mm.brk = _brk_end;
14538
14539- code_resource.start = virt_to_phys(_text);
14540- code_resource.end = virt_to_phys(_etext)-1;
14541- data_resource.start = virt_to_phys(_etext);
14542+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14543+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14544+ data_resource.start = virt_to_phys(_sdata);
14545 data_resource.end = virt_to_phys(_edata)-1;
14546 bss_resource.start = virt_to_phys(&__bss_start);
14547 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14548diff -urNp linux-3.0.3/arch/x86/kernel/setup_percpu.c linux-3.0.3/arch/x86/kernel/setup_percpu.c
14549--- linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14550+++ linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14551@@ -21,19 +21,17 @@
14552 #include <asm/cpu.h>
14553 #include <asm/stackprotector.h>
14554
14555-DEFINE_PER_CPU(int, cpu_number);
14556+#ifdef CONFIG_SMP
14557+DEFINE_PER_CPU(unsigned int, cpu_number);
14558 EXPORT_PER_CPU_SYMBOL(cpu_number);
14559+#endif
14560
14561-#ifdef CONFIG_X86_64
14562 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14563-#else
14564-#define BOOT_PERCPU_OFFSET 0
14565-#endif
14566
14567 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14568 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14569
14570-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14571+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14572 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14573 };
14574 EXPORT_SYMBOL(__per_cpu_offset);
14575@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14576 {
14577 #ifdef CONFIG_X86_32
14578 struct desc_struct gdt;
14579+ unsigned long base = per_cpu_offset(cpu);
14580
14581- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14582- 0x2 | DESCTYPE_S, 0x8);
14583- gdt.s = 1;
14584+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14585+ 0x83 | DESCTYPE_S, 0xC);
14586 write_gdt_entry(get_cpu_gdt_table(cpu),
14587 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14588 #endif
14589@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14590 /* alrighty, percpu areas up and running */
14591 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14592 for_each_possible_cpu(cpu) {
14593+#ifdef CONFIG_CC_STACKPROTECTOR
14594+#ifdef CONFIG_X86_32
14595+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14596+#endif
14597+#endif
14598 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14599 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14600 per_cpu(cpu_number, cpu) = cpu;
14601@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14602 */
14603 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14604 #endif
14605+#ifdef CONFIG_CC_STACKPROTECTOR
14606+#ifdef CONFIG_X86_32
14607+ if (!cpu)
14608+ per_cpu(stack_canary.canary, cpu) = canary;
14609+#endif
14610+#endif
14611 /*
14612 * Up to this point, the boot CPU has been using .init.data
14613 * area. Reload any changed state for the boot CPU.
14614diff -urNp linux-3.0.3/arch/x86/kernel/signal.c linux-3.0.3/arch/x86/kernel/signal.c
14615--- linux-3.0.3/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14616+++ linux-3.0.3/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14617@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14618 * Align the stack pointer according to the i386 ABI,
14619 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14620 */
14621- sp = ((sp + 4) & -16ul) - 4;
14622+ sp = ((sp - 12) & -16ul) - 4;
14623 #else /* !CONFIG_X86_32 */
14624 sp = round_down(sp, 16) - 8;
14625 #endif
14626@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14627 * Return an always-bogus address instead so we will die with SIGSEGV.
14628 */
14629 if (onsigstack && !likely(on_sig_stack(sp)))
14630- return (void __user *)-1L;
14631+ return (__force void __user *)-1L;
14632
14633 /* save i387 state */
14634 if (used_math() && save_i387_xstate(*fpstate) < 0)
14635- return (void __user *)-1L;
14636+ return (__force void __user *)-1L;
14637
14638 return (void __user *)sp;
14639 }
14640@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14641 }
14642
14643 if (current->mm->context.vdso)
14644- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14645+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14646 else
14647- restorer = &frame->retcode;
14648+ restorer = (void __user *)&frame->retcode;
14649 if (ka->sa.sa_flags & SA_RESTORER)
14650 restorer = ka->sa.sa_restorer;
14651
14652@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14653 * reasons and because gdb uses it as a signature to notice
14654 * signal handler stack frames.
14655 */
14656- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14657+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14658
14659 if (err)
14660 return -EFAULT;
14661@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14662 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14663
14664 /* Set up to return from userspace. */
14665- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14666+ if (current->mm->context.vdso)
14667+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14668+ else
14669+ restorer = (void __user *)&frame->retcode;
14670 if (ka->sa.sa_flags & SA_RESTORER)
14671 restorer = ka->sa.sa_restorer;
14672 put_user_ex(restorer, &frame->pretcode);
14673@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14674 * reasons and because gdb uses it as a signature to notice
14675 * signal handler stack frames.
14676 */
14677- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14678+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14679 } put_user_catch(err);
14680
14681 if (err)
14682@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14683 int signr;
14684 sigset_t *oldset;
14685
14686+ pax_track_stack();
14687+
14688 /*
14689 * We want the common case to go fast, which is why we may in certain
14690 * cases get here from kernel mode. Just return without doing anything
14691@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14692 * X86_32: vm86 regs switched out by assembly code before reaching
14693 * here, so testing against kernel CS suffices.
14694 */
14695- if (!user_mode(regs))
14696+ if (!user_mode_novm(regs))
14697 return;
14698
14699 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14700diff -urNp linux-3.0.3/arch/x86/kernel/smpboot.c linux-3.0.3/arch/x86/kernel/smpboot.c
14701--- linux-3.0.3/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14702+++ linux-3.0.3/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14703@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14704 set_idle_for_cpu(cpu, c_idle.idle);
14705 do_rest:
14706 per_cpu(current_task, cpu) = c_idle.idle;
14707+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14708 #ifdef CONFIG_X86_32
14709 /* Stack for startup_32 can be just as for start_secondary onwards */
14710 irq_ctx_init(cpu);
14711 #else
14712 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14713 initial_gs = per_cpu_offset(cpu);
14714- per_cpu(kernel_stack, cpu) =
14715- (unsigned long)task_stack_page(c_idle.idle) -
14716- KERNEL_STACK_OFFSET + THREAD_SIZE;
14717+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14718 #endif
14719+
14720+ pax_open_kernel();
14721 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14722+ pax_close_kernel();
14723+
14724 initial_code = (unsigned long)start_secondary;
14725 stack_start = c_idle.idle->thread.sp;
14726
14727@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14728
14729 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14730
14731+#ifdef CONFIG_PAX_PER_CPU_PGD
14732+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14733+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14734+ KERNEL_PGD_PTRS);
14735+#endif
14736+
14737 err = do_boot_cpu(apicid, cpu);
14738 if (err) {
14739 pr_debug("do_boot_cpu failed %d\n", err);
14740diff -urNp linux-3.0.3/arch/x86/kernel/step.c linux-3.0.3/arch/x86/kernel/step.c
14741--- linux-3.0.3/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14742+++ linux-3.0.3/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14743@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14744 struct desc_struct *desc;
14745 unsigned long base;
14746
14747- seg &= ~7UL;
14748+ seg >>= 3;
14749
14750 mutex_lock(&child->mm->context.lock);
14751- if (unlikely((seg >> 3) >= child->mm->context.size))
14752+ if (unlikely(seg >= child->mm->context.size))
14753 addr = -1L; /* bogus selector, access would fault */
14754 else {
14755 desc = child->mm->context.ldt + seg;
14756@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14757 addr += base;
14758 }
14759 mutex_unlock(&child->mm->context.lock);
14760- }
14761+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14762+ addr = ktla_ktva(addr);
14763
14764 return addr;
14765 }
14766@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14767 unsigned char opcode[15];
14768 unsigned long addr = convert_ip_to_linear(child, regs);
14769
14770+ if (addr == -EINVAL)
14771+ return 0;
14772+
14773 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14774 for (i = 0; i < copied; i++) {
14775 switch (opcode[i]) {
14776@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14777
14778 #ifdef CONFIG_X86_64
14779 case 0x40 ... 0x4f:
14780- if (regs->cs != __USER_CS)
14781+ if ((regs->cs & 0xffff) != __USER_CS)
14782 /* 32-bit mode: register increment */
14783 return 0;
14784 /* 64-bit mode: REX prefix */
14785diff -urNp linux-3.0.3/arch/x86/kernel/syscall_table_32.S linux-3.0.3/arch/x86/kernel/syscall_table_32.S
14786--- linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14787+++ linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14788@@ -1,3 +1,4 @@
14789+.section .rodata,"a",@progbits
14790 ENTRY(sys_call_table)
14791 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14792 .long sys_exit
14793diff -urNp linux-3.0.3/arch/x86/kernel/sys_i386_32.c linux-3.0.3/arch/x86/kernel/sys_i386_32.c
14794--- linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14795+++ linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14796@@ -24,17 +24,224 @@
14797
14798 #include <asm/syscalls.h>
14799
14800-/*
14801- * Do a system call from kernel instead of calling sys_execve so we
14802- * end up with proper pt_regs.
14803- */
14804-int kernel_execve(const char *filename,
14805- const char *const argv[],
14806- const char *const envp[])
14807+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14808 {
14809- long __res;
14810- asm volatile ("int $0x80"
14811- : "=a" (__res)
14812- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14813- return __res;
14814+ unsigned long pax_task_size = TASK_SIZE;
14815+
14816+#ifdef CONFIG_PAX_SEGMEXEC
14817+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14818+ pax_task_size = SEGMEXEC_TASK_SIZE;
14819+#endif
14820+
14821+ if (len > pax_task_size || addr > pax_task_size - len)
14822+ return -EINVAL;
14823+
14824+ return 0;
14825+}
14826+
14827+unsigned long
14828+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14829+ unsigned long len, unsigned long pgoff, unsigned long flags)
14830+{
14831+ struct mm_struct *mm = current->mm;
14832+ struct vm_area_struct *vma;
14833+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14834+
14835+#ifdef CONFIG_PAX_SEGMEXEC
14836+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14837+ pax_task_size = SEGMEXEC_TASK_SIZE;
14838+#endif
14839+
14840+ pax_task_size -= PAGE_SIZE;
14841+
14842+ if (len > pax_task_size)
14843+ return -ENOMEM;
14844+
14845+ if (flags & MAP_FIXED)
14846+ return addr;
14847+
14848+#ifdef CONFIG_PAX_RANDMMAP
14849+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14850+#endif
14851+
14852+ if (addr) {
14853+ addr = PAGE_ALIGN(addr);
14854+ if (pax_task_size - len >= addr) {
14855+ vma = find_vma(mm, addr);
14856+ if (check_heap_stack_gap(vma, addr, len))
14857+ return addr;
14858+ }
14859+ }
14860+ if (len > mm->cached_hole_size) {
14861+ start_addr = addr = mm->free_area_cache;
14862+ } else {
14863+ start_addr = addr = mm->mmap_base;
14864+ mm->cached_hole_size = 0;
14865+ }
14866+
14867+#ifdef CONFIG_PAX_PAGEEXEC
14868+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14869+ start_addr = 0x00110000UL;
14870+
14871+#ifdef CONFIG_PAX_RANDMMAP
14872+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14873+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14874+#endif
14875+
14876+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14877+ start_addr = addr = mm->mmap_base;
14878+ else
14879+ addr = start_addr;
14880+ }
14881+#endif
14882+
14883+full_search:
14884+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14885+ /* At this point: (!vma || addr < vma->vm_end). */
14886+ if (pax_task_size - len < addr) {
14887+ /*
14888+ * Start a new search - just in case we missed
14889+ * some holes.
14890+ */
14891+ if (start_addr != mm->mmap_base) {
14892+ start_addr = addr = mm->mmap_base;
14893+ mm->cached_hole_size = 0;
14894+ goto full_search;
14895+ }
14896+ return -ENOMEM;
14897+ }
14898+ if (check_heap_stack_gap(vma, addr, len))
14899+ break;
14900+ if (addr + mm->cached_hole_size < vma->vm_start)
14901+ mm->cached_hole_size = vma->vm_start - addr;
14902+ addr = vma->vm_end;
14903+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14904+ start_addr = addr = mm->mmap_base;
14905+ mm->cached_hole_size = 0;
14906+ goto full_search;
14907+ }
14908+ }
14909+
14910+ /*
14911+ * Remember the place where we stopped the search:
14912+ */
14913+ mm->free_area_cache = addr + len;
14914+ return addr;
14915+}
14916+
14917+unsigned long
14918+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14919+ const unsigned long len, const unsigned long pgoff,
14920+ const unsigned long flags)
14921+{
14922+ struct vm_area_struct *vma;
14923+ struct mm_struct *mm = current->mm;
14924+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14925+
14926+#ifdef CONFIG_PAX_SEGMEXEC
14927+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14928+ pax_task_size = SEGMEXEC_TASK_SIZE;
14929+#endif
14930+
14931+ pax_task_size -= PAGE_SIZE;
14932+
14933+ /* requested length too big for entire address space */
14934+ if (len > pax_task_size)
14935+ return -ENOMEM;
14936+
14937+ if (flags & MAP_FIXED)
14938+ return addr;
14939+
14940+#ifdef CONFIG_PAX_PAGEEXEC
14941+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14942+ goto bottomup;
14943+#endif
14944+
14945+#ifdef CONFIG_PAX_RANDMMAP
14946+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14947+#endif
14948+
14949+ /* requesting a specific address */
14950+ if (addr) {
14951+ addr = PAGE_ALIGN(addr);
14952+ if (pax_task_size - len >= addr) {
14953+ vma = find_vma(mm, addr);
14954+ if (check_heap_stack_gap(vma, addr, len))
14955+ return addr;
14956+ }
14957+ }
14958+
14959+ /* check if free_area_cache is useful for us */
14960+ if (len <= mm->cached_hole_size) {
14961+ mm->cached_hole_size = 0;
14962+ mm->free_area_cache = mm->mmap_base;
14963+ }
14964+
14965+ /* either no address requested or can't fit in requested address hole */
14966+ addr = mm->free_area_cache;
14967+
14968+ /* make sure it can fit in the remaining address space */
14969+ if (addr > len) {
14970+ vma = find_vma(mm, addr-len);
14971+ if (check_heap_stack_gap(vma, addr - len, len))
14972+ /* remember the address as a hint for next time */
14973+ return (mm->free_area_cache = addr-len);
14974+ }
14975+
14976+ if (mm->mmap_base < len)
14977+ goto bottomup;
14978+
14979+ addr = mm->mmap_base-len;
14980+
14981+ do {
14982+ /*
14983+ * Lookup failure means no vma is above this address,
14984+ * else if new region fits below vma->vm_start,
14985+ * return with success:
14986+ */
14987+ vma = find_vma(mm, addr);
14988+ if (check_heap_stack_gap(vma, addr, len))
14989+ /* remember the address as a hint for next time */
14990+ return (mm->free_area_cache = addr);
14991+
14992+ /* remember the largest hole we saw so far */
14993+ if (addr + mm->cached_hole_size < vma->vm_start)
14994+ mm->cached_hole_size = vma->vm_start - addr;
14995+
14996+ /* try just below the current vma->vm_start */
14997+ addr = skip_heap_stack_gap(vma, len);
14998+ } while (!IS_ERR_VALUE(addr));
14999+
15000+bottomup:
15001+ /*
15002+ * A failed mmap() very likely causes application failure,
15003+ * so fall back to the bottom-up function here. This scenario
15004+ * can happen with large stack limits and large mmap()
15005+ * allocations.
15006+ */
15007+
15008+#ifdef CONFIG_PAX_SEGMEXEC
15009+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15010+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15011+ else
15012+#endif
15013+
15014+ mm->mmap_base = TASK_UNMAPPED_BASE;
15015+
15016+#ifdef CONFIG_PAX_RANDMMAP
15017+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15018+ mm->mmap_base += mm->delta_mmap;
15019+#endif
15020+
15021+ mm->free_area_cache = mm->mmap_base;
15022+ mm->cached_hole_size = ~0UL;
15023+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15024+ /*
15025+ * Restore the topdown base:
15026+ */
15027+ mm->mmap_base = base;
15028+ mm->free_area_cache = base;
15029+ mm->cached_hole_size = ~0UL;
15030+
15031+ return addr;
15032 }
15033diff -urNp linux-3.0.3/arch/x86/kernel/sys_x86_64.c linux-3.0.3/arch/x86/kernel/sys_x86_64.c
15034--- linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15035+++ linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15036@@ -32,8 +32,8 @@ out:
15037 return error;
15038 }
15039
15040-static void find_start_end(unsigned long flags, unsigned long *begin,
15041- unsigned long *end)
15042+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15043+ unsigned long *begin, unsigned long *end)
15044 {
15045 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15046 unsigned long new_begin;
15047@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15048 *begin = new_begin;
15049 }
15050 } else {
15051- *begin = TASK_UNMAPPED_BASE;
15052+ *begin = mm->mmap_base;
15053 *end = TASK_SIZE;
15054 }
15055 }
15056@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15057 if (flags & MAP_FIXED)
15058 return addr;
15059
15060- find_start_end(flags, &begin, &end);
15061+ find_start_end(mm, flags, &begin, &end);
15062
15063 if (len > end)
15064 return -ENOMEM;
15065
15066+#ifdef CONFIG_PAX_RANDMMAP
15067+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15068+#endif
15069+
15070 if (addr) {
15071 addr = PAGE_ALIGN(addr);
15072 vma = find_vma(mm, addr);
15073- if (end - len >= addr &&
15074- (!vma || addr + len <= vma->vm_start))
15075+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15076 return addr;
15077 }
15078 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15079@@ -106,7 +109,7 @@ full_search:
15080 }
15081 return -ENOMEM;
15082 }
15083- if (!vma || addr + len <= vma->vm_start) {
15084+ if (check_heap_stack_gap(vma, addr, len)) {
15085 /*
15086 * Remember the place where we stopped the search:
15087 */
15088@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15089 {
15090 struct vm_area_struct *vma;
15091 struct mm_struct *mm = current->mm;
15092- unsigned long addr = addr0;
15093+ unsigned long base = mm->mmap_base, addr = addr0;
15094
15095 /* requested length too big for entire address space */
15096 if (len > TASK_SIZE)
15097@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15098 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15099 goto bottomup;
15100
15101+#ifdef CONFIG_PAX_RANDMMAP
15102+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15103+#endif
15104+
15105 /* requesting a specific address */
15106 if (addr) {
15107 addr = PAGE_ALIGN(addr);
15108- vma = find_vma(mm, addr);
15109- if (TASK_SIZE - len >= addr &&
15110- (!vma || addr + len <= vma->vm_start))
15111- return addr;
15112+ if (TASK_SIZE - len >= addr) {
15113+ vma = find_vma(mm, addr);
15114+ if (check_heap_stack_gap(vma, addr, len))
15115+ return addr;
15116+ }
15117 }
15118
15119 /* check if free_area_cache is useful for us */
15120@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15121 /* make sure it can fit in the remaining address space */
15122 if (addr > len) {
15123 vma = find_vma(mm, addr-len);
15124- if (!vma || addr <= vma->vm_start)
15125+ if (check_heap_stack_gap(vma, addr - len, len))
15126 /* remember the address as a hint for next time */
15127 return mm->free_area_cache = addr-len;
15128 }
15129@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15130 * return with success:
15131 */
15132 vma = find_vma(mm, addr);
15133- if (!vma || addr+len <= vma->vm_start)
15134+ if (check_heap_stack_gap(vma, addr, len))
15135 /* remember the address as a hint for next time */
15136 return mm->free_area_cache = addr;
15137
15138@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15139 mm->cached_hole_size = vma->vm_start - addr;
15140
15141 /* try just below the current vma->vm_start */
15142- addr = vma->vm_start-len;
15143- } while (len < vma->vm_start);
15144+ addr = skip_heap_stack_gap(vma, len);
15145+ } while (!IS_ERR_VALUE(addr));
15146
15147 bottomup:
15148 /*
15149@@ -198,13 +206,21 @@ bottomup:
15150 * can happen with large stack limits and large mmap()
15151 * allocations.
15152 */
15153+ mm->mmap_base = TASK_UNMAPPED_BASE;
15154+
15155+#ifdef CONFIG_PAX_RANDMMAP
15156+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15157+ mm->mmap_base += mm->delta_mmap;
15158+#endif
15159+
15160+ mm->free_area_cache = mm->mmap_base;
15161 mm->cached_hole_size = ~0UL;
15162- mm->free_area_cache = TASK_UNMAPPED_BASE;
15163 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15164 /*
15165 * Restore the topdown base:
15166 */
15167- mm->free_area_cache = mm->mmap_base;
15168+ mm->mmap_base = base;
15169+ mm->free_area_cache = base;
15170 mm->cached_hole_size = ~0UL;
15171
15172 return addr;
15173diff -urNp linux-3.0.3/arch/x86/kernel/tboot.c linux-3.0.3/arch/x86/kernel/tboot.c
15174--- linux-3.0.3/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15175+++ linux-3.0.3/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15176@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15177
15178 void tboot_shutdown(u32 shutdown_type)
15179 {
15180- void (*shutdown)(void);
15181+ void (* __noreturn shutdown)(void);
15182
15183 if (!tboot_enabled())
15184 return;
15185@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15186
15187 switch_to_tboot_pt();
15188
15189- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15190+ shutdown = (void *)tboot->shutdown_entry;
15191 shutdown();
15192
15193 /* should not reach here */
15194@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15195 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15196 }
15197
15198-static atomic_t ap_wfs_count;
15199+static atomic_unchecked_t ap_wfs_count;
15200
15201 static int tboot_wait_for_aps(int num_aps)
15202 {
15203@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15204 {
15205 switch (action) {
15206 case CPU_DYING:
15207- atomic_inc(&ap_wfs_count);
15208+ atomic_inc_unchecked(&ap_wfs_count);
15209 if (num_online_cpus() == 1)
15210- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15211+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15212 return NOTIFY_BAD;
15213 break;
15214 }
15215@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15216
15217 tboot_create_trampoline();
15218
15219- atomic_set(&ap_wfs_count, 0);
15220+ atomic_set_unchecked(&ap_wfs_count, 0);
15221 register_hotcpu_notifier(&tboot_cpu_notifier);
15222 return 0;
15223 }
15224diff -urNp linux-3.0.3/arch/x86/kernel/time.c linux-3.0.3/arch/x86/kernel/time.c
15225--- linux-3.0.3/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15226+++ linux-3.0.3/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15227@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15228 {
15229 unsigned long pc = instruction_pointer(regs);
15230
15231- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15232+ if (!user_mode(regs) && in_lock_functions(pc)) {
15233 #ifdef CONFIG_FRAME_POINTER
15234- return *(unsigned long *)(regs->bp + sizeof(long));
15235+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15236 #else
15237 unsigned long *sp =
15238 (unsigned long *)kernel_stack_pointer(regs);
15239@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15240 * or above a saved flags. Eflags has bits 22-31 zero,
15241 * kernel addresses don't.
15242 */
15243+
15244+#ifdef CONFIG_PAX_KERNEXEC
15245+ return ktla_ktva(sp[0]);
15246+#else
15247 if (sp[0] >> 22)
15248 return sp[0];
15249 if (sp[1] >> 22)
15250 return sp[1];
15251 #endif
15252+
15253+#endif
15254 }
15255 return pc;
15256 }
15257diff -urNp linux-3.0.3/arch/x86/kernel/tls.c linux-3.0.3/arch/x86/kernel/tls.c
15258--- linux-3.0.3/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15259+++ linux-3.0.3/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15260@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15261 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15262 return -EINVAL;
15263
15264+#ifdef CONFIG_PAX_SEGMEXEC
15265+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15266+ return -EINVAL;
15267+#endif
15268+
15269 set_tls_desc(p, idx, &info, 1);
15270
15271 return 0;
15272diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_32.S linux-3.0.3/arch/x86/kernel/trampoline_32.S
15273--- linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15274+++ linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15275@@ -32,6 +32,12 @@
15276 #include <asm/segment.h>
15277 #include <asm/page_types.h>
15278
15279+#ifdef CONFIG_PAX_KERNEXEC
15280+#define ta(X) (X)
15281+#else
15282+#define ta(X) ((X) - __PAGE_OFFSET)
15283+#endif
15284+
15285 #ifdef CONFIG_SMP
15286
15287 .section ".x86_trampoline","a"
15288@@ -62,7 +68,7 @@ r_base = .
15289 inc %ax # protected mode (PE) bit
15290 lmsw %ax # into protected mode
15291 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15292- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15293+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15294
15295 # These need to be in the same 64K segment as the above;
15296 # hence we don't use the boot_gdt_descr defined in head.S
15297diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_64.S linux-3.0.3/arch/x86/kernel/trampoline_64.S
15298--- linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15299+++ linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15300@@ -90,7 +90,7 @@ startup_32:
15301 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15302 movl %eax, %ds
15303
15304- movl $X86_CR4_PAE, %eax
15305+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15306 movl %eax, %cr4 # Enable PAE mode
15307
15308 # Setup trampoline 4 level pagetables
15309@@ -138,7 +138,7 @@ tidt:
15310 # so the kernel can live anywhere
15311 .balign 4
15312 tgdt:
15313- .short tgdt_end - tgdt # gdt limit
15314+ .short tgdt_end - tgdt - 1 # gdt limit
15315 .long tgdt - r_base
15316 .short 0
15317 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15318diff -urNp linux-3.0.3/arch/x86/kernel/traps.c linux-3.0.3/arch/x86/kernel/traps.c
15319--- linux-3.0.3/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15320+++ linux-3.0.3/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15321@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15322
15323 /* Do we ignore FPU interrupts ? */
15324 char ignore_fpu_irq;
15325-
15326-/*
15327- * The IDT has to be page-aligned to simplify the Pentium
15328- * F0 0F bug workaround.
15329- */
15330-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15331 #endif
15332
15333 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15334@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15335 }
15336
15337 static void __kprobes
15338-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15339+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15340 long error_code, siginfo_t *info)
15341 {
15342 struct task_struct *tsk = current;
15343
15344 #ifdef CONFIG_X86_32
15345- if (regs->flags & X86_VM_MASK) {
15346+ if (v8086_mode(regs)) {
15347 /*
15348 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15349 * On nmi (interrupt 2), do_trap should not be called.
15350@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15351 }
15352 #endif
15353
15354- if (!user_mode(regs))
15355+ if (!user_mode_novm(regs))
15356 goto kernel_trap;
15357
15358 #ifdef CONFIG_X86_32
15359@@ -157,7 +151,7 @@ trap_signal:
15360 printk_ratelimit()) {
15361 printk(KERN_INFO
15362 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15363- tsk->comm, tsk->pid, str,
15364+ tsk->comm, task_pid_nr(tsk), str,
15365 regs->ip, regs->sp, error_code);
15366 print_vma_addr(" in ", regs->ip);
15367 printk("\n");
15368@@ -174,8 +168,20 @@ kernel_trap:
15369 if (!fixup_exception(regs)) {
15370 tsk->thread.error_code = error_code;
15371 tsk->thread.trap_no = trapnr;
15372+
15373+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15374+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15375+ str = "PAX: suspicious stack segment fault";
15376+#endif
15377+
15378 die(str, regs, error_code);
15379 }
15380+
15381+#ifdef CONFIG_PAX_REFCOUNT
15382+ if (trapnr == 4)
15383+ pax_report_refcount_overflow(regs);
15384+#endif
15385+
15386 return;
15387
15388 #ifdef CONFIG_X86_32
15389@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15390 conditional_sti(regs);
15391
15392 #ifdef CONFIG_X86_32
15393- if (regs->flags & X86_VM_MASK)
15394+ if (v8086_mode(regs))
15395 goto gp_in_vm86;
15396 #endif
15397
15398 tsk = current;
15399- if (!user_mode(regs))
15400+ if (!user_mode_novm(regs))
15401 goto gp_in_kernel;
15402
15403+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15404+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15405+ struct mm_struct *mm = tsk->mm;
15406+ unsigned long limit;
15407+
15408+ down_write(&mm->mmap_sem);
15409+ limit = mm->context.user_cs_limit;
15410+ if (limit < TASK_SIZE) {
15411+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15412+ up_write(&mm->mmap_sem);
15413+ return;
15414+ }
15415+ up_write(&mm->mmap_sem);
15416+ }
15417+#endif
15418+
15419 tsk->thread.error_code = error_code;
15420 tsk->thread.trap_no = 13;
15421
15422@@ -304,6 +326,13 @@ gp_in_kernel:
15423 if (notify_die(DIE_GPF, "general protection fault", regs,
15424 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15425 return;
15426+
15427+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15429+ die("PAX: suspicious general protection fault", regs, error_code);
15430+ else
15431+#endif
15432+
15433 die("general protection fault", regs, error_code);
15434 }
15435
15436@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15437 dotraplinkage notrace __kprobes void
15438 do_nmi(struct pt_regs *regs, long error_code)
15439 {
15440+
15441+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15442+ if (!user_mode(regs)) {
15443+ unsigned long cs = regs->cs & 0xFFFF;
15444+ unsigned long ip = ktva_ktla(regs->ip);
15445+
15446+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15447+ regs->ip = ip;
15448+ }
15449+#endif
15450+
15451 nmi_enter();
15452
15453 inc_irq_stat(__nmi_count);
15454@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15455 /* It's safe to allow irq's after DR6 has been saved */
15456 preempt_conditional_sti(regs);
15457
15458- if (regs->flags & X86_VM_MASK) {
15459+ if (v8086_mode(regs)) {
15460 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15461 error_code, 1);
15462 preempt_conditional_cli(regs);
15463@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15464 * We already checked v86 mode above, so we can check for kernel mode
15465 * by just checking the CPL of CS.
15466 */
15467- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15468+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15469 tsk->thread.debugreg6 &= ~DR_STEP;
15470 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15471 regs->flags &= ~X86_EFLAGS_TF;
15472@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15473 return;
15474 conditional_sti(regs);
15475
15476- if (!user_mode_vm(regs))
15477+ if (!user_mode(regs))
15478 {
15479 if (!fixup_exception(regs)) {
15480 task->thread.error_code = error_code;
15481@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15482 void __math_state_restore(void)
15483 {
15484 struct thread_info *thread = current_thread_info();
15485- struct task_struct *tsk = thread->task;
15486+ struct task_struct *tsk = current;
15487
15488 /*
15489 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15490@@ -750,8 +790,7 @@ void __math_state_restore(void)
15491 */
15492 asmlinkage void math_state_restore(void)
15493 {
15494- struct thread_info *thread = current_thread_info();
15495- struct task_struct *tsk = thread->task;
15496+ struct task_struct *tsk = current;
15497
15498 if (!tsk_used_math(tsk)) {
15499 local_irq_enable();
15500diff -urNp linux-3.0.3/arch/x86/kernel/verify_cpu.S linux-3.0.3/arch/x86/kernel/verify_cpu.S
15501--- linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15502+++ linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15503@@ -20,6 +20,7 @@
15504 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15505 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15506 * arch/x86/kernel/head_32.S: processor startup
15507+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15508 *
15509 * verify_cpu, returns the status of longmode and SSE in register %eax.
15510 * 0: Success 1: Failure
15511diff -urNp linux-3.0.3/arch/x86/kernel/vm86_32.c linux-3.0.3/arch/x86/kernel/vm86_32.c
15512--- linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15513+++ linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15514@@ -41,6 +41,7 @@
15515 #include <linux/ptrace.h>
15516 #include <linux/audit.h>
15517 #include <linux/stddef.h>
15518+#include <linux/grsecurity.h>
15519
15520 #include <asm/uaccess.h>
15521 #include <asm/io.h>
15522@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15523 do_exit(SIGSEGV);
15524 }
15525
15526- tss = &per_cpu(init_tss, get_cpu());
15527+ tss = init_tss + get_cpu();
15528 current->thread.sp0 = current->thread.saved_sp0;
15529 current->thread.sysenter_cs = __KERNEL_CS;
15530 load_sp0(tss, &current->thread);
15531@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15532 struct task_struct *tsk;
15533 int tmp, ret = -EPERM;
15534
15535+#ifdef CONFIG_GRKERNSEC_VM86
15536+ if (!capable(CAP_SYS_RAWIO)) {
15537+ gr_handle_vm86();
15538+ goto out;
15539+ }
15540+#endif
15541+
15542 tsk = current;
15543 if (tsk->thread.saved_sp0)
15544 goto out;
15545@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15546 int tmp, ret;
15547 struct vm86plus_struct __user *v86;
15548
15549+#ifdef CONFIG_GRKERNSEC_VM86
15550+ if (!capable(CAP_SYS_RAWIO)) {
15551+ gr_handle_vm86();
15552+ ret = -EPERM;
15553+ goto out;
15554+ }
15555+#endif
15556+
15557 tsk = current;
15558 switch (cmd) {
15559 case VM86_REQUEST_IRQ:
15560@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15561 tsk->thread.saved_fs = info->regs32->fs;
15562 tsk->thread.saved_gs = get_user_gs(info->regs32);
15563
15564- tss = &per_cpu(init_tss, get_cpu());
15565+ tss = init_tss + get_cpu();
15566 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15567 if (cpu_has_sep)
15568 tsk->thread.sysenter_cs = 0;
15569@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15570 goto cannot_handle;
15571 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15572 goto cannot_handle;
15573- intr_ptr = (unsigned long __user *) (i << 2);
15574+ intr_ptr = (__force unsigned long __user *) (i << 2);
15575 if (get_user(segoffs, intr_ptr))
15576 goto cannot_handle;
15577 if ((segoffs >> 16) == BIOSSEG)
15578diff -urNp linux-3.0.3/arch/x86/kernel/vmlinux.lds.S linux-3.0.3/arch/x86/kernel/vmlinux.lds.S
15579--- linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15580+++ linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15581@@ -26,6 +26,13 @@
15582 #include <asm/page_types.h>
15583 #include <asm/cache.h>
15584 #include <asm/boot.h>
15585+#include <asm/segment.h>
15586+
15587+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15588+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15589+#else
15590+#define __KERNEL_TEXT_OFFSET 0
15591+#endif
15592
15593 #undef i386 /* in case the preprocessor is a 32bit one */
15594
15595@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15596
15597 PHDRS {
15598 text PT_LOAD FLAGS(5); /* R_E */
15599+#ifdef CONFIG_X86_32
15600+ module PT_LOAD FLAGS(5); /* R_E */
15601+#endif
15602+#ifdef CONFIG_XEN
15603+ rodata PT_LOAD FLAGS(5); /* R_E */
15604+#else
15605+ rodata PT_LOAD FLAGS(4); /* R__ */
15606+#endif
15607 data PT_LOAD FLAGS(6); /* RW_ */
15608 #ifdef CONFIG_X86_64
15609 user PT_LOAD FLAGS(5); /* R_E */
15610+#endif
15611+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15612 #ifdef CONFIG_SMP
15613 percpu PT_LOAD FLAGS(6); /* RW_ */
15614 #endif
15615+ text.init PT_LOAD FLAGS(5); /* R_E */
15616+ text.exit PT_LOAD FLAGS(5); /* R_E */
15617 init PT_LOAD FLAGS(7); /* RWE */
15618-#endif
15619 note PT_NOTE FLAGS(0); /* ___ */
15620 }
15621
15622 SECTIONS
15623 {
15624 #ifdef CONFIG_X86_32
15625- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15626- phys_startup_32 = startup_32 - LOAD_OFFSET;
15627+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15628 #else
15629- . = __START_KERNEL;
15630- phys_startup_64 = startup_64 - LOAD_OFFSET;
15631+ . = __START_KERNEL;
15632 #endif
15633
15634 /* Text and read-only data */
15635- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15636- _text = .;
15637+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15638 /* bootstrapping code */
15639+#ifdef CONFIG_X86_32
15640+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15641+#else
15642+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15643+#endif
15644+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15645+ _text = .;
15646 HEAD_TEXT
15647 #ifdef CONFIG_X86_32
15648 . = ALIGN(PAGE_SIZE);
15649@@ -109,13 +131,47 @@ SECTIONS
15650 IRQENTRY_TEXT
15651 *(.fixup)
15652 *(.gnu.warning)
15653- /* End of text section */
15654- _etext = .;
15655 } :text = 0x9090
15656
15657- NOTES :text :note
15658+ . += __KERNEL_TEXT_OFFSET;
15659+
15660+#ifdef CONFIG_X86_32
15661+ . = ALIGN(PAGE_SIZE);
15662+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15663+
15664+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15665+ MODULES_EXEC_VADDR = .;
15666+ BYTE(0)
15667+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15668+ . = ALIGN(HPAGE_SIZE);
15669+ MODULES_EXEC_END = . - 1;
15670+#endif
15671+
15672+ } :module
15673+#endif
15674+
15675+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15676+ /* End of text section */
15677+ _etext = . - __KERNEL_TEXT_OFFSET;
15678+ }
15679+
15680+#ifdef CONFIG_X86_32
15681+ . = ALIGN(PAGE_SIZE);
15682+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15683+ *(.idt)
15684+ . = ALIGN(PAGE_SIZE);
15685+ *(.empty_zero_page)
15686+ *(.initial_pg_fixmap)
15687+ *(.initial_pg_pmd)
15688+ *(.initial_page_table)
15689+ *(.swapper_pg_dir)
15690+ } :rodata
15691+#endif
15692+
15693+ . = ALIGN(PAGE_SIZE);
15694+ NOTES :rodata :note
15695
15696- EXCEPTION_TABLE(16) :text = 0x9090
15697+ EXCEPTION_TABLE(16) :rodata
15698
15699 #if defined(CONFIG_DEBUG_RODATA)
15700 /* .text should occupy whole number of pages */
15701@@ -127,16 +183,20 @@ SECTIONS
15702
15703 /* Data */
15704 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15705+
15706+#ifdef CONFIG_PAX_KERNEXEC
15707+ . = ALIGN(HPAGE_SIZE);
15708+#else
15709+ . = ALIGN(PAGE_SIZE);
15710+#endif
15711+
15712 /* Start of data section */
15713 _sdata = .;
15714
15715 /* init_task */
15716 INIT_TASK_DATA(THREAD_SIZE)
15717
15718-#ifdef CONFIG_X86_32
15719- /* 32 bit has nosave before _edata */
15720 NOSAVE_DATA
15721-#endif
15722
15723 PAGE_ALIGNED_DATA(PAGE_SIZE)
15724
15725@@ -208,12 +268,19 @@ SECTIONS
15726 #endif /* CONFIG_X86_64 */
15727
15728 /* Init code and data - will be freed after init */
15729- . = ALIGN(PAGE_SIZE);
15730 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15731+ BYTE(0)
15732+
15733+#ifdef CONFIG_PAX_KERNEXEC
15734+ . = ALIGN(HPAGE_SIZE);
15735+#else
15736+ . = ALIGN(PAGE_SIZE);
15737+#endif
15738+
15739 __init_begin = .; /* paired with __init_end */
15740- }
15741+ } :init.begin
15742
15743-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15744+#ifdef CONFIG_SMP
15745 /*
15746 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15747 * output PHDR, so the next output section - .init.text - should
15748@@ -222,12 +289,27 @@ SECTIONS
15749 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15750 #endif
15751
15752- INIT_TEXT_SECTION(PAGE_SIZE)
15753-#ifdef CONFIG_X86_64
15754- :init
15755-#endif
15756+ . = ALIGN(PAGE_SIZE);
15757+ init_begin = .;
15758+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15759+ VMLINUX_SYMBOL(_sinittext) = .;
15760+ INIT_TEXT
15761+ VMLINUX_SYMBOL(_einittext) = .;
15762+ . = ALIGN(PAGE_SIZE);
15763+ } :text.init
15764
15765- INIT_DATA_SECTION(16)
15766+ /*
15767+ * .exit.text is discard at runtime, not link time, to deal with
15768+ * references from .altinstructions and .eh_frame
15769+ */
15770+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15771+ EXIT_TEXT
15772+ . = ALIGN(16);
15773+ } :text.exit
15774+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15775+
15776+ . = ALIGN(PAGE_SIZE);
15777+ INIT_DATA_SECTION(16) :init
15778
15779 /*
15780 * Code and data for a variety of lowlevel trampolines, to be
15781@@ -301,19 +383,12 @@ SECTIONS
15782 }
15783
15784 . = ALIGN(8);
15785- /*
15786- * .exit.text is discard at runtime, not link time, to deal with
15787- * references from .altinstructions and .eh_frame
15788- */
15789- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15790- EXIT_TEXT
15791- }
15792
15793 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15794 EXIT_DATA
15795 }
15796
15797-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15798+#ifndef CONFIG_SMP
15799 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15800 #endif
15801
15802@@ -332,16 +407,10 @@ SECTIONS
15803 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15804 __smp_locks = .;
15805 *(.smp_locks)
15806- . = ALIGN(PAGE_SIZE);
15807 __smp_locks_end = .;
15808+ . = ALIGN(PAGE_SIZE);
15809 }
15810
15811-#ifdef CONFIG_X86_64
15812- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15813- NOSAVE_DATA
15814- }
15815-#endif
15816-
15817 /* BSS */
15818 . = ALIGN(PAGE_SIZE);
15819 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15820@@ -357,6 +426,7 @@ SECTIONS
15821 __brk_base = .;
15822 . += 64 * 1024; /* 64k alignment slop space */
15823 *(.brk_reservation) /* areas brk users have reserved */
15824+ . = ALIGN(HPAGE_SIZE);
15825 __brk_limit = .;
15826 }
15827
15828@@ -383,13 +453,12 @@ SECTIONS
15829 * for the boot processor.
15830 */
15831 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15832-INIT_PER_CPU(gdt_page);
15833 INIT_PER_CPU(irq_stack_union);
15834
15835 /*
15836 * Build-time check on the image size:
15837 */
15838-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15839+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15840 "kernel image bigger than KERNEL_IMAGE_SIZE");
15841
15842 #ifdef CONFIG_SMP
15843diff -urNp linux-3.0.3/arch/x86/kernel/vsyscall_64.c linux-3.0.3/arch/x86/kernel/vsyscall_64.c
15844--- linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15845+++ linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15846@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15847 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15848 {
15849 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15850- .sysctl_enabled = 1,
15851+ .sysctl_enabled = 0,
15852 };
15853
15854 void update_vsyscall_tz(void)
15855@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15856 static ctl_table kernel_table2[] = {
15857 { .procname = "vsyscall64",
15858 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15859- .mode = 0644,
15860+ .mode = 0444,
15861 .proc_handler = proc_dointvec },
15862 {}
15863 };
15864diff -urNp linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c
15865--- linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15866+++ linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15867@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15868 EXPORT_SYMBOL(copy_user_generic_string);
15869 EXPORT_SYMBOL(copy_user_generic_unrolled);
15870 EXPORT_SYMBOL(__copy_user_nocache);
15871-EXPORT_SYMBOL(_copy_from_user);
15872-EXPORT_SYMBOL(_copy_to_user);
15873
15874 EXPORT_SYMBOL(copy_page);
15875 EXPORT_SYMBOL(clear_page);
15876diff -urNp linux-3.0.3/arch/x86/kernel/xsave.c linux-3.0.3/arch/x86/kernel/xsave.c
15877--- linux-3.0.3/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15878+++ linux-3.0.3/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15879@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15880 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15881 return -EINVAL;
15882
15883- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15884+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15885 fx_sw_user->extended_size -
15886 FP_XSTATE_MAGIC2_SIZE));
15887 if (err)
15888@@ -267,7 +267,7 @@ fx_only:
15889 * the other extended state.
15890 */
15891 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15892- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15893+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15894 }
15895
15896 /*
15897@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15898 if (use_xsave())
15899 err = restore_user_xstate(buf);
15900 else
15901- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15902+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15903 buf);
15904 if (unlikely(err)) {
15905 /*
15906diff -urNp linux-3.0.3/arch/x86/kvm/emulate.c linux-3.0.3/arch/x86/kvm/emulate.c
15907--- linux-3.0.3/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15908+++ linux-3.0.3/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15909@@ -96,7 +96,7 @@
15910 #define Src2ImmByte (2<<29)
15911 #define Src2One (3<<29)
15912 #define Src2Imm (4<<29)
15913-#define Src2Mask (7<<29)
15914+#define Src2Mask (7U<<29)
15915
15916 #define X2(x...) x, x
15917 #define X3(x...) X2(x), x
15918@@ -207,6 +207,7 @@ struct gprefix {
15919
15920 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15921 do { \
15922+ unsigned long _tmp; \
15923 __asm__ __volatile__ ( \
15924 _PRE_EFLAGS("0", "4", "2") \
15925 _op _suffix " %"_x"3,%1; " \
15926@@ -220,8 +221,6 @@ struct gprefix {
15927 /* Raw emulation: instruction has two explicit operands. */
15928 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15929 do { \
15930- unsigned long _tmp; \
15931- \
15932 switch ((_dst).bytes) { \
15933 case 2: \
15934 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15935@@ -237,7 +236,6 @@ struct gprefix {
15936
15937 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15938 do { \
15939- unsigned long _tmp; \
15940 switch ((_dst).bytes) { \
15941 case 1: \
15942 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15943diff -urNp linux-3.0.3/arch/x86/kvm/lapic.c linux-3.0.3/arch/x86/kvm/lapic.c
15944--- linux-3.0.3/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15945+++ linux-3.0.3/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15946@@ -53,7 +53,7 @@
15947 #define APIC_BUS_CYCLE_NS 1
15948
15949 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15950-#define apic_debug(fmt, arg...)
15951+#define apic_debug(fmt, arg...) do {} while (0)
15952
15953 #define APIC_LVT_NUM 6
15954 /* 14 is the version for Xeon and Pentium 8.4.8*/
15955diff -urNp linux-3.0.3/arch/x86/kvm/mmu.c linux-3.0.3/arch/x86/kvm/mmu.c
15956--- linux-3.0.3/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15957+++ linux-3.0.3/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15958@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15959
15960 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15961
15962- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15963+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15964
15965 /*
15966 * Assume that the pte write on a page table of the same type
15967@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968 }
15969
15970 spin_lock(&vcpu->kvm->mmu_lock);
15971- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15972+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15973 gentry = 0;
15974 kvm_mmu_free_some_pages(vcpu);
15975 ++vcpu->kvm->stat.mmu_pte_write;
15976diff -urNp linux-3.0.3/arch/x86/kvm/paging_tmpl.h linux-3.0.3/arch/x86/kvm/paging_tmpl.h
15977--- linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15978+++ linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15979@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15980 unsigned long mmu_seq;
15981 bool map_writable;
15982
15983+ pax_track_stack();
15984+
15985 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15986
15987 r = mmu_topup_memory_caches(vcpu);
15988@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15989 if (need_flush)
15990 kvm_flush_remote_tlbs(vcpu->kvm);
15991
15992- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15993+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15994
15995 spin_unlock(&vcpu->kvm->mmu_lock);
15996
15997diff -urNp linux-3.0.3/arch/x86/kvm/svm.c linux-3.0.3/arch/x86/kvm/svm.c
15998--- linux-3.0.3/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15999+++ linux-3.0.3/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16000@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16001 int cpu = raw_smp_processor_id();
16002
16003 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16004+
16005+ pax_open_kernel();
16006 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16007+ pax_close_kernel();
16008+
16009 load_TR_desc();
16010 }
16011
16012@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16013 #endif
16014 #endif
16015
16016+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16017+ __set_fs(current_thread_info()->addr_limit);
16018+#endif
16019+
16020 reload_tss(vcpu);
16021
16022 local_irq_disable();
16023diff -urNp linux-3.0.3/arch/x86/kvm/vmx.c linux-3.0.3/arch/x86/kvm/vmx.c
16024--- linux-3.0.3/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16025+++ linux-3.0.3/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16026@@ -797,7 +797,11 @@ static void reload_tss(void)
16027 struct desc_struct *descs;
16028
16029 descs = (void *)gdt->address;
16030+
16031+ pax_open_kernel();
16032 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16033+ pax_close_kernel();
16034+
16035 load_TR_desc();
16036 }
16037
16038@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16039 if (!cpu_has_vmx_flexpriority())
16040 flexpriority_enabled = 0;
16041
16042- if (!cpu_has_vmx_tpr_shadow())
16043- kvm_x86_ops->update_cr8_intercept = NULL;
16044+ if (!cpu_has_vmx_tpr_shadow()) {
16045+ pax_open_kernel();
16046+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16047+ pax_close_kernel();
16048+ }
16049
16050 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16051 kvm_disable_largepages();
16052@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16053 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16054
16055 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16056- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16057+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16058 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16059 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16060 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16061@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16062 "jmp .Lkvm_vmx_return \n\t"
16063 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16064 ".Lkvm_vmx_return: "
16065+
16066+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16067+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16068+ ".Lkvm_vmx_return2: "
16069+#endif
16070+
16071 /* Save guest registers, load host registers, keep flags */
16072 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16073 "pop %0 \n\t"
16074@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16075 #endif
16076 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16077 [wordsize]"i"(sizeof(ulong))
16078+
16079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16080+ ,[cs]"i"(__KERNEL_CS)
16081+#endif
16082+
16083 : "cc", "memory"
16084 , R"ax", R"bx", R"di", R"si"
16085 #ifdef CONFIG_X86_64
16086@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16087
16088 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16089
16090- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16091+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16092+
16093+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16094+ loadsegment(fs, __KERNEL_PERCPU);
16095+#endif
16096+
16097+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16098+ __set_fs(current_thread_info()->addr_limit);
16099+#endif
16100+
16101 vmx->launched = 1;
16102
16103 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16104diff -urNp linux-3.0.3/arch/x86/kvm/x86.c linux-3.0.3/arch/x86/kvm/x86.c
16105--- linux-3.0.3/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16106+++ linux-3.0.3/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16107@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16108 if (n < msr_list.nmsrs)
16109 goto out;
16110 r = -EFAULT;
16111+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16112+ goto out;
16113 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16114 num_msrs_to_save * sizeof(u32)))
16115 goto out;
16116@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16117 struct kvm_cpuid2 *cpuid,
16118 struct kvm_cpuid_entry2 __user *entries)
16119 {
16120- int r;
16121+ int r, i;
16122
16123 r = -E2BIG;
16124 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16125 goto out;
16126 r = -EFAULT;
16127- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16128- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16129+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16130 goto out;
16131+ for (i = 0; i < cpuid->nent; ++i) {
16132+ struct kvm_cpuid_entry2 cpuid_entry;
16133+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16134+ goto out;
16135+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16136+ }
16137 vcpu->arch.cpuid_nent = cpuid->nent;
16138 kvm_apic_set_version(vcpu);
16139 kvm_x86_ops->cpuid_update(vcpu);
16140@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16141 struct kvm_cpuid2 *cpuid,
16142 struct kvm_cpuid_entry2 __user *entries)
16143 {
16144- int r;
16145+ int r, i;
16146
16147 r = -E2BIG;
16148 if (cpuid->nent < vcpu->arch.cpuid_nent)
16149 goto out;
16150 r = -EFAULT;
16151- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16152- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16153+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16154 goto out;
16155+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16156+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16157+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16158+ goto out;
16159+ }
16160 return 0;
16161
16162 out:
16163@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16164 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16165 struct kvm_interrupt *irq)
16166 {
16167- if (irq->irq < 0 || irq->irq >= 256)
16168+ if (irq->irq >= 256)
16169 return -EINVAL;
16170 if (irqchip_in_kernel(vcpu->kvm))
16171 return -ENXIO;
16172@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16173 }
16174 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16175
16176-int kvm_arch_init(void *opaque)
16177+int kvm_arch_init(const void *opaque)
16178 {
16179 int r;
16180 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16181diff -urNp linux-3.0.3/arch/x86/lguest/boot.c linux-3.0.3/arch/x86/lguest/boot.c
16182--- linux-3.0.3/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16183+++ linux-3.0.3/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16184@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16185 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16186 * Launcher to reboot us.
16187 */
16188-static void lguest_restart(char *reason)
16189+static __noreturn void lguest_restart(char *reason)
16190 {
16191 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16192+ BUG();
16193 }
16194
16195 /*G:050
16196diff -urNp linux-3.0.3/arch/x86/lib/atomic64_32.c linux-3.0.3/arch/x86/lib/atomic64_32.c
16197--- linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16198+++ linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16199@@ -8,18 +8,30 @@
16200
16201 long long atomic64_read_cx8(long long, const atomic64_t *v);
16202 EXPORT_SYMBOL(atomic64_read_cx8);
16203+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16204+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16205 long long atomic64_set_cx8(long long, const atomic64_t *v);
16206 EXPORT_SYMBOL(atomic64_set_cx8);
16207+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16208+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16209 long long atomic64_xchg_cx8(long long, unsigned high);
16210 EXPORT_SYMBOL(atomic64_xchg_cx8);
16211 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16212 EXPORT_SYMBOL(atomic64_add_return_cx8);
16213+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16215 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16216 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16217+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16219 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16220 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16221+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16222+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16223 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16224 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16225+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16226+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16227 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16228 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16229 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16230@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16231 #ifndef CONFIG_X86_CMPXCHG64
16232 long long atomic64_read_386(long long, const atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_read_386);
16234+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16235+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16236 long long atomic64_set_386(long long, const atomic64_t *v);
16237 EXPORT_SYMBOL(atomic64_set_386);
16238+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16239+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16240 long long atomic64_xchg_386(long long, unsigned high);
16241 EXPORT_SYMBOL(atomic64_xchg_386);
16242 long long atomic64_add_return_386(long long a, atomic64_t *v);
16243 EXPORT_SYMBOL(atomic64_add_return_386);
16244+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16246 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16247 EXPORT_SYMBOL(atomic64_sub_return_386);
16248+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16250 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16251 EXPORT_SYMBOL(atomic64_inc_return_386);
16252+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16253+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16254 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16255 EXPORT_SYMBOL(atomic64_dec_return_386);
16256+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16257+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16258 long long atomic64_add_386(long long a, atomic64_t *v);
16259 EXPORT_SYMBOL(atomic64_add_386);
16260+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16261+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16262 long long atomic64_sub_386(long long a, atomic64_t *v);
16263 EXPORT_SYMBOL(atomic64_sub_386);
16264+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16265+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16266 long long atomic64_inc_386(long long a, atomic64_t *v);
16267 EXPORT_SYMBOL(atomic64_inc_386);
16268+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16269+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16270 long long atomic64_dec_386(long long a, atomic64_t *v);
16271 EXPORT_SYMBOL(atomic64_dec_386);
16272+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16273+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16274 long long atomic64_dec_if_positive_386(atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16276 int atomic64_inc_not_zero_386(atomic64_t *v);
16277diff -urNp linux-3.0.3/arch/x86/lib/atomic64_386_32.S linux-3.0.3/arch/x86/lib/atomic64_386_32.S
16278--- linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16279+++ linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16280@@ -48,6 +48,10 @@ BEGIN(read)
16281 movl (v), %eax
16282 movl 4(v), %edx
16283 RET_ENDP
16284+BEGIN(read_unchecked)
16285+ movl (v), %eax
16286+ movl 4(v), %edx
16287+RET_ENDP
16288 #undef v
16289
16290 #define v %esi
16291@@ -55,6 +59,10 @@ BEGIN(set)
16292 movl %ebx, (v)
16293 movl %ecx, 4(v)
16294 RET_ENDP
16295+BEGIN(set_unchecked)
16296+ movl %ebx, (v)
16297+ movl %ecx, 4(v)
16298+RET_ENDP
16299 #undef v
16300
16301 #define v %esi
16302@@ -70,6 +78,20 @@ RET_ENDP
16303 BEGIN(add)
16304 addl %eax, (v)
16305 adcl %edx, 4(v)
16306+
16307+#ifdef CONFIG_PAX_REFCOUNT
16308+ jno 0f
16309+ subl %eax, (v)
16310+ sbbl %edx, 4(v)
16311+ int $4
16312+0:
16313+ _ASM_EXTABLE(0b, 0b)
16314+#endif
16315+
16316+RET_ENDP
16317+BEGIN(add_unchecked)
16318+ addl %eax, (v)
16319+ adcl %edx, 4(v)
16320 RET_ENDP
16321 #undef v
16322
16323@@ -77,6 +99,24 @@ RET_ENDP
16324 BEGIN(add_return)
16325 addl (v), %eax
16326 adcl 4(v), %edx
16327+
16328+#ifdef CONFIG_PAX_REFCOUNT
16329+ into
16330+1234:
16331+ _ASM_EXTABLE(1234b, 2f)
16332+#endif
16333+
16334+ movl %eax, (v)
16335+ movl %edx, 4(v)
16336+
16337+#ifdef CONFIG_PAX_REFCOUNT
16338+2:
16339+#endif
16340+
16341+RET_ENDP
16342+BEGIN(add_return_unchecked)
16343+ addl (v), %eax
16344+ adcl 4(v), %edx
16345 movl %eax, (v)
16346 movl %edx, 4(v)
16347 RET_ENDP
16348@@ -86,6 +126,20 @@ RET_ENDP
16349 BEGIN(sub)
16350 subl %eax, (v)
16351 sbbl %edx, 4(v)
16352+
16353+#ifdef CONFIG_PAX_REFCOUNT
16354+ jno 0f
16355+ addl %eax, (v)
16356+ adcl %edx, 4(v)
16357+ int $4
16358+0:
16359+ _ASM_EXTABLE(0b, 0b)
16360+#endif
16361+
16362+RET_ENDP
16363+BEGIN(sub_unchecked)
16364+ subl %eax, (v)
16365+ sbbl %edx, 4(v)
16366 RET_ENDP
16367 #undef v
16368
16369@@ -96,6 +150,27 @@ BEGIN(sub_return)
16370 sbbl $0, %edx
16371 addl (v), %eax
16372 adcl 4(v), %edx
16373+
16374+#ifdef CONFIG_PAX_REFCOUNT
16375+ into
16376+1234:
16377+ _ASM_EXTABLE(1234b, 2f)
16378+#endif
16379+
16380+ movl %eax, (v)
16381+ movl %edx, 4(v)
16382+
16383+#ifdef CONFIG_PAX_REFCOUNT
16384+2:
16385+#endif
16386+
16387+RET_ENDP
16388+BEGIN(sub_return_unchecked)
16389+ negl %edx
16390+ negl %eax
16391+ sbbl $0, %edx
16392+ addl (v), %eax
16393+ adcl 4(v), %edx
16394 movl %eax, (v)
16395 movl %edx, 4(v)
16396 RET_ENDP
16397@@ -105,6 +180,20 @@ RET_ENDP
16398 BEGIN(inc)
16399 addl $1, (v)
16400 adcl $0, 4(v)
16401+
16402+#ifdef CONFIG_PAX_REFCOUNT
16403+ jno 0f
16404+ subl $1, (v)
16405+ sbbl $0, 4(v)
16406+ int $4
16407+0:
16408+ _ASM_EXTABLE(0b, 0b)
16409+#endif
16410+
16411+RET_ENDP
16412+BEGIN(inc_unchecked)
16413+ addl $1, (v)
16414+ adcl $0, 4(v)
16415 RET_ENDP
16416 #undef v
16417
16418@@ -114,6 +203,26 @@ BEGIN(inc_return)
16419 movl 4(v), %edx
16420 addl $1, %eax
16421 adcl $0, %edx
16422+
16423+#ifdef CONFIG_PAX_REFCOUNT
16424+ into
16425+1234:
16426+ _ASM_EXTABLE(1234b, 2f)
16427+#endif
16428+
16429+ movl %eax, (v)
16430+ movl %edx, 4(v)
16431+
16432+#ifdef CONFIG_PAX_REFCOUNT
16433+2:
16434+#endif
16435+
16436+RET_ENDP
16437+BEGIN(inc_return_unchecked)
16438+ movl (v), %eax
16439+ movl 4(v), %edx
16440+ addl $1, %eax
16441+ adcl $0, %edx
16442 movl %eax, (v)
16443 movl %edx, 4(v)
16444 RET_ENDP
16445@@ -123,6 +232,20 @@ RET_ENDP
16446 BEGIN(dec)
16447 subl $1, (v)
16448 sbbl $0, 4(v)
16449+
16450+#ifdef CONFIG_PAX_REFCOUNT
16451+ jno 0f
16452+ addl $1, (v)
16453+ adcl $0, 4(v)
16454+ int $4
16455+0:
16456+ _ASM_EXTABLE(0b, 0b)
16457+#endif
16458+
16459+RET_ENDP
16460+BEGIN(dec_unchecked)
16461+ subl $1, (v)
16462+ sbbl $0, 4(v)
16463 RET_ENDP
16464 #undef v
16465
16466@@ -132,6 +255,26 @@ BEGIN(dec_return)
16467 movl 4(v), %edx
16468 subl $1, %eax
16469 sbbl $0, %edx
16470+
16471+#ifdef CONFIG_PAX_REFCOUNT
16472+ into
16473+1234:
16474+ _ASM_EXTABLE(1234b, 2f)
16475+#endif
16476+
16477+ movl %eax, (v)
16478+ movl %edx, 4(v)
16479+
16480+#ifdef CONFIG_PAX_REFCOUNT
16481+2:
16482+#endif
16483+
16484+RET_ENDP
16485+BEGIN(dec_return_unchecked)
16486+ movl (v), %eax
16487+ movl 4(v), %edx
16488+ subl $1, %eax
16489+ sbbl $0, %edx
16490 movl %eax, (v)
16491 movl %edx, 4(v)
16492 RET_ENDP
16493@@ -143,6 +286,13 @@ BEGIN(add_unless)
16494 adcl %edx, %edi
16495 addl (v), %eax
16496 adcl 4(v), %edx
16497+
16498+#ifdef CONFIG_PAX_REFCOUNT
16499+ into
16500+1234:
16501+ _ASM_EXTABLE(1234b, 2f)
16502+#endif
16503+
16504 cmpl %eax, %esi
16505 je 3f
16506 1:
16507@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16508 1:
16509 addl $1, %eax
16510 adcl $0, %edx
16511+
16512+#ifdef CONFIG_PAX_REFCOUNT
16513+ into
16514+1234:
16515+ _ASM_EXTABLE(1234b, 2f)
16516+#endif
16517+
16518 movl %eax, (v)
16519 movl %edx, 4(v)
16520 movl $1, %eax
16521@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16522 movl 4(v), %edx
16523 subl $1, %eax
16524 sbbl $0, %edx
16525+
16526+#ifdef CONFIG_PAX_REFCOUNT
16527+ into
16528+1234:
16529+ _ASM_EXTABLE(1234b, 1f)
16530+#endif
16531+
16532 js 1f
16533 movl %eax, (v)
16534 movl %edx, 4(v)
16535diff -urNp linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S
16536--- linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16537+++ linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16538@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16539 CFI_ENDPROC
16540 ENDPROC(atomic64_read_cx8)
16541
16542+ENTRY(atomic64_read_unchecked_cx8)
16543+ CFI_STARTPROC
16544+
16545+ read64 %ecx
16546+ ret
16547+ CFI_ENDPROC
16548+ENDPROC(atomic64_read_unchecked_cx8)
16549+
16550 ENTRY(atomic64_set_cx8)
16551 CFI_STARTPROC
16552
16553@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16554 CFI_ENDPROC
16555 ENDPROC(atomic64_set_cx8)
16556
16557+ENTRY(atomic64_set_unchecked_cx8)
16558+ CFI_STARTPROC
16559+
16560+1:
16561+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16562+ * are atomic on 586 and newer */
16563+ cmpxchg8b (%esi)
16564+ jne 1b
16565+
16566+ ret
16567+ CFI_ENDPROC
16568+ENDPROC(atomic64_set_unchecked_cx8)
16569+
16570 ENTRY(atomic64_xchg_cx8)
16571 CFI_STARTPROC
16572
16573@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16574 CFI_ENDPROC
16575 ENDPROC(atomic64_xchg_cx8)
16576
16577-.macro addsub_return func ins insc
16578-ENTRY(atomic64_\func\()_return_cx8)
16579+.macro addsub_return func ins insc unchecked=""
16580+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16581 CFI_STARTPROC
16582 SAVE ebp
16583 SAVE ebx
16584@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16585 movl %edx, %ecx
16586 \ins\()l %esi, %ebx
16587 \insc\()l %edi, %ecx
16588+
16589+.ifb \unchecked
16590+#ifdef CONFIG_PAX_REFCOUNT
16591+ into
16592+2:
16593+ _ASM_EXTABLE(2b, 3f)
16594+#endif
16595+.endif
16596+
16597 LOCK_PREFIX
16598 cmpxchg8b (%ebp)
16599 jne 1b
16600-
16601-10:
16602 movl %ebx, %eax
16603 movl %ecx, %edx
16604+
16605+.ifb \unchecked
16606+#ifdef CONFIG_PAX_REFCOUNT
16607+3:
16608+#endif
16609+.endif
16610+
16611 RESTORE edi
16612 RESTORE esi
16613 RESTORE ebx
16614 RESTORE ebp
16615 ret
16616 CFI_ENDPROC
16617-ENDPROC(atomic64_\func\()_return_cx8)
16618+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16619 .endm
16620
16621 addsub_return add add adc
16622 addsub_return sub sub sbb
16623+addsub_return add add adc _unchecked
16624+addsub_return sub sub sbb _unchecked
16625
16626-.macro incdec_return func ins insc
16627-ENTRY(atomic64_\func\()_return_cx8)
16628+.macro incdec_return func ins insc unchecked
16629+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16630 CFI_STARTPROC
16631 SAVE ebx
16632
16633@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16634 movl %edx, %ecx
16635 \ins\()l $1, %ebx
16636 \insc\()l $0, %ecx
16637+
16638+.ifb \unchecked
16639+#ifdef CONFIG_PAX_REFCOUNT
16640+ into
16641+2:
16642+ _ASM_EXTABLE(2b, 3f)
16643+#endif
16644+.endif
16645+
16646 LOCK_PREFIX
16647 cmpxchg8b (%esi)
16648 jne 1b
16649
16650-10:
16651 movl %ebx, %eax
16652 movl %ecx, %edx
16653+
16654+.ifb \unchecked
16655+#ifdef CONFIG_PAX_REFCOUNT
16656+3:
16657+#endif
16658+.endif
16659+
16660 RESTORE ebx
16661 ret
16662 CFI_ENDPROC
16663-ENDPROC(atomic64_\func\()_return_cx8)
16664+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16665 .endm
16666
16667 incdec_return inc add adc
16668 incdec_return dec sub sbb
16669+incdec_return inc add adc _unchecked
16670+incdec_return dec sub sbb _unchecked
16671
16672 ENTRY(atomic64_dec_if_positive_cx8)
16673 CFI_STARTPROC
16674@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16675 movl %edx, %ecx
16676 subl $1, %ebx
16677 sbb $0, %ecx
16678+
16679+#ifdef CONFIG_PAX_REFCOUNT
16680+ into
16681+1234:
16682+ _ASM_EXTABLE(1234b, 2f)
16683+#endif
16684+
16685 js 2f
16686 LOCK_PREFIX
16687 cmpxchg8b (%esi)
16688@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16689 movl %edx, %ecx
16690 addl %esi, %ebx
16691 adcl %edi, %ecx
16692+
16693+#ifdef CONFIG_PAX_REFCOUNT
16694+ into
16695+1234:
16696+ _ASM_EXTABLE(1234b, 3f)
16697+#endif
16698+
16699 LOCK_PREFIX
16700 cmpxchg8b (%ebp)
16701 jne 1b
16702@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16703 movl %edx, %ecx
16704 addl $1, %ebx
16705 adcl $0, %ecx
16706+
16707+#ifdef CONFIG_PAX_REFCOUNT
16708+ into
16709+1234:
16710+ _ASM_EXTABLE(1234b, 3f)
16711+#endif
16712+
16713 LOCK_PREFIX
16714 cmpxchg8b (%esi)
16715 jne 1b
16716diff -urNp linux-3.0.3/arch/x86/lib/checksum_32.S linux-3.0.3/arch/x86/lib/checksum_32.S
16717--- linux-3.0.3/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16718+++ linux-3.0.3/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16719@@ -28,7 +28,8 @@
16720 #include <linux/linkage.h>
16721 #include <asm/dwarf2.h>
16722 #include <asm/errno.h>
16723-
16724+#include <asm/segment.h>
16725+
16726 /*
16727 * computes a partial checksum, e.g. for TCP/UDP fragments
16728 */
16729@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16730
16731 #define ARGBASE 16
16732 #define FP 12
16733-
16734-ENTRY(csum_partial_copy_generic)
16735+
16736+ENTRY(csum_partial_copy_generic_to_user)
16737 CFI_STARTPROC
16738+
16739+#ifdef CONFIG_PAX_MEMORY_UDEREF
16740+ pushl_cfi %gs
16741+ popl_cfi %es
16742+ jmp csum_partial_copy_generic
16743+#endif
16744+
16745+ENTRY(csum_partial_copy_generic_from_user)
16746+
16747+#ifdef CONFIG_PAX_MEMORY_UDEREF
16748+ pushl_cfi %gs
16749+ popl_cfi %ds
16750+#endif
16751+
16752+ENTRY(csum_partial_copy_generic)
16753 subl $4,%esp
16754 CFI_ADJUST_CFA_OFFSET 4
16755 pushl_cfi %edi
16756@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16757 jmp 4f
16758 SRC(1: movw (%esi), %bx )
16759 addl $2, %esi
16760-DST( movw %bx, (%edi) )
16761+DST( movw %bx, %es:(%edi) )
16762 addl $2, %edi
16763 addw %bx, %ax
16764 adcl $0, %eax
16765@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16766 SRC(1: movl (%esi), %ebx )
16767 SRC( movl 4(%esi), %edx )
16768 adcl %ebx, %eax
16769-DST( movl %ebx, (%edi) )
16770+DST( movl %ebx, %es:(%edi) )
16771 adcl %edx, %eax
16772-DST( movl %edx, 4(%edi) )
16773+DST( movl %edx, %es:4(%edi) )
16774
16775 SRC( movl 8(%esi), %ebx )
16776 SRC( movl 12(%esi), %edx )
16777 adcl %ebx, %eax
16778-DST( movl %ebx, 8(%edi) )
16779+DST( movl %ebx, %es:8(%edi) )
16780 adcl %edx, %eax
16781-DST( movl %edx, 12(%edi) )
16782+DST( movl %edx, %es:12(%edi) )
16783
16784 SRC( movl 16(%esi), %ebx )
16785 SRC( movl 20(%esi), %edx )
16786 adcl %ebx, %eax
16787-DST( movl %ebx, 16(%edi) )
16788+DST( movl %ebx, %es:16(%edi) )
16789 adcl %edx, %eax
16790-DST( movl %edx, 20(%edi) )
16791+DST( movl %edx, %es:20(%edi) )
16792
16793 SRC( movl 24(%esi), %ebx )
16794 SRC( movl 28(%esi), %edx )
16795 adcl %ebx, %eax
16796-DST( movl %ebx, 24(%edi) )
16797+DST( movl %ebx, %es:24(%edi) )
16798 adcl %edx, %eax
16799-DST( movl %edx, 28(%edi) )
16800+DST( movl %edx, %es:28(%edi) )
16801
16802 lea 32(%esi), %esi
16803 lea 32(%edi), %edi
16804@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16805 shrl $2, %edx # This clears CF
16806 SRC(3: movl (%esi), %ebx )
16807 adcl %ebx, %eax
16808-DST( movl %ebx, (%edi) )
16809+DST( movl %ebx, %es:(%edi) )
16810 lea 4(%esi), %esi
16811 lea 4(%edi), %edi
16812 dec %edx
16813@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16814 jb 5f
16815 SRC( movw (%esi), %cx )
16816 leal 2(%esi), %esi
16817-DST( movw %cx, (%edi) )
16818+DST( movw %cx, %es:(%edi) )
16819 leal 2(%edi), %edi
16820 je 6f
16821 shll $16,%ecx
16822 SRC(5: movb (%esi), %cl )
16823-DST( movb %cl, (%edi) )
16824+DST( movb %cl, %es:(%edi) )
16825 6: addl %ecx, %eax
16826 adcl $0, %eax
16827 7:
16828@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16829
16830 6001:
16831 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16832- movl $-EFAULT, (%ebx)
16833+ movl $-EFAULT, %ss:(%ebx)
16834
16835 # zero the complete destination - computing the rest
16836 # is too much work
16837@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16838
16839 6002:
16840 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16841- movl $-EFAULT,(%ebx)
16842+ movl $-EFAULT,%ss:(%ebx)
16843 jmp 5000b
16844
16845 .previous
16846
16847+ pushl_cfi %ss
16848+ popl_cfi %ds
16849+ pushl_cfi %ss
16850+ popl_cfi %es
16851 popl_cfi %ebx
16852 CFI_RESTORE ebx
16853 popl_cfi %esi
16854@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16855 popl_cfi %ecx # equivalent to addl $4,%esp
16856 ret
16857 CFI_ENDPROC
16858-ENDPROC(csum_partial_copy_generic)
16859+ENDPROC(csum_partial_copy_generic_to_user)
16860
16861 #else
16862
16863 /* Version for PentiumII/PPro */
16864
16865 #define ROUND1(x) \
16866+ nop; nop; nop; \
16867 SRC(movl x(%esi), %ebx ) ; \
16868 addl %ebx, %eax ; \
16869- DST(movl %ebx, x(%edi) ) ;
16870+ DST(movl %ebx, %es:x(%edi)) ;
16871
16872 #define ROUND(x) \
16873+ nop; nop; nop; \
16874 SRC(movl x(%esi), %ebx ) ; \
16875 adcl %ebx, %eax ; \
16876- DST(movl %ebx, x(%edi) ) ;
16877+ DST(movl %ebx, %es:x(%edi)) ;
16878
16879 #define ARGBASE 12
16880-
16881-ENTRY(csum_partial_copy_generic)
16882+
16883+ENTRY(csum_partial_copy_generic_to_user)
16884 CFI_STARTPROC
16885+
16886+#ifdef CONFIG_PAX_MEMORY_UDEREF
16887+ pushl_cfi %gs
16888+ popl_cfi %es
16889+ jmp csum_partial_copy_generic
16890+#endif
16891+
16892+ENTRY(csum_partial_copy_generic_from_user)
16893+
16894+#ifdef CONFIG_PAX_MEMORY_UDEREF
16895+ pushl_cfi %gs
16896+ popl_cfi %ds
16897+#endif
16898+
16899+ENTRY(csum_partial_copy_generic)
16900 pushl_cfi %ebx
16901 CFI_REL_OFFSET ebx, 0
16902 pushl_cfi %edi
16903@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16904 subl %ebx, %edi
16905 lea -1(%esi),%edx
16906 andl $-32,%edx
16907- lea 3f(%ebx,%ebx), %ebx
16908+ lea 3f(%ebx,%ebx,2), %ebx
16909 testl %esi, %esi
16910 jmp *%ebx
16911 1: addl $64,%esi
16912@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16913 jb 5f
16914 SRC( movw (%esi), %dx )
16915 leal 2(%esi), %esi
16916-DST( movw %dx, (%edi) )
16917+DST( movw %dx, %es:(%edi) )
16918 leal 2(%edi), %edi
16919 je 6f
16920 shll $16,%edx
16921 5:
16922 SRC( movb (%esi), %dl )
16923-DST( movb %dl, (%edi) )
16924+DST( movb %dl, %es:(%edi) )
16925 6: addl %edx, %eax
16926 adcl $0, %eax
16927 7:
16928 .section .fixup, "ax"
16929 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16930- movl $-EFAULT, (%ebx)
16931+ movl $-EFAULT, %ss:(%ebx)
16932 # zero the complete destination (computing the rest is too much work)
16933 movl ARGBASE+8(%esp),%edi # dst
16934 movl ARGBASE+12(%esp),%ecx # len
16935@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16936 rep; stosb
16937 jmp 7b
16938 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16939- movl $-EFAULT, (%ebx)
16940+ movl $-EFAULT, %ss:(%ebx)
16941 jmp 7b
16942 .previous
16943
16944+#ifdef CONFIG_PAX_MEMORY_UDEREF
16945+ pushl_cfi %ss
16946+ popl_cfi %ds
16947+ pushl_cfi %ss
16948+ popl_cfi %es
16949+#endif
16950+
16951 popl_cfi %esi
16952 CFI_RESTORE esi
16953 popl_cfi %edi
16954@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16955 CFI_RESTORE ebx
16956 ret
16957 CFI_ENDPROC
16958-ENDPROC(csum_partial_copy_generic)
16959+ENDPROC(csum_partial_copy_generic_to_user)
16960
16961 #undef ROUND
16962 #undef ROUND1
16963diff -urNp linux-3.0.3/arch/x86/lib/clear_page_64.S linux-3.0.3/arch/x86/lib/clear_page_64.S
16964--- linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16965+++ linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16966@@ -58,7 +58,7 @@ ENDPROC(clear_page)
16967
16968 #include <asm/cpufeature.h>
16969
16970- .section .altinstr_replacement,"ax"
16971+ .section .altinstr_replacement,"a"
16972 1: .byte 0xeb /* jmp <disp8> */
16973 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16974 2: .byte 0xeb /* jmp <disp8> */
16975diff -urNp linux-3.0.3/arch/x86/lib/copy_page_64.S linux-3.0.3/arch/x86/lib/copy_page_64.S
16976--- linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16977+++ linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16978@@ -104,7 +104,7 @@ ENDPROC(copy_page)
16979
16980 #include <asm/cpufeature.h>
16981
16982- .section .altinstr_replacement,"ax"
16983+ .section .altinstr_replacement,"a"
16984 1: .byte 0xeb /* jmp <disp8> */
16985 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16986 2:
16987diff -urNp linux-3.0.3/arch/x86/lib/copy_user_64.S linux-3.0.3/arch/x86/lib/copy_user_64.S
16988--- linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16989+++ linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16990@@ -16,6 +16,7 @@
16991 #include <asm/thread_info.h>
16992 #include <asm/cpufeature.h>
16993 #include <asm/alternative-asm.h>
16994+#include <asm/pgtable.h>
16995
16996 /*
16997 * By placing feature2 after feature1 in altinstructions section, we logically
16998@@ -29,7 +30,7 @@
16999 .byte 0xe9 /* 32bit jump */
17000 .long \orig-1f /* by default jump to orig */
17001 1:
17002- .section .altinstr_replacement,"ax"
17003+ .section .altinstr_replacement,"a"
17004 2: .byte 0xe9 /* near jump with 32bit immediate */
17005 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17006 3: .byte 0xe9 /* near jump with 32bit immediate */
17007@@ -71,41 +72,13 @@
17008 #endif
17009 .endm
17010
17011-/* Standard copy_to_user with segment limit checking */
17012-ENTRY(_copy_to_user)
17013- CFI_STARTPROC
17014- GET_THREAD_INFO(%rax)
17015- movq %rdi,%rcx
17016- addq %rdx,%rcx
17017- jc bad_to_user
17018- cmpq TI_addr_limit(%rax),%rcx
17019- ja bad_to_user
17020- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17021- copy_user_generic_unrolled,copy_user_generic_string, \
17022- copy_user_enhanced_fast_string
17023- CFI_ENDPROC
17024-ENDPROC(_copy_to_user)
17025-
17026-/* Standard copy_from_user with segment limit checking */
17027-ENTRY(_copy_from_user)
17028- CFI_STARTPROC
17029- GET_THREAD_INFO(%rax)
17030- movq %rsi,%rcx
17031- addq %rdx,%rcx
17032- jc bad_from_user
17033- cmpq TI_addr_limit(%rax),%rcx
17034- ja bad_from_user
17035- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17036- copy_user_generic_unrolled,copy_user_generic_string, \
17037- copy_user_enhanced_fast_string
17038- CFI_ENDPROC
17039-ENDPROC(_copy_from_user)
17040-
17041 .section .fixup,"ax"
17042 /* must zero dest */
17043 ENTRY(bad_from_user)
17044 bad_from_user:
17045 CFI_STARTPROC
17046+ testl %edx,%edx
17047+ js bad_to_user
17048 movl %edx,%ecx
17049 xorl %eax,%eax
17050 rep
17051diff -urNp linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S
17052--- linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17053+++ linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17054@@ -14,6 +14,7 @@
17055 #include <asm/current.h>
17056 #include <asm/asm-offsets.h>
17057 #include <asm/thread_info.h>
17058+#include <asm/pgtable.h>
17059
17060 .macro ALIGN_DESTINATION
17061 #ifdef FIX_ALIGNMENT
17062@@ -50,6 +51,15 @@
17063 */
17064 ENTRY(__copy_user_nocache)
17065 CFI_STARTPROC
17066+
17067+#ifdef CONFIG_PAX_MEMORY_UDEREF
17068+ mov $PAX_USER_SHADOW_BASE,%rcx
17069+ cmp %rcx,%rsi
17070+ jae 1f
17071+ add %rcx,%rsi
17072+1:
17073+#endif
17074+
17075 cmpl $8,%edx
17076 jb 20f /* less then 8 bytes, go to byte copy loop */
17077 ALIGN_DESTINATION
17078diff -urNp linux-3.0.3/arch/x86/lib/csum-wrappers_64.c linux-3.0.3/arch/x86/lib/csum-wrappers_64.c
17079--- linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17080+++ linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17081@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17082 len -= 2;
17083 }
17084 }
17085+
17086+#ifdef CONFIG_PAX_MEMORY_UDEREF
17087+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17088+ src += PAX_USER_SHADOW_BASE;
17089+#endif
17090+
17091 isum = csum_partial_copy_generic((__force const void *)src,
17092 dst, len, isum, errp, NULL);
17093 if (unlikely(*errp))
17094@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17095 }
17096
17097 *errp = 0;
17098+
17099+#ifdef CONFIG_PAX_MEMORY_UDEREF
17100+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17101+ dst += PAX_USER_SHADOW_BASE;
17102+#endif
17103+
17104 return csum_partial_copy_generic(src, (void __force *)dst,
17105 len, isum, NULL, errp);
17106 }
17107diff -urNp linux-3.0.3/arch/x86/lib/getuser.S linux-3.0.3/arch/x86/lib/getuser.S
17108--- linux-3.0.3/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17109+++ linux-3.0.3/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17110@@ -33,14 +33,35 @@
17111 #include <asm/asm-offsets.h>
17112 #include <asm/thread_info.h>
17113 #include <asm/asm.h>
17114+#include <asm/segment.h>
17115+#include <asm/pgtable.h>
17116+
17117+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17118+#define __copyuser_seg gs;
17119+#else
17120+#define __copyuser_seg
17121+#endif
17122
17123 .text
17124 ENTRY(__get_user_1)
17125 CFI_STARTPROC
17126+
17127+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17128 GET_THREAD_INFO(%_ASM_DX)
17129 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17130 jae bad_get_user
17131-1: movzb (%_ASM_AX),%edx
17132+
17133+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17135+ cmp %_ASM_DX,%_ASM_AX
17136+ jae 1234f
17137+ add %_ASM_DX,%_ASM_AX
17138+1234:
17139+#endif
17140+
17141+#endif
17142+
17143+1: __copyuser_seg movzb (%_ASM_AX),%edx
17144 xor %eax,%eax
17145 ret
17146 CFI_ENDPROC
17147@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17148 ENTRY(__get_user_2)
17149 CFI_STARTPROC
17150 add $1,%_ASM_AX
17151+
17152+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17153 jc bad_get_user
17154 GET_THREAD_INFO(%_ASM_DX)
17155 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17156 jae bad_get_user
17157-2: movzwl -1(%_ASM_AX),%edx
17158+
17159+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17160+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17161+ cmp %_ASM_DX,%_ASM_AX
17162+ jae 1234f
17163+ add %_ASM_DX,%_ASM_AX
17164+1234:
17165+#endif
17166+
17167+#endif
17168+
17169+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17170 xor %eax,%eax
17171 ret
17172 CFI_ENDPROC
17173@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17174 ENTRY(__get_user_4)
17175 CFI_STARTPROC
17176 add $3,%_ASM_AX
17177+
17178+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17179 jc bad_get_user
17180 GET_THREAD_INFO(%_ASM_DX)
17181 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17182 jae bad_get_user
17183-3: mov -3(%_ASM_AX),%edx
17184+
17185+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17186+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17187+ cmp %_ASM_DX,%_ASM_AX
17188+ jae 1234f
17189+ add %_ASM_DX,%_ASM_AX
17190+1234:
17191+#endif
17192+
17193+#endif
17194+
17195+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17196 xor %eax,%eax
17197 ret
17198 CFI_ENDPROC
17199@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17200 GET_THREAD_INFO(%_ASM_DX)
17201 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17202 jae bad_get_user
17203+
17204+#ifdef CONFIG_PAX_MEMORY_UDEREF
17205+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17206+ cmp %_ASM_DX,%_ASM_AX
17207+ jae 1234f
17208+ add %_ASM_DX,%_ASM_AX
17209+1234:
17210+#endif
17211+
17212 4: movq -7(%_ASM_AX),%_ASM_DX
17213 xor %eax,%eax
17214 ret
17215diff -urNp linux-3.0.3/arch/x86/lib/insn.c linux-3.0.3/arch/x86/lib/insn.c
17216--- linux-3.0.3/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17217+++ linux-3.0.3/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17218@@ -21,6 +21,11 @@
17219 #include <linux/string.h>
17220 #include <asm/inat.h>
17221 #include <asm/insn.h>
17222+#ifdef __KERNEL__
17223+#include <asm/pgtable_types.h>
17224+#else
17225+#define ktla_ktva(addr) addr
17226+#endif
17227
17228 #define get_next(t, insn) \
17229 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17230@@ -40,8 +45,8 @@
17231 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17232 {
17233 memset(insn, 0, sizeof(*insn));
17234- insn->kaddr = kaddr;
17235- insn->next_byte = kaddr;
17236+ insn->kaddr = ktla_ktva(kaddr);
17237+ insn->next_byte = ktla_ktva(kaddr);
17238 insn->x86_64 = x86_64 ? 1 : 0;
17239 insn->opnd_bytes = 4;
17240 if (x86_64)
17241diff -urNp linux-3.0.3/arch/x86/lib/mmx_32.c linux-3.0.3/arch/x86/lib/mmx_32.c
17242--- linux-3.0.3/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17243+++ linux-3.0.3/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17244@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17245 {
17246 void *p;
17247 int i;
17248+ unsigned long cr0;
17249
17250 if (unlikely(in_interrupt()))
17251 return __memcpy(to, from, len);
17252@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17253 kernel_fpu_begin();
17254
17255 __asm__ __volatile__ (
17256- "1: prefetch (%0)\n" /* This set is 28 bytes */
17257- " prefetch 64(%0)\n"
17258- " prefetch 128(%0)\n"
17259- " prefetch 192(%0)\n"
17260- " prefetch 256(%0)\n"
17261+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17262+ " prefetch 64(%1)\n"
17263+ " prefetch 128(%1)\n"
17264+ " prefetch 192(%1)\n"
17265+ " prefetch 256(%1)\n"
17266 "2: \n"
17267 ".section .fixup, \"ax\"\n"
17268- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17269+ "3: \n"
17270+
17271+#ifdef CONFIG_PAX_KERNEXEC
17272+ " movl %%cr0, %0\n"
17273+ " movl %0, %%eax\n"
17274+ " andl $0xFFFEFFFF, %%eax\n"
17275+ " movl %%eax, %%cr0\n"
17276+#endif
17277+
17278+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17279+
17280+#ifdef CONFIG_PAX_KERNEXEC
17281+ " movl %0, %%cr0\n"
17282+#endif
17283+
17284 " jmp 2b\n"
17285 ".previous\n"
17286 _ASM_EXTABLE(1b, 3b)
17287- : : "r" (from));
17288+ : "=&r" (cr0) : "r" (from) : "ax");
17289
17290 for ( ; i > 5; i--) {
17291 __asm__ __volatile__ (
17292- "1: prefetch 320(%0)\n"
17293- "2: movq (%0), %%mm0\n"
17294- " movq 8(%0), %%mm1\n"
17295- " movq 16(%0), %%mm2\n"
17296- " movq 24(%0), %%mm3\n"
17297- " movq %%mm0, (%1)\n"
17298- " movq %%mm1, 8(%1)\n"
17299- " movq %%mm2, 16(%1)\n"
17300- " movq %%mm3, 24(%1)\n"
17301- " movq 32(%0), %%mm0\n"
17302- " movq 40(%0), %%mm1\n"
17303- " movq 48(%0), %%mm2\n"
17304- " movq 56(%0), %%mm3\n"
17305- " movq %%mm0, 32(%1)\n"
17306- " movq %%mm1, 40(%1)\n"
17307- " movq %%mm2, 48(%1)\n"
17308- " movq %%mm3, 56(%1)\n"
17309+ "1: prefetch 320(%1)\n"
17310+ "2: movq (%1), %%mm0\n"
17311+ " movq 8(%1), %%mm1\n"
17312+ " movq 16(%1), %%mm2\n"
17313+ " movq 24(%1), %%mm3\n"
17314+ " movq %%mm0, (%2)\n"
17315+ " movq %%mm1, 8(%2)\n"
17316+ " movq %%mm2, 16(%2)\n"
17317+ " movq %%mm3, 24(%2)\n"
17318+ " movq 32(%1), %%mm0\n"
17319+ " movq 40(%1), %%mm1\n"
17320+ " movq 48(%1), %%mm2\n"
17321+ " movq 56(%1), %%mm3\n"
17322+ " movq %%mm0, 32(%2)\n"
17323+ " movq %%mm1, 40(%2)\n"
17324+ " movq %%mm2, 48(%2)\n"
17325+ " movq %%mm3, 56(%2)\n"
17326 ".section .fixup, \"ax\"\n"
17327- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17328+ "3:\n"
17329+
17330+#ifdef CONFIG_PAX_KERNEXEC
17331+ " movl %%cr0, %0\n"
17332+ " movl %0, %%eax\n"
17333+ " andl $0xFFFEFFFF, %%eax\n"
17334+ " movl %%eax, %%cr0\n"
17335+#endif
17336+
17337+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17338+
17339+#ifdef CONFIG_PAX_KERNEXEC
17340+ " movl %0, %%cr0\n"
17341+#endif
17342+
17343 " jmp 2b\n"
17344 ".previous\n"
17345 _ASM_EXTABLE(1b, 3b)
17346- : : "r" (from), "r" (to) : "memory");
17347+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17348
17349 from += 64;
17350 to += 64;
17351@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17352 static void fast_copy_page(void *to, void *from)
17353 {
17354 int i;
17355+ unsigned long cr0;
17356
17357 kernel_fpu_begin();
17358
17359@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17360 * but that is for later. -AV
17361 */
17362 __asm__ __volatile__(
17363- "1: prefetch (%0)\n"
17364- " prefetch 64(%0)\n"
17365- " prefetch 128(%0)\n"
17366- " prefetch 192(%0)\n"
17367- " prefetch 256(%0)\n"
17368+ "1: prefetch (%1)\n"
17369+ " prefetch 64(%1)\n"
17370+ " prefetch 128(%1)\n"
17371+ " prefetch 192(%1)\n"
17372+ " prefetch 256(%1)\n"
17373 "2: \n"
17374 ".section .fixup, \"ax\"\n"
17375- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17376+ "3: \n"
17377+
17378+#ifdef CONFIG_PAX_KERNEXEC
17379+ " movl %%cr0, %0\n"
17380+ " movl %0, %%eax\n"
17381+ " andl $0xFFFEFFFF, %%eax\n"
17382+ " movl %%eax, %%cr0\n"
17383+#endif
17384+
17385+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17386+
17387+#ifdef CONFIG_PAX_KERNEXEC
17388+ " movl %0, %%cr0\n"
17389+#endif
17390+
17391 " jmp 2b\n"
17392 ".previous\n"
17393- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17394+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17395
17396 for (i = 0; i < (4096-320)/64; i++) {
17397 __asm__ __volatile__ (
17398- "1: prefetch 320(%0)\n"
17399- "2: movq (%0), %%mm0\n"
17400- " movntq %%mm0, (%1)\n"
17401- " movq 8(%0), %%mm1\n"
17402- " movntq %%mm1, 8(%1)\n"
17403- " movq 16(%0), %%mm2\n"
17404- " movntq %%mm2, 16(%1)\n"
17405- " movq 24(%0), %%mm3\n"
17406- " movntq %%mm3, 24(%1)\n"
17407- " movq 32(%0), %%mm4\n"
17408- " movntq %%mm4, 32(%1)\n"
17409- " movq 40(%0), %%mm5\n"
17410- " movntq %%mm5, 40(%1)\n"
17411- " movq 48(%0), %%mm6\n"
17412- " movntq %%mm6, 48(%1)\n"
17413- " movq 56(%0), %%mm7\n"
17414- " movntq %%mm7, 56(%1)\n"
17415+ "1: prefetch 320(%1)\n"
17416+ "2: movq (%1), %%mm0\n"
17417+ " movntq %%mm0, (%2)\n"
17418+ " movq 8(%1), %%mm1\n"
17419+ " movntq %%mm1, 8(%2)\n"
17420+ " movq 16(%1), %%mm2\n"
17421+ " movntq %%mm2, 16(%2)\n"
17422+ " movq 24(%1), %%mm3\n"
17423+ " movntq %%mm3, 24(%2)\n"
17424+ " movq 32(%1), %%mm4\n"
17425+ " movntq %%mm4, 32(%2)\n"
17426+ " movq 40(%1), %%mm5\n"
17427+ " movntq %%mm5, 40(%2)\n"
17428+ " movq 48(%1), %%mm6\n"
17429+ " movntq %%mm6, 48(%2)\n"
17430+ " movq 56(%1), %%mm7\n"
17431+ " movntq %%mm7, 56(%2)\n"
17432 ".section .fixup, \"ax\"\n"
17433- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17434+ "3:\n"
17435+
17436+#ifdef CONFIG_PAX_KERNEXEC
17437+ " movl %%cr0, %0\n"
17438+ " movl %0, %%eax\n"
17439+ " andl $0xFFFEFFFF, %%eax\n"
17440+ " movl %%eax, %%cr0\n"
17441+#endif
17442+
17443+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17444+
17445+#ifdef CONFIG_PAX_KERNEXEC
17446+ " movl %0, %%cr0\n"
17447+#endif
17448+
17449 " jmp 2b\n"
17450 ".previous\n"
17451- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17452+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17453
17454 from += 64;
17455 to += 64;
17456@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17457 static void fast_copy_page(void *to, void *from)
17458 {
17459 int i;
17460+ unsigned long cr0;
17461
17462 kernel_fpu_begin();
17463
17464 __asm__ __volatile__ (
17465- "1: prefetch (%0)\n"
17466- " prefetch 64(%0)\n"
17467- " prefetch 128(%0)\n"
17468- " prefetch 192(%0)\n"
17469- " prefetch 256(%0)\n"
17470+ "1: prefetch (%1)\n"
17471+ " prefetch 64(%1)\n"
17472+ " prefetch 128(%1)\n"
17473+ " prefetch 192(%1)\n"
17474+ " prefetch 256(%1)\n"
17475 "2: \n"
17476 ".section .fixup, \"ax\"\n"
17477- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17478+ "3: \n"
17479+
17480+#ifdef CONFIG_PAX_KERNEXEC
17481+ " movl %%cr0, %0\n"
17482+ " movl %0, %%eax\n"
17483+ " andl $0xFFFEFFFF, %%eax\n"
17484+ " movl %%eax, %%cr0\n"
17485+#endif
17486+
17487+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17488+
17489+#ifdef CONFIG_PAX_KERNEXEC
17490+ " movl %0, %%cr0\n"
17491+#endif
17492+
17493 " jmp 2b\n"
17494 ".previous\n"
17495- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17496+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17497
17498 for (i = 0; i < 4096/64; i++) {
17499 __asm__ __volatile__ (
17500- "1: prefetch 320(%0)\n"
17501- "2: movq (%0), %%mm0\n"
17502- " movq 8(%0), %%mm1\n"
17503- " movq 16(%0), %%mm2\n"
17504- " movq 24(%0), %%mm3\n"
17505- " movq %%mm0, (%1)\n"
17506- " movq %%mm1, 8(%1)\n"
17507- " movq %%mm2, 16(%1)\n"
17508- " movq %%mm3, 24(%1)\n"
17509- " movq 32(%0), %%mm0\n"
17510- " movq 40(%0), %%mm1\n"
17511- " movq 48(%0), %%mm2\n"
17512- " movq 56(%0), %%mm3\n"
17513- " movq %%mm0, 32(%1)\n"
17514- " movq %%mm1, 40(%1)\n"
17515- " movq %%mm2, 48(%1)\n"
17516- " movq %%mm3, 56(%1)\n"
17517+ "1: prefetch 320(%1)\n"
17518+ "2: movq (%1), %%mm0\n"
17519+ " movq 8(%1), %%mm1\n"
17520+ " movq 16(%1), %%mm2\n"
17521+ " movq 24(%1), %%mm3\n"
17522+ " movq %%mm0, (%2)\n"
17523+ " movq %%mm1, 8(%2)\n"
17524+ " movq %%mm2, 16(%2)\n"
17525+ " movq %%mm3, 24(%2)\n"
17526+ " movq 32(%1), %%mm0\n"
17527+ " movq 40(%1), %%mm1\n"
17528+ " movq 48(%1), %%mm2\n"
17529+ " movq 56(%1), %%mm3\n"
17530+ " movq %%mm0, 32(%2)\n"
17531+ " movq %%mm1, 40(%2)\n"
17532+ " movq %%mm2, 48(%2)\n"
17533+ " movq %%mm3, 56(%2)\n"
17534 ".section .fixup, \"ax\"\n"
17535- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17536+ "3:\n"
17537+
17538+#ifdef CONFIG_PAX_KERNEXEC
17539+ " movl %%cr0, %0\n"
17540+ " movl %0, %%eax\n"
17541+ " andl $0xFFFEFFFF, %%eax\n"
17542+ " movl %%eax, %%cr0\n"
17543+#endif
17544+
17545+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17546+
17547+#ifdef CONFIG_PAX_KERNEXEC
17548+ " movl %0, %%cr0\n"
17549+#endif
17550+
17551 " jmp 2b\n"
17552 ".previous\n"
17553 _ASM_EXTABLE(1b, 3b)
17554- : : "r" (from), "r" (to) : "memory");
17555+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17556
17557 from += 64;
17558 to += 64;
17559diff -urNp linux-3.0.3/arch/x86/lib/putuser.S linux-3.0.3/arch/x86/lib/putuser.S
17560--- linux-3.0.3/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17561+++ linux-3.0.3/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17562@@ -15,7 +15,8 @@
17563 #include <asm/thread_info.h>
17564 #include <asm/errno.h>
17565 #include <asm/asm.h>
17566-
17567+#include <asm/segment.h>
17568+#include <asm/pgtable.h>
17569
17570 /*
17571 * __put_user_X
17572@@ -29,52 +30,119 @@
17573 * as they get called from within inline assembly.
17574 */
17575
17576-#define ENTER CFI_STARTPROC ; \
17577- GET_THREAD_INFO(%_ASM_BX)
17578+#define ENTER CFI_STARTPROC
17579 #define EXIT ret ; \
17580 CFI_ENDPROC
17581
17582+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17583+#define _DEST %_ASM_CX,%_ASM_BX
17584+#else
17585+#define _DEST %_ASM_CX
17586+#endif
17587+
17588+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17589+#define __copyuser_seg gs;
17590+#else
17591+#define __copyuser_seg
17592+#endif
17593+
17594 .text
17595 ENTRY(__put_user_1)
17596 ENTER
17597+
17598+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17599+ GET_THREAD_INFO(%_ASM_BX)
17600 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17601 jae bad_put_user
17602-1: movb %al,(%_ASM_CX)
17603+
17604+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17605+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17606+ cmp %_ASM_BX,%_ASM_CX
17607+ jb 1234f
17608+ xor %ebx,%ebx
17609+1234:
17610+#endif
17611+
17612+#endif
17613+
17614+1: __copyuser_seg movb %al,(_DEST)
17615 xor %eax,%eax
17616 EXIT
17617 ENDPROC(__put_user_1)
17618
17619 ENTRY(__put_user_2)
17620 ENTER
17621+
17622+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17623+ GET_THREAD_INFO(%_ASM_BX)
17624 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17625 sub $1,%_ASM_BX
17626 cmp %_ASM_BX,%_ASM_CX
17627 jae bad_put_user
17628-2: movw %ax,(%_ASM_CX)
17629+
17630+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17631+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17632+ cmp %_ASM_BX,%_ASM_CX
17633+ jb 1234f
17634+ xor %ebx,%ebx
17635+1234:
17636+#endif
17637+
17638+#endif
17639+
17640+2: __copyuser_seg movw %ax,(_DEST)
17641 xor %eax,%eax
17642 EXIT
17643 ENDPROC(__put_user_2)
17644
17645 ENTRY(__put_user_4)
17646 ENTER
17647+
17648+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17649+ GET_THREAD_INFO(%_ASM_BX)
17650 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17651 sub $3,%_ASM_BX
17652 cmp %_ASM_BX,%_ASM_CX
17653 jae bad_put_user
17654-3: movl %eax,(%_ASM_CX)
17655+
17656+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17657+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17658+ cmp %_ASM_BX,%_ASM_CX
17659+ jb 1234f
17660+ xor %ebx,%ebx
17661+1234:
17662+#endif
17663+
17664+#endif
17665+
17666+3: __copyuser_seg movl %eax,(_DEST)
17667 xor %eax,%eax
17668 EXIT
17669 ENDPROC(__put_user_4)
17670
17671 ENTRY(__put_user_8)
17672 ENTER
17673+
17674+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17675+ GET_THREAD_INFO(%_ASM_BX)
17676 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17677 sub $7,%_ASM_BX
17678 cmp %_ASM_BX,%_ASM_CX
17679 jae bad_put_user
17680-4: mov %_ASM_AX,(%_ASM_CX)
17681+
17682+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17683+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17684+ cmp %_ASM_BX,%_ASM_CX
17685+ jb 1234f
17686+ xor %ebx,%ebx
17687+1234:
17688+#endif
17689+
17690+#endif
17691+
17692+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17693 #ifdef CONFIG_X86_32
17694-5: movl %edx,4(%_ASM_CX)
17695+5: __copyuser_seg movl %edx,4(_DEST)
17696 #endif
17697 xor %eax,%eax
17698 EXIT
17699diff -urNp linux-3.0.3/arch/x86/lib/usercopy_32.c linux-3.0.3/arch/x86/lib/usercopy_32.c
17700--- linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17701+++ linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17702@@ -43,7 +43,7 @@ do { \
17703 __asm__ __volatile__( \
17704 " testl %1,%1\n" \
17705 " jz 2f\n" \
17706- "0: lodsb\n" \
17707+ "0: "__copyuser_seg"lodsb\n" \
17708 " stosb\n" \
17709 " testb %%al,%%al\n" \
17710 " jz 1f\n" \
17711@@ -128,10 +128,12 @@ do { \
17712 int __d0; \
17713 might_fault(); \
17714 __asm__ __volatile__( \
17715+ __COPYUSER_SET_ES \
17716 "0: rep; stosl\n" \
17717 " movl %2,%0\n" \
17718 "1: rep; stosb\n" \
17719 "2:\n" \
17720+ __COPYUSER_RESTORE_ES \
17721 ".section .fixup,\"ax\"\n" \
17722 "3: lea 0(%2,%0,4),%0\n" \
17723 " jmp 2b\n" \
17724@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17725 might_fault();
17726
17727 __asm__ __volatile__(
17728+ __COPYUSER_SET_ES
17729 " testl %0, %0\n"
17730 " jz 3f\n"
17731 " andl %0,%%ecx\n"
17732@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17733 " subl %%ecx,%0\n"
17734 " addl %0,%%eax\n"
17735 "1:\n"
17736+ __COPYUSER_RESTORE_ES
17737 ".section .fixup,\"ax\"\n"
17738 "2: xorl %%eax,%%eax\n"
17739 " jmp 1b\n"
17740@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17741
17742 #ifdef CONFIG_X86_INTEL_USERCOPY
17743 static unsigned long
17744-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17745+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17746 {
17747 int d0, d1;
17748 __asm__ __volatile__(
17749@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17750 " .align 2,0x90\n"
17751 "3: movl 0(%4), %%eax\n"
17752 "4: movl 4(%4), %%edx\n"
17753- "5: movl %%eax, 0(%3)\n"
17754- "6: movl %%edx, 4(%3)\n"
17755+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17756+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17757 "7: movl 8(%4), %%eax\n"
17758 "8: movl 12(%4),%%edx\n"
17759- "9: movl %%eax, 8(%3)\n"
17760- "10: movl %%edx, 12(%3)\n"
17761+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17762+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17763 "11: movl 16(%4), %%eax\n"
17764 "12: movl 20(%4), %%edx\n"
17765- "13: movl %%eax, 16(%3)\n"
17766- "14: movl %%edx, 20(%3)\n"
17767+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17768+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17769 "15: movl 24(%4), %%eax\n"
17770 "16: movl 28(%4), %%edx\n"
17771- "17: movl %%eax, 24(%3)\n"
17772- "18: movl %%edx, 28(%3)\n"
17773+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17774+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17775 "19: movl 32(%4), %%eax\n"
17776 "20: movl 36(%4), %%edx\n"
17777- "21: movl %%eax, 32(%3)\n"
17778- "22: movl %%edx, 36(%3)\n"
17779+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17780+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17781 "23: movl 40(%4), %%eax\n"
17782 "24: movl 44(%4), %%edx\n"
17783- "25: movl %%eax, 40(%3)\n"
17784- "26: movl %%edx, 44(%3)\n"
17785+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17786+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17787 "27: movl 48(%4), %%eax\n"
17788 "28: movl 52(%4), %%edx\n"
17789- "29: movl %%eax, 48(%3)\n"
17790- "30: movl %%edx, 52(%3)\n"
17791+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17792+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17793 "31: movl 56(%4), %%eax\n"
17794 "32: movl 60(%4), %%edx\n"
17795- "33: movl %%eax, 56(%3)\n"
17796- "34: movl %%edx, 60(%3)\n"
17797+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17798+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17799 " addl $-64, %0\n"
17800 " addl $64, %4\n"
17801 " addl $64, %3\n"
17802@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17803 " shrl $2, %0\n"
17804 " andl $3, %%eax\n"
17805 " cld\n"
17806+ __COPYUSER_SET_ES
17807 "99: rep; movsl\n"
17808 "36: movl %%eax, %0\n"
17809 "37: rep; movsb\n"
17810 "100:\n"
17811+ __COPYUSER_RESTORE_ES
17812+ ".section .fixup,\"ax\"\n"
17813+ "101: lea 0(%%eax,%0,4),%0\n"
17814+ " jmp 100b\n"
17815+ ".previous\n"
17816+ ".section __ex_table,\"a\"\n"
17817+ " .align 4\n"
17818+ " .long 1b,100b\n"
17819+ " .long 2b,100b\n"
17820+ " .long 3b,100b\n"
17821+ " .long 4b,100b\n"
17822+ " .long 5b,100b\n"
17823+ " .long 6b,100b\n"
17824+ " .long 7b,100b\n"
17825+ " .long 8b,100b\n"
17826+ " .long 9b,100b\n"
17827+ " .long 10b,100b\n"
17828+ " .long 11b,100b\n"
17829+ " .long 12b,100b\n"
17830+ " .long 13b,100b\n"
17831+ " .long 14b,100b\n"
17832+ " .long 15b,100b\n"
17833+ " .long 16b,100b\n"
17834+ " .long 17b,100b\n"
17835+ " .long 18b,100b\n"
17836+ " .long 19b,100b\n"
17837+ " .long 20b,100b\n"
17838+ " .long 21b,100b\n"
17839+ " .long 22b,100b\n"
17840+ " .long 23b,100b\n"
17841+ " .long 24b,100b\n"
17842+ " .long 25b,100b\n"
17843+ " .long 26b,100b\n"
17844+ " .long 27b,100b\n"
17845+ " .long 28b,100b\n"
17846+ " .long 29b,100b\n"
17847+ " .long 30b,100b\n"
17848+ " .long 31b,100b\n"
17849+ " .long 32b,100b\n"
17850+ " .long 33b,100b\n"
17851+ " .long 34b,100b\n"
17852+ " .long 35b,100b\n"
17853+ " .long 36b,100b\n"
17854+ " .long 37b,100b\n"
17855+ " .long 99b,101b\n"
17856+ ".previous"
17857+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17858+ : "1"(to), "2"(from), "0"(size)
17859+ : "eax", "edx", "memory");
17860+ return size;
17861+}
17862+
17863+static unsigned long
17864+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17865+{
17866+ int d0, d1;
17867+ __asm__ __volatile__(
17868+ " .align 2,0x90\n"
17869+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17870+ " cmpl $67, %0\n"
17871+ " jbe 3f\n"
17872+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17873+ " .align 2,0x90\n"
17874+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17875+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17876+ "5: movl %%eax, 0(%3)\n"
17877+ "6: movl %%edx, 4(%3)\n"
17878+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17879+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17880+ "9: movl %%eax, 8(%3)\n"
17881+ "10: movl %%edx, 12(%3)\n"
17882+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17883+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17884+ "13: movl %%eax, 16(%3)\n"
17885+ "14: movl %%edx, 20(%3)\n"
17886+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17887+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17888+ "17: movl %%eax, 24(%3)\n"
17889+ "18: movl %%edx, 28(%3)\n"
17890+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17891+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17892+ "21: movl %%eax, 32(%3)\n"
17893+ "22: movl %%edx, 36(%3)\n"
17894+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17895+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17896+ "25: movl %%eax, 40(%3)\n"
17897+ "26: movl %%edx, 44(%3)\n"
17898+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17899+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17900+ "29: movl %%eax, 48(%3)\n"
17901+ "30: movl %%edx, 52(%3)\n"
17902+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17903+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17904+ "33: movl %%eax, 56(%3)\n"
17905+ "34: movl %%edx, 60(%3)\n"
17906+ " addl $-64, %0\n"
17907+ " addl $64, %4\n"
17908+ " addl $64, %3\n"
17909+ " cmpl $63, %0\n"
17910+ " ja 1b\n"
17911+ "35: movl %0, %%eax\n"
17912+ " shrl $2, %0\n"
17913+ " andl $3, %%eax\n"
17914+ " cld\n"
17915+ "99: rep; "__copyuser_seg" movsl\n"
17916+ "36: movl %%eax, %0\n"
17917+ "37: rep; "__copyuser_seg" movsb\n"
17918+ "100:\n"
17919 ".section .fixup,\"ax\"\n"
17920 "101: lea 0(%%eax,%0,4),%0\n"
17921 " jmp 100b\n"
17922@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17923 int d0, d1;
17924 __asm__ __volatile__(
17925 " .align 2,0x90\n"
17926- "0: movl 32(%4), %%eax\n"
17927+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17928 " cmpl $67, %0\n"
17929 " jbe 2f\n"
17930- "1: movl 64(%4), %%eax\n"
17931+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17932 " .align 2,0x90\n"
17933- "2: movl 0(%4), %%eax\n"
17934- "21: movl 4(%4), %%edx\n"
17935+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17936+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17937 " movl %%eax, 0(%3)\n"
17938 " movl %%edx, 4(%3)\n"
17939- "3: movl 8(%4), %%eax\n"
17940- "31: movl 12(%4),%%edx\n"
17941+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17942+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17943 " movl %%eax, 8(%3)\n"
17944 " movl %%edx, 12(%3)\n"
17945- "4: movl 16(%4), %%eax\n"
17946- "41: movl 20(%4), %%edx\n"
17947+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17948+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17949 " movl %%eax, 16(%3)\n"
17950 " movl %%edx, 20(%3)\n"
17951- "10: movl 24(%4), %%eax\n"
17952- "51: movl 28(%4), %%edx\n"
17953+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17954+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17955 " movl %%eax, 24(%3)\n"
17956 " movl %%edx, 28(%3)\n"
17957- "11: movl 32(%4), %%eax\n"
17958- "61: movl 36(%4), %%edx\n"
17959+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17960+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17961 " movl %%eax, 32(%3)\n"
17962 " movl %%edx, 36(%3)\n"
17963- "12: movl 40(%4), %%eax\n"
17964- "71: movl 44(%4), %%edx\n"
17965+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17966+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17967 " movl %%eax, 40(%3)\n"
17968 " movl %%edx, 44(%3)\n"
17969- "13: movl 48(%4), %%eax\n"
17970- "81: movl 52(%4), %%edx\n"
17971+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17972+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17973 " movl %%eax, 48(%3)\n"
17974 " movl %%edx, 52(%3)\n"
17975- "14: movl 56(%4), %%eax\n"
17976- "91: movl 60(%4), %%edx\n"
17977+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17978+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17979 " movl %%eax, 56(%3)\n"
17980 " movl %%edx, 60(%3)\n"
17981 " addl $-64, %0\n"
17982@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17983 " shrl $2, %0\n"
17984 " andl $3, %%eax\n"
17985 " cld\n"
17986- "6: rep; movsl\n"
17987+ "6: rep; "__copyuser_seg" movsl\n"
17988 " movl %%eax,%0\n"
17989- "7: rep; movsb\n"
17990+ "7: rep; "__copyuser_seg" movsb\n"
17991 "8:\n"
17992 ".section .fixup,\"ax\"\n"
17993 "9: lea 0(%%eax,%0,4),%0\n"
17994@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17995
17996 __asm__ __volatile__(
17997 " .align 2,0x90\n"
17998- "0: movl 32(%4), %%eax\n"
17999+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18000 " cmpl $67, %0\n"
18001 " jbe 2f\n"
18002- "1: movl 64(%4), %%eax\n"
18003+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18004 " .align 2,0x90\n"
18005- "2: movl 0(%4), %%eax\n"
18006- "21: movl 4(%4), %%edx\n"
18007+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18008+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18009 " movnti %%eax, 0(%3)\n"
18010 " movnti %%edx, 4(%3)\n"
18011- "3: movl 8(%4), %%eax\n"
18012- "31: movl 12(%4),%%edx\n"
18013+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18014+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18015 " movnti %%eax, 8(%3)\n"
18016 " movnti %%edx, 12(%3)\n"
18017- "4: movl 16(%4), %%eax\n"
18018- "41: movl 20(%4), %%edx\n"
18019+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18020+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18021 " movnti %%eax, 16(%3)\n"
18022 " movnti %%edx, 20(%3)\n"
18023- "10: movl 24(%4), %%eax\n"
18024- "51: movl 28(%4), %%edx\n"
18025+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18026+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18027 " movnti %%eax, 24(%3)\n"
18028 " movnti %%edx, 28(%3)\n"
18029- "11: movl 32(%4), %%eax\n"
18030- "61: movl 36(%4), %%edx\n"
18031+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18032+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18033 " movnti %%eax, 32(%3)\n"
18034 " movnti %%edx, 36(%3)\n"
18035- "12: movl 40(%4), %%eax\n"
18036- "71: movl 44(%4), %%edx\n"
18037+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18038+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18039 " movnti %%eax, 40(%3)\n"
18040 " movnti %%edx, 44(%3)\n"
18041- "13: movl 48(%4), %%eax\n"
18042- "81: movl 52(%4), %%edx\n"
18043+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18044+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18045 " movnti %%eax, 48(%3)\n"
18046 " movnti %%edx, 52(%3)\n"
18047- "14: movl 56(%4), %%eax\n"
18048- "91: movl 60(%4), %%edx\n"
18049+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18050+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18051 " movnti %%eax, 56(%3)\n"
18052 " movnti %%edx, 60(%3)\n"
18053 " addl $-64, %0\n"
18054@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18055 " shrl $2, %0\n"
18056 " andl $3, %%eax\n"
18057 " cld\n"
18058- "6: rep; movsl\n"
18059+ "6: rep; "__copyuser_seg" movsl\n"
18060 " movl %%eax,%0\n"
18061- "7: rep; movsb\n"
18062+ "7: rep; "__copyuser_seg" movsb\n"
18063 "8:\n"
18064 ".section .fixup,\"ax\"\n"
18065 "9: lea 0(%%eax,%0,4),%0\n"
18066@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18067
18068 __asm__ __volatile__(
18069 " .align 2,0x90\n"
18070- "0: movl 32(%4), %%eax\n"
18071+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18072 " cmpl $67, %0\n"
18073 " jbe 2f\n"
18074- "1: movl 64(%4), %%eax\n"
18075+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18076 " .align 2,0x90\n"
18077- "2: movl 0(%4), %%eax\n"
18078- "21: movl 4(%4), %%edx\n"
18079+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18080+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18081 " movnti %%eax, 0(%3)\n"
18082 " movnti %%edx, 4(%3)\n"
18083- "3: movl 8(%4), %%eax\n"
18084- "31: movl 12(%4),%%edx\n"
18085+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18086+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18087 " movnti %%eax, 8(%3)\n"
18088 " movnti %%edx, 12(%3)\n"
18089- "4: movl 16(%4), %%eax\n"
18090- "41: movl 20(%4), %%edx\n"
18091+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18092+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18093 " movnti %%eax, 16(%3)\n"
18094 " movnti %%edx, 20(%3)\n"
18095- "10: movl 24(%4), %%eax\n"
18096- "51: movl 28(%4), %%edx\n"
18097+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18098+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18099 " movnti %%eax, 24(%3)\n"
18100 " movnti %%edx, 28(%3)\n"
18101- "11: movl 32(%4), %%eax\n"
18102- "61: movl 36(%4), %%edx\n"
18103+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18104+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18105 " movnti %%eax, 32(%3)\n"
18106 " movnti %%edx, 36(%3)\n"
18107- "12: movl 40(%4), %%eax\n"
18108- "71: movl 44(%4), %%edx\n"
18109+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18110+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18111 " movnti %%eax, 40(%3)\n"
18112 " movnti %%edx, 44(%3)\n"
18113- "13: movl 48(%4), %%eax\n"
18114- "81: movl 52(%4), %%edx\n"
18115+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18116+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18117 " movnti %%eax, 48(%3)\n"
18118 " movnti %%edx, 52(%3)\n"
18119- "14: movl 56(%4), %%eax\n"
18120- "91: movl 60(%4), %%edx\n"
18121+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18122+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18123 " movnti %%eax, 56(%3)\n"
18124 " movnti %%edx, 60(%3)\n"
18125 " addl $-64, %0\n"
18126@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18127 " shrl $2, %0\n"
18128 " andl $3, %%eax\n"
18129 " cld\n"
18130- "6: rep; movsl\n"
18131+ "6: rep; "__copyuser_seg" movsl\n"
18132 " movl %%eax,%0\n"
18133- "7: rep; movsb\n"
18134+ "7: rep; "__copyuser_seg" movsb\n"
18135 "8:\n"
18136 ".section .fixup,\"ax\"\n"
18137 "9: lea 0(%%eax,%0,4),%0\n"
18138@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18139 */
18140 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18141 unsigned long size);
18142-unsigned long __copy_user_intel(void __user *to, const void *from,
18143+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18144+ unsigned long size);
18145+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18146 unsigned long size);
18147 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18148 const void __user *from, unsigned long size);
18149 #endif /* CONFIG_X86_INTEL_USERCOPY */
18150
18151 /* Generic arbitrary sized copy. */
18152-#define __copy_user(to, from, size) \
18153+#define __copy_user(to, from, size, prefix, set, restore) \
18154 do { \
18155 int __d0, __d1, __d2; \
18156 __asm__ __volatile__( \
18157+ set \
18158 " cmp $7,%0\n" \
18159 " jbe 1f\n" \
18160 " movl %1,%0\n" \
18161 " negl %0\n" \
18162 " andl $7,%0\n" \
18163 " subl %0,%3\n" \
18164- "4: rep; movsb\n" \
18165+ "4: rep; "prefix"movsb\n" \
18166 " movl %3,%0\n" \
18167 " shrl $2,%0\n" \
18168 " andl $3,%3\n" \
18169 " .align 2,0x90\n" \
18170- "0: rep; movsl\n" \
18171+ "0: rep; "prefix"movsl\n" \
18172 " movl %3,%0\n" \
18173- "1: rep; movsb\n" \
18174+ "1: rep; "prefix"movsb\n" \
18175 "2:\n" \
18176+ restore \
18177 ".section .fixup,\"ax\"\n" \
18178 "5: addl %3,%0\n" \
18179 " jmp 2b\n" \
18180@@ -682,14 +799,14 @@ do { \
18181 " negl %0\n" \
18182 " andl $7,%0\n" \
18183 " subl %0,%3\n" \
18184- "4: rep; movsb\n" \
18185+ "4: rep; "__copyuser_seg"movsb\n" \
18186 " movl %3,%0\n" \
18187 " shrl $2,%0\n" \
18188 " andl $3,%3\n" \
18189 " .align 2,0x90\n" \
18190- "0: rep; movsl\n" \
18191+ "0: rep; "__copyuser_seg"movsl\n" \
18192 " movl %3,%0\n" \
18193- "1: rep; movsb\n" \
18194+ "1: rep; "__copyuser_seg"movsb\n" \
18195 "2:\n" \
18196 ".section .fixup,\"ax\"\n" \
18197 "5: addl %3,%0\n" \
18198@@ -775,9 +892,9 @@ survive:
18199 }
18200 #endif
18201 if (movsl_is_ok(to, from, n))
18202- __copy_user(to, from, n);
18203+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18204 else
18205- n = __copy_user_intel(to, from, n);
18206+ n = __generic_copy_to_user_intel(to, from, n);
18207 return n;
18208 }
18209 EXPORT_SYMBOL(__copy_to_user_ll);
18210@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18211 unsigned long n)
18212 {
18213 if (movsl_is_ok(to, from, n))
18214- __copy_user(to, from, n);
18215+ __copy_user(to, from, n, __copyuser_seg, "", "");
18216 else
18217- n = __copy_user_intel((void __user *)to,
18218- (const void *)from, n);
18219+ n = __generic_copy_from_user_intel(to, from, n);
18220 return n;
18221 }
18222 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18223@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18224 if (n > 64 && cpu_has_xmm2)
18225 n = __copy_user_intel_nocache(to, from, n);
18226 else
18227- __copy_user(to, from, n);
18228+ __copy_user(to, from, n, __copyuser_seg, "", "");
18229 #else
18230- __copy_user(to, from, n);
18231+ __copy_user(to, from, n, __copyuser_seg, "", "");
18232 #endif
18233 return n;
18234 }
18235 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18236
18237-/**
18238- * copy_to_user: - Copy a block of data into user space.
18239- * @to: Destination address, in user space.
18240- * @from: Source address, in kernel space.
18241- * @n: Number of bytes to copy.
18242- *
18243- * Context: User context only. This function may sleep.
18244- *
18245- * Copy data from kernel space to user space.
18246- *
18247- * Returns number of bytes that could not be copied.
18248- * On success, this will be zero.
18249- */
18250-unsigned long
18251-copy_to_user(void __user *to, const void *from, unsigned long n)
18252+void copy_from_user_overflow(void)
18253 {
18254- if (access_ok(VERIFY_WRITE, to, n))
18255- n = __copy_to_user(to, from, n);
18256- return n;
18257+ WARN(1, "Buffer overflow detected!\n");
18258 }
18259-EXPORT_SYMBOL(copy_to_user);
18260+EXPORT_SYMBOL(copy_from_user_overflow);
18261
18262-/**
18263- * copy_from_user: - Copy a block of data from user space.
18264- * @to: Destination address, in kernel space.
18265- * @from: Source address, in user space.
18266- * @n: Number of bytes to copy.
18267- *
18268- * Context: User context only. This function may sleep.
18269- *
18270- * Copy data from user space to kernel space.
18271- *
18272- * Returns number of bytes that could not be copied.
18273- * On success, this will be zero.
18274- *
18275- * If some data could not be copied, this function will pad the copied
18276- * data to the requested size using zero bytes.
18277- */
18278-unsigned long
18279-_copy_from_user(void *to, const void __user *from, unsigned long n)
18280+void copy_to_user_overflow(void)
18281 {
18282- if (access_ok(VERIFY_READ, from, n))
18283- n = __copy_from_user(to, from, n);
18284- else
18285- memset(to, 0, n);
18286- return n;
18287+ WARN(1, "Buffer overflow detected!\n");
18288 }
18289-EXPORT_SYMBOL(_copy_from_user);
18290+EXPORT_SYMBOL(copy_to_user_overflow);
18291
18292-void copy_from_user_overflow(void)
18293+#ifdef CONFIG_PAX_MEMORY_UDEREF
18294+void __set_fs(mm_segment_t x)
18295 {
18296- WARN(1, "Buffer overflow detected!\n");
18297+ switch (x.seg) {
18298+ case 0:
18299+ loadsegment(gs, 0);
18300+ break;
18301+ case TASK_SIZE_MAX:
18302+ loadsegment(gs, __USER_DS);
18303+ break;
18304+ case -1UL:
18305+ loadsegment(gs, __KERNEL_DS);
18306+ break;
18307+ default:
18308+ BUG();
18309+ }
18310+ return;
18311 }
18312-EXPORT_SYMBOL(copy_from_user_overflow);
18313+EXPORT_SYMBOL(__set_fs);
18314+
18315+void set_fs(mm_segment_t x)
18316+{
18317+ current_thread_info()->addr_limit = x;
18318+ __set_fs(x);
18319+}
18320+EXPORT_SYMBOL(set_fs);
18321+#endif
18322diff -urNp linux-3.0.3/arch/x86/lib/usercopy_64.c linux-3.0.3/arch/x86/lib/usercopy_64.c
18323--- linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18324+++ linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18325@@ -42,6 +42,12 @@ long
18326 __strncpy_from_user(char *dst, const char __user *src, long count)
18327 {
18328 long res;
18329+
18330+#ifdef CONFIG_PAX_MEMORY_UDEREF
18331+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18332+ src += PAX_USER_SHADOW_BASE;
18333+#endif
18334+
18335 __do_strncpy_from_user(dst, src, count, res);
18336 return res;
18337 }
18338@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18339 {
18340 long __d0;
18341 might_fault();
18342+
18343+#ifdef CONFIG_PAX_MEMORY_UDEREF
18344+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18345+ addr += PAX_USER_SHADOW_BASE;
18346+#endif
18347+
18348 /* no memory constraint because it doesn't change any memory gcc knows
18349 about */
18350 asm volatile(
18351@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18352
18353 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18354 {
18355- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18356+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18357+
18358+#ifdef CONFIG_PAX_MEMORY_UDEREF
18359+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18360+ to += PAX_USER_SHADOW_BASE;
18361+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18362+ from += PAX_USER_SHADOW_BASE;
18363+#endif
18364+
18365 return copy_user_generic((__force void *)to, (__force void *)from, len);
18366- }
18367- return len;
18368+ }
18369+ return len;
18370 }
18371 EXPORT_SYMBOL(copy_in_user);
18372
18373diff -urNp linux-3.0.3/arch/x86/Makefile linux-3.0.3/arch/x86/Makefile
18374--- linux-3.0.3/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18375+++ linux-3.0.3/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18376@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18377 else
18378 BITS := 64
18379 UTS_MACHINE := x86_64
18380+ biarch := $(call cc-option,-m64)
18381 CHECKFLAGS += -D__x86_64__ -m64
18382
18383 KBUILD_AFLAGS += -m64
18384@@ -195,3 +196,12 @@ define archhelp
18385 echo ' FDARGS="..." arguments for the booted kernel'
18386 echo ' FDINITRD=file initrd for the booted kernel'
18387 endef
18388+
18389+define OLD_LD
18390+
18391+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18392+*** Please upgrade your binutils to 2.18 or newer
18393+endef
18394+
18395+archprepare:
18396+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18397diff -urNp linux-3.0.3/arch/x86/mm/extable.c linux-3.0.3/arch/x86/mm/extable.c
18398--- linux-3.0.3/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18399+++ linux-3.0.3/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18400@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18401 const struct exception_table_entry *fixup;
18402
18403 #ifdef CONFIG_PNPBIOS
18404- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18405+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18406 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18407 extern u32 pnp_bios_is_utter_crap;
18408 pnp_bios_is_utter_crap = 1;
18409diff -urNp linux-3.0.3/arch/x86/mm/fault.c linux-3.0.3/arch/x86/mm/fault.c
18410--- linux-3.0.3/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18411+++ linux-3.0.3/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18412@@ -13,10 +13,18 @@
18413 #include <linux/perf_event.h> /* perf_sw_event */
18414 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18415 #include <linux/prefetch.h> /* prefetchw */
18416+#include <linux/unistd.h>
18417+#include <linux/compiler.h>
18418
18419 #include <asm/traps.h> /* dotraplinkage, ... */
18420 #include <asm/pgalloc.h> /* pgd_*(), ... */
18421 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18422+#include <asm/vsyscall.h>
18423+#include <asm/tlbflush.h>
18424+
18425+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18426+#include <asm/stacktrace.h>
18427+#endif
18428
18429 /*
18430 * Page fault error code bits:
18431@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18432 int ret = 0;
18433
18434 /* kprobe_running() needs smp_processor_id() */
18435- if (kprobes_built_in() && !user_mode_vm(regs)) {
18436+ if (kprobes_built_in() && !user_mode(regs)) {
18437 preempt_disable();
18438 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18439 ret = 1;
18440@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18441 return !instr_lo || (instr_lo>>1) == 1;
18442 case 0x00:
18443 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18444- if (probe_kernel_address(instr, opcode))
18445+ if (user_mode(regs)) {
18446+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18447+ return 0;
18448+ } else if (probe_kernel_address(instr, opcode))
18449 return 0;
18450
18451 *prefetch = (instr_lo == 0xF) &&
18452@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18453 while (instr < max_instr) {
18454 unsigned char opcode;
18455
18456- if (probe_kernel_address(instr, opcode))
18457+ if (user_mode(regs)) {
18458+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18459+ break;
18460+ } else if (probe_kernel_address(instr, opcode))
18461 break;
18462
18463 instr++;
18464@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18465 force_sig_info(si_signo, &info, tsk);
18466 }
18467
18468+#ifdef CONFIG_PAX_EMUTRAMP
18469+static int pax_handle_fetch_fault(struct pt_regs *regs);
18470+#endif
18471+
18472+#ifdef CONFIG_PAX_PAGEEXEC
18473+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18474+{
18475+ pgd_t *pgd;
18476+ pud_t *pud;
18477+ pmd_t *pmd;
18478+
18479+ pgd = pgd_offset(mm, address);
18480+ if (!pgd_present(*pgd))
18481+ return NULL;
18482+ pud = pud_offset(pgd, address);
18483+ if (!pud_present(*pud))
18484+ return NULL;
18485+ pmd = pmd_offset(pud, address);
18486+ if (!pmd_present(*pmd))
18487+ return NULL;
18488+ return pmd;
18489+}
18490+#endif
18491+
18492 DEFINE_SPINLOCK(pgd_lock);
18493 LIST_HEAD(pgd_list);
18494
18495@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18496 for (address = VMALLOC_START & PMD_MASK;
18497 address >= TASK_SIZE && address < FIXADDR_TOP;
18498 address += PMD_SIZE) {
18499+
18500+#ifdef CONFIG_PAX_PER_CPU_PGD
18501+ unsigned long cpu;
18502+#else
18503 struct page *page;
18504+#endif
18505
18506 spin_lock(&pgd_lock);
18507+
18508+#ifdef CONFIG_PAX_PER_CPU_PGD
18509+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18510+ pgd_t *pgd = get_cpu_pgd(cpu);
18511+ pmd_t *ret;
18512+#else
18513 list_for_each_entry(page, &pgd_list, lru) {
18514+ pgd_t *pgd = page_address(page);
18515 spinlock_t *pgt_lock;
18516 pmd_t *ret;
18517
18518@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18519 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18520
18521 spin_lock(pgt_lock);
18522- ret = vmalloc_sync_one(page_address(page), address);
18523+#endif
18524+
18525+ ret = vmalloc_sync_one(pgd, address);
18526+
18527+#ifndef CONFIG_PAX_PER_CPU_PGD
18528 spin_unlock(pgt_lock);
18529+#endif
18530
18531 if (!ret)
18532 break;
18533@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18534 * an interrupt in the middle of a task switch..
18535 */
18536 pgd_paddr = read_cr3();
18537+
18538+#ifdef CONFIG_PAX_PER_CPU_PGD
18539+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18540+#endif
18541+
18542 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18543 if (!pmd_k)
18544 return -1;
18545@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18546 * happen within a race in page table update. In the later
18547 * case just flush:
18548 */
18549+
18550+#ifdef CONFIG_PAX_PER_CPU_PGD
18551+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18552+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18553+#else
18554 pgd = pgd_offset(current->active_mm, address);
18555+#endif
18556+
18557 pgd_ref = pgd_offset_k(address);
18558 if (pgd_none(*pgd_ref))
18559 return -1;
18560@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18561 static int is_errata100(struct pt_regs *regs, unsigned long address)
18562 {
18563 #ifdef CONFIG_X86_64
18564- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18565+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18566 return 1;
18567 #endif
18568 return 0;
18569@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18570 }
18571
18572 static const char nx_warning[] = KERN_CRIT
18573-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18574+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18575
18576 static void
18577 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18578@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18579 if (!oops_may_print())
18580 return;
18581
18582- if (error_code & PF_INSTR) {
18583+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18584 unsigned int level;
18585
18586 pte_t *pte = lookup_address(address, &level);
18587
18588 if (pte && pte_present(*pte) && !pte_exec(*pte))
18589- printk(nx_warning, current_uid());
18590+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18591+ }
18592+
18593+#ifdef CONFIG_PAX_KERNEXEC
18594+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18595+ if (current->signal->curr_ip)
18596+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18597+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18598+ else
18599+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18600+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18601 }
18602+#endif
18603
18604 printk(KERN_ALERT "BUG: unable to handle kernel ");
18605 if (address < PAGE_SIZE)
18606@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18607 unsigned long address, int si_code)
18608 {
18609 struct task_struct *tsk = current;
18610+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18611+ struct mm_struct *mm = tsk->mm;
18612+#endif
18613+
18614+#ifdef CONFIG_X86_64
18615+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18616+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18617+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18618+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18619+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18620+ return;
18621+ }
18622+ }
18623+#endif
18624+
18625+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18626+ if (mm && (error_code & PF_USER)) {
18627+ unsigned long ip = regs->ip;
18628+
18629+ if (v8086_mode(regs))
18630+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18631+
18632+ /*
18633+ * It's possible to have interrupts off here:
18634+ */
18635+ local_irq_enable();
18636+
18637+#ifdef CONFIG_PAX_PAGEEXEC
18638+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18639+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18640+
18641+#ifdef CONFIG_PAX_EMUTRAMP
18642+ switch (pax_handle_fetch_fault(regs)) {
18643+ case 2:
18644+ return;
18645+ }
18646+#endif
18647+
18648+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18649+ do_group_exit(SIGKILL);
18650+ }
18651+#endif
18652+
18653+#ifdef CONFIG_PAX_SEGMEXEC
18654+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18655+
18656+#ifdef CONFIG_PAX_EMUTRAMP
18657+ switch (pax_handle_fetch_fault(regs)) {
18658+ case 2:
18659+ return;
18660+ }
18661+#endif
18662+
18663+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18664+ do_group_exit(SIGKILL);
18665+ }
18666+#endif
18667+
18668+ }
18669+#endif
18670
18671 /* User mode accesses just cause a SIGSEGV */
18672 if (error_code & PF_USER) {
18673@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18674 return 1;
18675 }
18676
18677+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18678+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18679+{
18680+ pte_t *pte;
18681+ pmd_t *pmd;
18682+ spinlock_t *ptl;
18683+ unsigned char pte_mask;
18684+
18685+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18686+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18687+ return 0;
18688+
18689+ /* PaX: it's our fault, let's handle it if we can */
18690+
18691+ /* PaX: take a look at read faults before acquiring any locks */
18692+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18693+ /* instruction fetch attempt from a protected page in user mode */
18694+ up_read(&mm->mmap_sem);
18695+
18696+#ifdef CONFIG_PAX_EMUTRAMP
18697+ switch (pax_handle_fetch_fault(regs)) {
18698+ case 2:
18699+ return 1;
18700+ }
18701+#endif
18702+
18703+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18704+ do_group_exit(SIGKILL);
18705+ }
18706+
18707+ pmd = pax_get_pmd(mm, address);
18708+ if (unlikely(!pmd))
18709+ return 0;
18710+
18711+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18712+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18713+ pte_unmap_unlock(pte, ptl);
18714+ return 0;
18715+ }
18716+
18717+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18718+ /* write attempt to a protected page in user mode */
18719+ pte_unmap_unlock(pte, ptl);
18720+ return 0;
18721+ }
18722+
18723+#ifdef CONFIG_SMP
18724+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18725+#else
18726+ if (likely(address > get_limit(regs->cs)))
18727+#endif
18728+ {
18729+ set_pte(pte, pte_mkread(*pte));
18730+ __flush_tlb_one(address);
18731+ pte_unmap_unlock(pte, ptl);
18732+ up_read(&mm->mmap_sem);
18733+ return 1;
18734+ }
18735+
18736+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18737+
18738+ /*
18739+ * PaX: fill DTLB with user rights and retry
18740+ */
18741+ __asm__ __volatile__ (
18742+ "orb %2,(%1)\n"
18743+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18744+/*
18745+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18746+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18747+ * page fault when examined during a TLB load attempt. this is true not only
18748+ * for PTEs holding a non-present entry but also present entries that will
18749+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18750+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18751+ * for our target pages since their PTEs are simply not in the TLBs at all.
18752+
18753+ * the best thing in omitting it is that we gain around 15-20% speed in the
18754+ * fast path of the page fault handler and can get rid of tracing since we
18755+ * can no longer flush unintended entries.
18756+ */
18757+ "invlpg (%0)\n"
18758+#endif
18759+ __copyuser_seg"testb $0,(%0)\n"
18760+ "xorb %3,(%1)\n"
18761+ :
18762+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18763+ : "memory", "cc");
18764+ pte_unmap_unlock(pte, ptl);
18765+ up_read(&mm->mmap_sem);
18766+ return 1;
18767+}
18768+#endif
18769+
18770 /*
18771 * Handle a spurious fault caused by a stale TLB entry.
18772 *
18773@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18774 static inline int
18775 access_error(unsigned long error_code, struct vm_area_struct *vma)
18776 {
18777+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18778+ return 1;
18779+
18780 if (error_code & PF_WRITE) {
18781 /* write, present and write, not present: */
18782 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18783@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18784 {
18785 struct vm_area_struct *vma;
18786 struct task_struct *tsk;
18787- unsigned long address;
18788 struct mm_struct *mm;
18789 int fault;
18790 int write = error_code & PF_WRITE;
18791 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18792 (write ? FAULT_FLAG_WRITE : 0);
18793
18794+ /* Get the faulting address: */
18795+ unsigned long address = read_cr2();
18796+
18797+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18798+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18799+ if (!search_exception_tables(regs->ip)) {
18800+ bad_area_nosemaphore(regs, error_code, address);
18801+ return;
18802+ }
18803+ if (address < PAX_USER_SHADOW_BASE) {
18804+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18805+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18806+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18807+ } else
18808+ address -= PAX_USER_SHADOW_BASE;
18809+ }
18810+#endif
18811+
18812 tsk = current;
18813 mm = tsk->mm;
18814
18815- /* Get the faulting address: */
18816- address = read_cr2();
18817-
18818 /*
18819 * Detect and handle instructions that would cause a page fault for
18820 * both a tracked kernel page and a userspace page.
18821@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18822 * User-mode registers count as a user access even for any
18823 * potential system fault or CPU buglet:
18824 */
18825- if (user_mode_vm(regs)) {
18826+ if (user_mode(regs)) {
18827 local_irq_enable();
18828 error_code |= PF_USER;
18829 } else {
18830@@ -1103,6 +1351,11 @@ retry:
18831 might_sleep();
18832 }
18833
18834+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18835+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18836+ return;
18837+#endif
18838+
18839 vma = find_vma(mm, address);
18840 if (unlikely(!vma)) {
18841 bad_area(regs, error_code, address);
18842@@ -1114,18 +1367,24 @@ retry:
18843 bad_area(regs, error_code, address);
18844 return;
18845 }
18846- if (error_code & PF_USER) {
18847- /*
18848- * Accessing the stack below %sp is always a bug.
18849- * The large cushion allows instructions like enter
18850- * and pusha to work. ("enter $65535, $31" pushes
18851- * 32 pointers and then decrements %sp by 65535.)
18852- */
18853- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18854- bad_area(regs, error_code, address);
18855- return;
18856- }
18857+ /*
18858+ * Accessing the stack below %sp is always a bug.
18859+ * The large cushion allows instructions like enter
18860+ * and pusha to work. ("enter $65535, $31" pushes
18861+ * 32 pointers and then decrements %sp by 65535.)
18862+ */
18863+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18864+ bad_area(regs, error_code, address);
18865+ return;
18866 }
18867+
18868+#ifdef CONFIG_PAX_SEGMEXEC
18869+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18870+ bad_area(regs, error_code, address);
18871+ return;
18872+ }
18873+#endif
18874+
18875 if (unlikely(expand_stack(vma, address))) {
18876 bad_area(regs, error_code, address);
18877 return;
18878@@ -1180,3 +1439,199 @@ good_area:
18879
18880 up_read(&mm->mmap_sem);
18881 }
18882+
18883+#ifdef CONFIG_PAX_EMUTRAMP
18884+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18885+{
18886+ int err;
18887+
18888+ do { /* PaX: gcc trampoline emulation #1 */
18889+ unsigned char mov1, mov2;
18890+ unsigned short jmp;
18891+ unsigned int addr1, addr2;
18892+
18893+#ifdef CONFIG_X86_64
18894+ if ((regs->ip + 11) >> 32)
18895+ break;
18896+#endif
18897+
18898+ err = get_user(mov1, (unsigned char __user *)regs->ip);
18899+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18900+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18901+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18902+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18903+
18904+ if (err)
18905+ break;
18906+
18907+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18908+ regs->cx = addr1;
18909+ regs->ax = addr2;
18910+ regs->ip = addr2;
18911+ return 2;
18912+ }
18913+ } while (0);
18914+
18915+ do { /* PaX: gcc trampoline emulation #2 */
18916+ unsigned char mov, jmp;
18917+ unsigned int addr1, addr2;
18918+
18919+#ifdef CONFIG_X86_64
18920+ if ((regs->ip + 9) >> 32)
18921+ break;
18922+#endif
18923+
18924+ err = get_user(mov, (unsigned char __user *)regs->ip);
18925+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18926+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18927+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18928+
18929+ if (err)
18930+ break;
18931+
18932+ if (mov == 0xB9 && jmp == 0xE9) {
18933+ regs->cx = addr1;
18934+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18935+ return 2;
18936+ }
18937+ } while (0);
18938+
18939+ return 1; /* PaX in action */
18940+}
18941+
18942+#ifdef CONFIG_X86_64
18943+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18944+{
18945+ int err;
18946+
18947+ do { /* PaX: gcc trampoline emulation #1 */
18948+ unsigned short mov1, mov2, jmp1;
18949+ unsigned char jmp2;
18950+ unsigned int addr1;
18951+ unsigned long addr2;
18952+
18953+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18954+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18955+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18956+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18957+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18958+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18959+
18960+ if (err)
18961+ break;
18962+
18963+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18964+ regs->r11 = addr1;
18965+ regs->r10 = addr2;
18966+ regs->ip = addr1;
18967+ return 2;
18968+ }
18969+ } while (0);
18970+
18971+ do { /* PaX: gcc trampoline emulation #2 */
18972+ unsigned short mov1, mov2, jmp1;
18973+ unsigned char jmp2;
18974+ unsigned long addr1, addr2;
18975+
18976+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18977+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18978+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18979+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18980+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18981+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18982+
18983+ if (err)
18984+ break;
18985+
18986+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18987+ regs->r11 = addr1;
18988+ regs->r10 = addr2;
18989+ regs->ip = addr1;
18990+ return 2;
18991+ }
18992+ } while (0);
18993+
18994+ return 1; /* PaX in action */
18995+}
18996+#endif
18997+
18998+/*
18999+ * PaX: decide what to do with offenders (regs->ip = fault address)
19000+ *
19001+ * returns 1 when task should be killed
19002+ * 2 when gcc trampoline was detected
19003+ */
19004+static int pax_handle_fetch_fault(struct pt_regs *regs)
19005+{
19006+ if (v8086_mode(regs))
19007+ return 1;
19008+
19009+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19010+ return 1;
19011+
19012+#ifdef CONFIG_X86_32
19013+ return pax_handle_fetch_fault_32(regs);
19014+#else
19015+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19016+ return pax_handle_fetch_fault_32(regs);
19017+ else
19018+ return pax_handle_fetch_fault_64(regs);
19019+#endif
19020+}
19021+#endif
19022+
19023+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19024+void pax_report_insns(void *pc, void *sp)
19025+{
19026+ long i;
19027+
19028+ printk(KERN_ERR "PAX: bytes at PC: ");
19029+ for (i = 0; i < 20; i++) {
19030+ unsigned char c;
19031+ if (get_user(c, (__force unsigned char __user *)pc+i))
19032+ printk(KERN_CONT "?? ");
19033+ else
19034+ printk(KERN_CONT "%02x ", c);
19035+ }
19036+ printk("\n");
19037+
19038+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19039+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19040+ unsigned long c;
19041+ if (get_user(c, (__force unsigned long __user *)sp+i))
19042+#ifdef CONFIG_X86_32
19043+ printk(KERN_CONT "???????? ");
19044+#else
19045+ printk(KERN_CONT "???????????????? ");
19046+#endif
19047+ else
19048+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19049+ }
19050+ printk("\n");
19051+}
19052+#endif
19053+
19054+/**
19055+ * probe_kernel_write(): safely attempt to write to a location
19056+ * @dst: address to write to
19057+ * @src: pointer to the data that shall be written
19058+ * @size: size of the data chunk
19059+ *
19060+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19061+ * happens, handle that and return -EFAULT.
19062+ */
19063+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19064+{
19065+ long ret;
19066+ mm_segment_t old_fs = get_fs();
19067+
19068+ set_fs(KERNEL_DS);
19069+ pagefault_disable();
19070+ pax_open_kernel();
19071+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19072+ pax_close_kernel();
19073+ pagefault_enable();
19074+ set_fs(old_fs);
19075+
19076+ return ret ? -EFAULT : 0;
19077+}
19078diff -urNp linux-3.0.3/arch/x86/mm/gup.c linux-3.0.3/arch/x86/mm/gup.c
19079--- linux-3.0.3/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19080+++ linux-3.0.3/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19081@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19082 addr = start;
19083 len = (unsigned long) nr_pages << PAGE_SHIFT;
19084 end = start + len;
19085- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19086+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19087 (void __user *)start, len)))
19088 return 0;
19089
19090diff -urNp linux-3.0.3/arch/x86/mm/highmem_32.c linux-3.0.3/arch/x86/mm/highmem_32.c
19091--- linux-3.0.3/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19092+++ linux-3.0.3/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19093@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19094 idx = type + KM_TYPE_NR*smp_processor_id();
19095 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19096 BUG_ON(!pte_none(*(kmap_pte-idx)));
19097+
19098+ pax_open_kernel();
19099 set_pte(kmap_pte-idx, mk_pte(page, prot));
19100+ pax_close_kernel();
19101
19102 return (void *)vaddr;
19103 }
19104diff -urNp linux-3.0.3/arch/x86/mm/hugetlbpage.c linux-3.0.3/arch/x86/mm/hugetlbpage.c
19105--- linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19106+++ linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19107@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19108 struct hstate *h = hstate_file(file);
19109 struct mm_struct *mm = current->mm;
19110 struct vm_area_struct *vma;
19111- unsigned long start_addr;
19112+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19113+
19114+#ifdef CONFIG_PAX_SEGMEXEC
19115+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19116+ pax_task_size = SEGMEXEC_TASK_SIZE;
19117+#endif
19118+
19119+ pax_task_size -= PAGE_SIZE;
19120
19121 if (len > mm->cached_hole_size) {
19122- start_addr = mm->free_area_cache;
19123+ start_addr = mm->free_area_cache;
19124 } else {
19125- start_addr = TASK_UNMAPPED_BASE;
19126- mm->cached_hole_size = 0;
19127+ start_addr = mm->mmap_base;
19128+ mm->cached_hole_size = 0;
19129 }
19130
19131 full_search:
19132@@ -280,26 +287,27 @@ full_search:
19133
19134 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19135 /* At this point: (!vma || addr < vma->vm_end). */
19136- if (TASK_SIZE - len < addr) {
19137+ if (pax_task_size - len < addr) {
19138 /*
19139 * Start a new search - just in case we missed
19140 * some holes.
19141 */
19142- if (start_addr != TASK_UNMAPPED_BASE) {
19143- start_addr = TASK_UNMAPPED_BASE;
19144+ if (start_addr != mm->mmap_base) {
19145+ start_addr = mm->mmap_base;
19146 mm->cached_hole_size = 0;
19147 goto full_search;
19148 }
19149 return -ENOMEM;
19150 }
19151- if (!vma || addr + len <= vma->vm_start) {
19152- mm->free_area_cache = addr + len;
19153- return addr;
19154- }
19155+ if (check_heap_stack_gap(vma, addr, len))
19156+ break;
19157 if (addr + mm->cached_hole_size < vma->vm_start)
19158 mm->cached_hole_size = vma->vm_start - addr;
19159 addr = ALIGN(vma->vm_end, huge_page_size(h));
19160 }
19161+
19162+ mm->free_area_cache = addr + len;
19163+ return addr;
19164 }
19165
19166 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19167@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19168 {
19169 struct hstate *h = hstate_file(file);
19170 struct mm_struct *mm = current->mm;
19171- struct vm_area_struct *vma, *prev_vma;
19172- unsigned long base = mm->mmap_base, addr = addr0;
19173+ struct vm_area_struct *vma;
19174+ unsigned long base = mm->mmap_base, addr;
19175 unsigned long largest_hole = mm->cached_hole_size;
19176- int first_time = 1;
19177
19178 /* don't allow allocations above current base */
19179 if (mm->free_area_cache > base)
19180@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19181 largest_hole = 0;
19182 mm->free_area_cache = base;
19183 }
19184-try_again:
19185+
19186 /* make sure it can fit in the remaining address space */
19187 if (mm->free_area_cache < len)
19188 goto fail;
19189
19190 /* either no address requested or can't fit in requested address hole */
19191- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19192+ addr = (mm->free_area_cache - len);
19193 do {
19194+ addr &= huge_page_mask(h);
19195+ vma = find_vma(mm, addr);
19196 /*
19197 * Lookup failure means no vma is above this address,
19198 * i.e. return with success:
19199- */
19200- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19201- return addr;
19202-
19203- /*
19204 * new region fits between prev_vma->vm_end and
19205 * vma->vm_start, use it:
19206 */
19207- if (addr + len <= vma->vm_start &&
19208- (!prev_vma || (addr >= prev_vma->vm_end))) {
19209+ if (check_heap_stack_gap(vma, addr, len)) {
19210 /* remember the address as a hint for next time */
19211- mm->cached_hole_size = largest_hole;
19212- return (mm->free_area_cache = addr);
19213- } else {
19214- /* pull free_area_cache down to the first hole */
19215- if (mm->free_area_cache == vma->vm_end) {
19216- mm->free_area_cache = vma->vm_start;
19217- mm->cached_hole_size = largest_hole;
19218- }
19219+ mm->cached_hole_size = largest_hole;
19220+ return (mm->free_area_cache = addr);
19221+ }
19222+ /* pull free_area_cache down to the first hole */
19223+ if (mm->free_area_cache == vma->vm_end) {
19224+ mm->free_area_cache = vma->vm_start;
19225+ mm->cached_hole_size = largest_hole;
19226 }
19227
19228 /* remember the largest hole we saw so far */
19229 if (addr + largest_hole < vma->vm_start)
19230- largest_hole = vma->vm_start - addr;
19231+ largest_hole = vma->vm_start - addr;
19232
19233 /* try just below the current vma->vm_start */
19234- addr = (vma->vm_start - len) & huge_page_mask(h);
19235- } while (len <= vma->vm_start);
19236+ addr = skip_heap_stack_gap(vma, len);
19237+ } while (!IS_ERR_VALUE(addr));
19238
19239 fail:
19240 /*
19241- * if hint left us with no space for the requested
19242- * mapping then try again:
19243- */
19244- if (first_time) {
19245- mm->free_area_cache = base;
19246- largest_hole = 0;
19247- first_time = 0;
19248- goto try_again;
19249- }
19250- /*
19251 * A failed mmap() very likely causes application failure,
19252 * so fall back to the bottom-up function here. This scenario
19253 * can happen with large stack limits and large mmap()
19254 * allocations.
19255 */
19256- mm->free_area_cache = TASK_UNMAPPED_BASE;
19257+
19258+#ifdef CONFIG_PAX_SEGMEXEC
19259+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19260+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19261+ else
19262+#endif
19263+
19264+ mm->mmap_base = TASK_UNMAPPED_BASE;
19265+
19266+#ifdef CONFIG_PAX_RANDMMAP
19267+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19268+ mm->mmap_base += mm->delta_mmap;
19269+#endif
19270+
19271+ mm->free_area_cache = mm->mmap_base;
19272 mm->cached_hole_size = ~0UL;
19273 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19274 len, pgoff, flags);
19275@@ -386,6 +392,7 @@ fail:
19276 /*
19277 * Restore the topdown base:
19278 */
19279+ mm->mmap_base = base;
19280 mm->free_area_cache = base;
19281 mm->cached_hole_size = ~0UL;
19282
19283@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19284 struct hstate *h = hstate_file(file);
19285 struct mm_struct *mm = current->mm;
19286 struct vm_area_struct *vma;
19287+ unsigned long pax_task_size = TASK_SIZE;
19288
19289 if (len & ~huge_page_mask(h))
19290 return -EINVAL;
19291- if (len > TASK_SIZE)
19292+
19293+#ifdef CONFIG_PAX_SEGMEXEC
19294+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19295+ pax_task_size = SEGMEXEC_TASK_SIZE;
19296+#endif
19297+
19298+ pax_task_size -= PAGE_SIZE;
19299+
19300+ if (len > pax_task_size)
19301 return -ENOMEM;
19302
19303 if (flags & MAP_FIXED) {
19304@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19305 if (addr) {
19306 addr = ALIGN(addr, huge_page_size(h));
19307 vma = find_vma(mm, addr);
19308- if (TASK_SIZE - len >= addr &&
19309- (!vma || addr + len <= vma->vm_start))
19310+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19311 return addr;
19312 }
19313 if (mm->get_unmapped_area == arch_get_unmapped_area)
19314diff -urNp linux-3.0.3/arch/x86/mm/init_32.c linux-3.0.3/arch/x86/mm/init_32.c
19315--- linux-3.0.3/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19316+++ linux-3.0.3/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19317@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19318 }
19319
19320 /*
19321- * Creates a middle page table and puts a pointer to it in the
19322- * given global directory entry. This only returns the gd entry
19323- * in non-PAE compilation mode, since the middle layer is folded.
19324- */
19325-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19326-{
19327- pud_t *pud;
19328- pmd_t *pmd_table;
19329-
19330-#ifdef CONFIG_X86_PAE
19331- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19332- if (after_bootmem)
19333- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19334- else
19335- pmd_table = (pmd_t *)alloc_low_page();
19336- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19337- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19338- pud = pud_offset(pgd, 0);
19339- BUG_ON(pmd_table != pmd_offset(pud, 0));
19340-
19341- return pmd_table;
19342- }
19343-#endif
19344- pud = pud_offset(pgd, 0);
19345- pmd_table = pmd_offset(pud, 0);
19346-
19347- return pmd_table;
19348-}
19349-
19350-/*
19351 * Create a page table and place a pointer to it in a middle page
19352 * directory entry:
19353 */
19354@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19355 page_table = (pte_t *)alloc_low_page();
19356
19357 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19358+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19359+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19360+#else
19361 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19362+#endif
19363 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19364 }
19365
19366 return pte_offset_kernel(pmd, 0);
19367 }
19368
19369+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19370+{
19371+ pud_t *pud;
19372+ pmd_t *pmd_table;
19373+
19374+ pud = pud_offset(pgd, 0);
19375+ pmd_table = pmd_offset(pud, 0);
19376+
19377+ return pmd_table;
19378+}
19379+
19380 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19381 {
19382 int pgd_idx = pgd_index(vaddr);
19383@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19384 int pgd_idx, pmd_idx;
19385 unsigned long vaddr;
19386 pgd_t *pgd;
19387+ pud_t *pud;
19388 pmd_t *pmd;
19389 pte_t *pte = NULL;
19390
19391@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19392 pgd = pgd_base + pgd_idx;
19393
19394 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19395- pmd = one_md_table_init(pgd);
19396- pmd = pmd + pmd_index(vaddr);
19397+ pud = pud_offset(pgd, vaddr);
19398+ pmd = pmd_offset(pud, vaddr);
19399+
19400+#ifdef CONFIG_X86_PAE
19401+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19402+#endif
19403+
19404 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19405 pmd++, pmd_idx++) {
19406 pte = page_table_kmap_check(one_page_table_init(pmd),
19407@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19408 }
19409 }
19410
19411-static inline int is_kernel_text(unsigned long addr)
19412+static inline int is_kernel_text(unsigned long start, unsigned long end)
19413 {
19414- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19415- return 1;
19416- return 0;
19417+ if ((start > ktla_ktva((unsigned long)_etext) ||
19418+ end <= ktla_ktva((unsigned long)_stext)) &&
19419+ (start > ktla_ktva((unsigned long)_einittext) ||
19420+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19421+
19422+#ifdef CONFIG_ACPI_SLEEP
19423+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19424+#endif
19425+
19426+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19427+ return 0;
19428+ return 1;
19429 }
19430
19431 /*
19432@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19433 unsigned long last_map_addr = end;
19434 unsigned long start_pfn, end_pfn;
19435 pgd_t *pgd_base = swapper_pg_dir;
19436- int pgd_idx, pmd_idx, pte_ofs;
19437+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19438 unsigned long pfn;
19439 pgd_t *pgd;
19440+ pud_t *pud;
19441 pmd_t *pmd;
19442 pte_t *pte;
19443 unsigned pages_2m, pages_4k;
19444@@ -281,8 +282,13 @@ repeat:
19445 pfn = start_pfn;
19446 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19447 pgd = pgd_base + pgd_idx;
19448- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19449- pmd = one_md_table_init(pgd);
19450+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19451+ pud = pud_offset(pgd, 0);
19452+ pmd = pmd_offset(pud, 0);
19453+
19454+#ifdef CONFIG_X86_PAE
19455+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19456+#endif
19457
19458 if (pfn >= end_pfn)
19459 continue;
19460@@ -294,14 +300,13 @@ repeat:
19461 #endif
19462 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19463 pmd++, pmd_idx++) {
19464- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19465+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19466
19467 /*
19468 * Map with big pages if possible, otherwise
19469 * create normal page tables:
19470 */
19471 if (use_pse) {
19472- unsigned int addr2;
19473 pgprot_t prot = PAGE_KERNEL_LARGE;
19474 /*
19475 * first pass will use the same initial
19476@@ -311,11 +316,7 @@ repeat:
19477 __pgprot(PTE_IDENT_ATTR |
19478 _PAGE_PSE);
19479
19480- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19481- PAGE_OFFSET + PAGE_SIZE-1;
19482-
19483- if (is_kernel_text(addr) ||
19484- is_kernel_text(addr2))
19485+ if (is_kernel_text(address, address + PMD_SIZE))
19486 prot = PAGE_KERNEL_LARGE_EXEC;
19487
19488 pages_2m++;
19489@@ -332,7 +333,7 @@ repeat:
19490 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19491 pte += pte_ofs;
19492 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19493- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19494+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19495 pgprot_t prot = PAGE_KERNEL;
19496 /*
19497 * first pass will use the same initial
19498@@ -340,7 +341,7 @@ repeat:
19499 */
19500 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19501
19502- if (is_kernel_text(addr))
19503+ if (is_kernel_text(address, address + PAGE_SIZE))
19504 prot = PAGE_KERNEL_EXEC;
19505
19506 pages_4k++;
19507@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19508
19509 pud = pud_offset(pgd, va);
19510 pmd = pmd_offset(pud, va);
19511- if (!pmd_present(*pmd))
19512+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19513 break;
19514
19515 pte = pte_offset_kernel(pmd, va);
19516@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19517
19518 static void __init pagetable_init(void)
19519 {
19520- pgd_t *pgd_base = swapper_pg_dir;
19521-
19522- permanent_kmaps_init(pgd_base);
19523+ permanent_kmaps_init(swapper_pg_dir);
19524 }
19525
19526-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19527+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19528 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19529
19530 /* user-defined highmem size */
19531@@ -757,6 +756,12 @@ void __init mem_init(void)
19532
19533 pci_iommu_alloc();
19534
19535+#ifdef CONFIG_PAX_PER_CPU_PGD
19536+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19537+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19538+ KERNEL_PGD_PTRS);
19539+#endif
19540+
19541 #ifdef CONFIG_FLATMEM
19542 BUG_ON(!mem_map);
19543 #endif
19544@@ -774,7 +779,7 @@ void __init mem_init(void)
19545 set_highmem_pages_init();
19546
19547 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19548- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19549+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19550 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19551
19552 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19553@@ -815,10 +820,10 @@ void __init mem_init(void)
19554 ((unsigned long)&__init_end -
19555 (unsigned long)&__init_begin) >> 10,
19556
19557- (unsigned long)&_etext, (unsigned long)&_edata,
19558- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19559+ (unsigned long)&_sdata, (unsigned long)&_edata,
19560+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19561
19562- (unsigned long)&_text, (unsigned long)&_etext,
19563+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19564 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19565
19566 /*
19567@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19568 if (!kernel_set_to_readonly)
19569 return;
19570
19571+ start = ktla_ktva(start);
19572 pr_debug("Set kernel text: %lx - %lx for read write\n",
19573 start, start+size);
19574
19575@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19576 if (!kernel_set_to_readonly)
19577 return;
19578
19579+ start = ktla_ktva(start);
19580 pr_debug("Set kernel text: %lx - %lx for read only\n",
19581 start, start+size);
19582
19583@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19584 unsigned long start = PFN_ALIGN(_text);
19585 unsigned long size = PFN_ALIGN(_etext) - start;
19586
19587+ start = ktla_ktva(start);
19588 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19589 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19590 size >> 10);
19591diff -urNp linux-3.0.3/arch/x86/mm/init_64.c linux-3.0.3/arch/x86/mm/init_64.c
19592--- linux-3.0.3/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19593+++ linux-3.0.3/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19594@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19595 * around without checking the pgd every time.
19596 */
19597
19598-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19599+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19600 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19601
19602 int force_personality32;
19603@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19604
19605 for (address = start; address <= end; address += PGDIR_SIZE) {
19606 const pgd_t *pgd_ref = pgd_offset_k(address);
19607+
19608+#ifdef CONFIG_PAX_PER_CPU_PGD
19609+ unsigned long cpu;
19610+#else
19611 struct page *page;
19612+#endif
19613
19614 if (pgd_none(*pgd_ref))
19615 continue;
19616
19617 spin_lock(&pgd_lock);
19618+
19619+#ifdef CONFIG_PAX_PER_CPU_PGD
19620+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19621+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19622+#else
19623 list_for_each_entry(page, &pgd_list, lru) {
19624 pgd_t *pgd;
19625 spinlock_t *pgt_lock;
19626@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19627 /* the pgt_lock only for Xen */
19628 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19629 spin_lock(pgt_lock);
19630+#endif
19631
19632 if (pgd_none(*pgd))
19633 set_pgd(pgd, *pgd_ref);
19634@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19635 BUG_ON(pgd_page_vaddr(*pgd)
19636 != pgd_page_vaddr(*pgd_ref));
19637
19638+#ifndef CONFIG_PAX_PER_CPU_PGD
19639 spin_unlock(pgt_lock);
19640+#endif
19641+
19642 }
19643 spin_unlock(&pgd_lock);
19644 }
19645@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19646 pmd = fill_pmd(pud, vaddr);
19647 pte = fill_pte(pmd, vaddr);
19648
19649+ pax_open_kernel();
19650 set_pte(pte, new_pte);
19651+ pax_close_kernel();
19652
19653 /*
19654 * It's enough to flush this one mapping.
19655@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19656 pgd = pgd_offset_k((unsigned long)__va(phys));
19657 if (pgd_none(*pgd)) {
19658 pud = (pud_t *) spp_getpage();
19659- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19660- _PAGE_USER));
19661+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19662 }
19663 pud = pud_offset(pgd, (unsigned long)__va(phys));
19664 if (pud_none(*pud)) {
19665 pmd = (pmd_t *) spp_getpage();
19666- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19667- _PAGE_USER));
19668+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19669 }
19670 pmd = pmd_offset(pud, phys);
19671 BUG_ON(!pmd_none(*pmd));
19672@@ -693,6 +707,12 @@ void __init mem_init(void)
19673
19674 pci_iommu_alloc();
19675
19676+#ifdef CONFIG_PAX_PER_CPU_PGD
19677+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19678+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19679+ KERNEL_PGD_PTRS);
19680+#endif
19681+
19682 /* clear_bss() already clear the empty_zero_page */
19683
19684 reservedpages = 0;
19685@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19686 static struct vm_area_struct gate_vma = {
19687 .vm_start = VSYSCALL_START,
19688 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19689- .vm_page_prot = PAGE_READONLY_EXEC,
19690- .vm_flags = VM_READ | VM_EXEC
19691+ .vm_page_prot = PAGE_READONLY,
19692+ .vm_flags = VM_READ
19693 };
19694
19695 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19696@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19697
19698 const char *arch_vma_name(struct vm_area_struct *vma)
19699 {
19700- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19701+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19702 return "[vdso]";
19703 if (vma == &gate_vma)
19704 return "[vsyscall]";
19705diff -urNp linux-3.0.3/arch/x86/mm/init.c linux-3.0.3/arch/x86/mm/init.c
19706--- linux-3.0.3/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19707+++ linux-3.0.3/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19708@@ -31,7 +31,7 @@ int direct_gbpages
19709 static void __init find_early_table_space(unsigned long end, int use_pse,
19710 int use_gbpages)
19711 {
19712- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19713+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19714 phys_addr_t base;
19715
19716 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19717@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19718 */
19719 int devmem_is_allowed(unsigned long pagenr)
19720 {
19721- if (pagenr <= 256)
19722+#ifdef CONFIG_GRKERNSEC_KMEM
19723+ /* allow BDA */
19724+ if (!pagenr)
19725+ return 1;
19726+ /* allow EBDA */
19727+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19728+ return 1;
19729+#else
19730+ if (!pagenr)
19731+ return 1;
19732+#ifdef CONFIG_VM86
19733+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19734+ return 1;
19735+#endif
19736+#endif
19737+
19738+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19739 return 1;
19740+#ifdef CONFIG_GRKERNSEC_KMEM
19741+ /* throw out everything else below 1MB */
19742+ if (pagenr <= 256)
19743+ return 0;
19744+#endif
19745 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19746 return 0;
19747 if (!page_is_ram(pagenr))
19748 return 1;
19749+
19750 return 0;
19751 }
19752
19753@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19754
19755 void free_initmem(void)
19756 {
19757+
19758+#ifdef CONFIG_PAX_KERNEXEC
19759+#ifdef CONFIG_X86_32
19760+ /* PaX: limit KERNEL_CS to actual size */
19761+ unsigned long addr, limit;
19762+ struct desc_struct d;
19763+ int cpu;
19764+
19765+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19766+ limit = (limit - 1UL) >> PAGE_SHIFT;
19767+
19768+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19769+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19770+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19771+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19772+ }
19773+
19774+ /* PaX: make KERNEL_CS read-only */
19775+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19776+ if (!paravirt_enabled())
19777+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19778+/*
19779+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19780+ pgd = pgd_offset_k(addr);
19781+ pud = pud_offset(pgd, addr);
19782+ pmd = pmd_offset(pud, addr);
19783+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19784+ }
19785+*/
19786+#ifdef CONFIG_X86_PAE
19787+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19788+/*
19789+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19790+ pgd = pgd_offset_k(addr);
19791+ pud = pud_offset(pgd, addr);
19792+ pmd = pmd_offset(pud, addr);
19793+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19794+ }
19795+*/
19796+#endif
19797+
19798+#ifdef CONFIG_MODULES
19799+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19800+#endif
19801+
19802+#else
19803+ pgd_t *pgd;
19804+ pud_t *pud;
19805+ pmd_t *pmd;
19806+ unsigned long addr, end;
19807+
19808+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19809+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19810+ pgd = pgd_offset_k(addr);
19811+ pud = pud_offset(pgd, addr);
19812+ pmd = pmd_offset(pud, addr);
19813+ if (!pmd_present(*pmd))
19814+ continue;
19815+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19816+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19817+ else
19818+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19819+ }
19820+
19821+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19822+ end = addr + KERNEL_IMAGE_SIZE;
19823+ for (; addr < end; addr += PMD_SIZE) {
19824+ pgd = pgd_offset_k(addr);
19825+ pud = pud_offset(pgd, addr);
19826+ pmd = pmd_offset(pud, addr);
19827+ if (!pmd_present(*pmd))
19828+ continue;
19829+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19830+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19831+ }
19832+#endif
19833+
19834+ flush_tlb_all();
19835+#endif
19836+
19837 free_init_pages("unused kernel memory",
19838 (unsigned long)(&__init_begin),
19839 (unsigned long)(&__init_end));
19840diff -urNp linux-3.0.3/arch/x86/mm/iomap_32.c linux-3.0.3/arch/x86/mm/iomap_32.c
19841--- linux-3.0.3/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19842+++ linux-3.0.3/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19843@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19844 type = kmap_atomic_idx_push();
19845 idx = type + KM_TYPE_NR * smp_processor_id();
19846 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19847+
19848+ pax_open_kernel();
19849 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19850+ pax_close_kernel();
19851+
19852 arch_flush_lazy_mmu_mode();
19853
19854 return (void *)vaddr;
19855diff -urNp linux-3.0.3/arch/x86/mm/ioremap.c linux-3.0.3/arch/x86/mm/ioremap.c
19856--- linux-3.0.3/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19857+++ linux-3.0.3/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19858@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19859 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19860 int is_ram = page_is_ram(pfn);
19861
19862- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19863+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19864 return NULL;
19865 WARN_ON_ONCE(is_ram);
19866 }
19867@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19868 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19869
19870 static __initdata int after_paging_init;
19871-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19872+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19873
19874 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19875 {
19876@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19877 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19878
19879 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19880- memset(bm_pte, 0, sizeof(bm_pte));
19881- pmd_populate_kernel(&init_mm, pmd, bm_pte);
19882+ pmd_populate_user(&init_mm, pmd, bm_pte);
19883
19884 /*
19885 * The boot-ioremap range spans multiple pmds, for which
19886diff -urNp linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c
19887--- linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19888+++ linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19889@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19890 * memory (e.g. tracked pages)? For now, we need this to avoid
19891 * invoking kmemcheck for PnP BIOS calls.
19892 */
19893- if (regs->flags & X86_VM_MASK)
19894+ if (v8086_mode(regs))
19895 return false;
19896- if (regs->cs != __KERNEL_CS)
19897+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19898 return false;
19899
19900 pte = kmemcheck_pte_lookup(address);
19901diff -urNp linux-3.0.3/arch/x86/mm/mmap.c linux-3.0.3/arch/x86/mm/mmap.c
19902--- linux-3.0.3/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19903+++ linux-3.0.3/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19904@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19905 * Leave an at least ~128 MB hole with possible stack randomization.
19906 */
19907 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19908-#define MAX_GAP (TASK_SIZE/6*5)
19909+#define MAX_GAP (pax_task_size/6*5)
19910
19911 /*
19912 * True on X86_32 or when emulating IA32 on X86_64
19913@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19914 return rnd << PAGE_SHIFT;
19915 }
19916
19917-static unsigned long mmap_base(void)
19918+static unsigned long mmap_base(struct mm_struct *mm)
19919 {
19920 unsigned long gap = rlimit(RLIMIT_STACK);
19921+ unsigned long pax_task_size = TASK_SIZE;
19922+
19923+#ifdef CONFIG_PAX_SEGMEXEC
19924+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19925+ pax_task_size = SEGMEXEC_TASK_SIZE;
19926+#endif
19927
19928 if (gap < MIN_GAP)
19929 gap = MIN_GAP;
19930 else if (gap > MAX_GAP)
19931 gap = MAX_GAP;
19932
19933- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19934+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19935 }
19936
19937 /*
19938 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19939 * does, but not when emulating X86_32
19940 */
19941-static unsigned long mmap_legacy_base(void)
19942+static unsigned long mmap_legacy_base(struct mm_struct *mm)
19943 {
19944- if (mmap_is_ia32())
19945+ if (mmap_is_ia32()) {
19946+
19947+#ifdef CONFIG_PAX_SEGMEXEC
19948+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19949+ return SEGMEXEC_TASK_UNMAPPED_BASE;
19950+ else
19951+#endif
19952+
19953 return TASK_UNMAPPED_BASE;
19954- else
19955+ } else
19956 return TASK_UNMAPPED_BASE + mmap_rnd();
19957 }
19958
19959@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19960 void arch_pick_mmap_layout(struct mm_struct *mm)
19961 {
19962 if (mmap_is_legacy()) {
19963- mm->mmap_base = mmap_legacy_base();
19964+ mm->mmap_base = mmap_legacy_base(mm);
19965+
19966+#ifdef CONFIG_PAX_RANDMMAP
19967+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19968+ mm->mmap_base += mm->delta_mmap;
19969+#endif
19970+
19971 mm->get_unmapped_area = arch_get_unmapped_area;
19972 mm->unmap_area = arch_unmap_area;
19973 } else {
19974- mm->mmap_base = mmap_base();
19975+ mm->mmap_base = mmap_base(mm);
19976+
19977+#ifdef CONFIG_PAX_RANDMMAP
19978+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19979+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19980+#endif
19981+
19982 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19983 mm->unmap_area = arch_unmap_area_topdown;
19984 }
19985diff -urNp linux-3.0.3/arch/x86/mm/mmio-mod.c linux-3.0.3/arch/x86/mm/mmio-mod.c
19986--- linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19987+++ linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19988@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19989 break;
19990 default:
19991 {
19992- unsigned char *ip = (unsigned char *)instptr;
19993+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19994 my_trace->opcode = MMIO_UNKNOWN_OP;
19995 my_trace->width = 0;
19996 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19997@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19998 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19999 void __iomem *addr)
20000 {
20001- static atomic_t next_id;
20002+ static atomic_unchecked_t next_id;
20003 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20004 /* These are page-unaligned. */
20005 struct mmiotrace_map map = {
20006@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20007 .private = trace
20008 },
20009 .phys = offset,
20010- .id = atomic_inc_return(&next_id)
20011+ .id = atomic_inc_return_unchecked(&next_id)
20012 };
20013 map.map_id = trace->id;
20014
20015diff -urNp linux-3.0.3/arch/x86/mm/pageattr.c linux-3.0.3/arch/x86/mm/pageattr.c
20016--- linux-3.0.3/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20017+++ linux-3.0.3/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20018@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20019 */
20020 #ifdef CONFIG_PCI_BIOS
20021 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20022- pgprot_val(forbidden) |= _PAGE_NX;
20023+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20024 #endif
20025
20026 /*
20027@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20028 * Does not cover __inittext since that is gone later on. On
20029 * 64bit we do not enforce !NX on the low mapping
20030 */
20031- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20032- pgprot_val(forbidden) |= _PAGE_NX;
20033+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20034+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20035
20036+#ifdef CONFIG_DEBUG_RODATA
20037 /*
20038 * The .rodata section needs to be read-only. Using the pfn
20039 * catches all aliases.
20040@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20041 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20042 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20043 pgprot_val(forbidden) |= _PAGE_RW;
20044+#endif
20045
20046 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20047 /*
20048@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20049 }
20050 #endif
20051
20052+#ifdef CONFIG_PAX_KERNEXEC
20053+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20054+ pgprot_val(forbidden) |= _PAGE_RW;
20055+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20056+ }
20057+#endif
20058+
20059 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20060
20061 return prot;
20062@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20063 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20064 {
20065 /* change init_mm */
20066+ pax_open_kernel();
20067 set_pte_atomic(kpte, pte);
20068+
20069 #ifdef CONFIG_X86_32
20070 if (!SHARED_KERNEL_PMD) {
20071+
20072+#ifdef CONFIG_PAX_PER_CPU_PGD
20073+ unsigned long cpu;
20074+#else
20075 struct page *page;
20076+#endif
20077
20078+#ifdef CONFIG_PAX_PER_CPU_PGD
20079+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20080+ pgd_t *pgd = get_cpu_pgd(cpu);
20081+#else
20082 list_for_each_entry(page, &pgd_list, lru) {
20083- pgd_t *pgd;
20084+ pgd_t *pgd = (pgd_t *)page_address(page);
20085+#endif
20086+
20087 pud_t *pud;
20088 pmd_t *pmd;
20089
20090- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20091+ pgd += pgd_index(address);
20092 pud = pud_offset(pgd, address);
20093 pmd = pmd_offset(pud, address);
20094 set_pte_atomic((pte_t *)pmd, pte);
20095 }
20096 }
20097 #endif
20098+ pax_close_kernel();
20099 }
20100
20101 static int
20102diff -urNp linux-3.0.3/arch/x86/mm/pageattr-test.c linux-3.0.3/arch/x86/mm/pageattr-test.c
20103--- linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20104+++ linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20105@@ -36,7 +36,7 @@ enum {
20106
20107 static int pte_testbit(pte_t pte)
20108 {
20109- return pte_flags(pte) & _PAGE_UNUSED1;
20110+ return pte_flags(pte) & _PAGE_CPA_TEST;
20111 }
20112
20113 struct split_state {
20114diff -urNp linux-3.0.3/arch/x86/mm/pat.c linux-3.0.3/arch/x86/mm/pat.c
20115--- linux-3.0.3/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20116+++ linux-3.0.3/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20117@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20118
20119 if (!entry) {
20120 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20121- current->comm, current->pid, start, end);
20122+ current->comm, task_pid_nr(current), start, end);
20123 return -EINVAL;
20124 }
20125
20126@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20127 while (cursor < to) {
20128 if (!devmem_is_allowed(pfn)) {
20129 printk(KERN_INFO
20130- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20131- current->comm, from, to);
20132+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20133+ current->comm, from, to, cursor);
20134 return 0;
20135 }
20136 cursor += PAGE_SIZE;
20137@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20138 printk(KERN_INFO
20139 "%s:%d ioremap_change_attr failed %s "
20140 "for %Lx-%Lx\n",
20141- current->comm, current->pid,
20142+ current->comm, task_pid_nr(current),
20143 cattr_name(flags),
20144 base, (unsigned long long)(base + size));
20145 return -EINVAL;
20146@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20147 if (want_flags != flags) {
20148 printk(KERN_WARNING
20149 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20150- current->comm, current->pid,
20151+ current->comm, task_pid_nr(current),
20152 cattr_name(want_flags),
20153 (unsigned long long)paddr,
20154 (unsigned long long)(paddr + size),
20155@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20156 free_memtype(paddr, paddr + size);
20157 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20158 " for %Lx-%Lx, got %s\n",
20159- current->comm, current->pid,
20160+ current->comm, task_pid_nr(current),
20161 cattr_name(want_flags),
20162 (unsigned long long)paddr,
20163 (unsigned long long)(paddr + size),
20164diff -urNp linux-3.0.3/arch/x86/mm/pf_in.c linux-3.0.3/arch/x86/mm/pf_in.c
20165--- linux-3.0.3/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20166+++ linux-3.0.3/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20167@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20168 int i;
20169 enum reason_type rv = OTHERS;
20170
20171- p = (unsigned char *)ins_addr;
20172+ p = (unsigned char *)ktla_ktva(ins_addr);
20173 p += skip_prefix(p, &prf);
20174 p += get_opcode(p, &opcode);
20175
20176@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20177 struct prefix_bits prf;
20178 int i;
20179
20180- p = (unsigned char *)ins_addr;
20181+ p = (unsigned char *)ktla_ktva(ins_addr);
20182 p += skip_prefix(p, &prf);
20183 p += get_opcode(p, &opcode);
20184
20185@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20186 struct prefix_bits prf;
20187 int i;
20188
20189- p = (unsigned char *)ins_addr;
20190+ p = (unsigned char *)ktla_ktva(ins_addr);
20191 p += skip_prefix(p, &prf);
20192 p += get_opcode(p, &opcode);
20193
20194@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20195 struct prefix_bits prf;
20196 int i;
20197
20198- p = (unsigned char *)ins_addr;
20199+ p = (unsigned char *)ktla_ktva(ins_addr);
20200 p += skip_prefix(p, &prf);
20201 p += get_opcode(p, &opcode);
20202 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20203@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20204 struct prefix_bits prf;
20205 int i;
20206
20207- p = (unsigned char *)ins_addr;
20208+ p = (unsigned char *)ktla_ktva(ins_addr);
20209 p += skip_prefix(p, &prf);
20210 p += get_opcode(p, &opcode);
20211 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20212diff -urNp linux-3.0.3/arch/x86/mm/pgtable_32.c linux-3.0.3/arch/x86/mm/pgtable_32.c
20213--- linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20214+++ linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20215@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20216 return;
20217 }
20218 pte = pte_offset_kernel(pmd, vaddr);
20219+
20220+ pax_open_kernel();
20221 if (pte_val(pteval))
20222 set_pte_at(&init_mm, vaddr, pte, pteval);
20223 else
20224 pte_clear(&init_mm, vaddr, pte);
20225+ pax_close_kernel();
20226
20227 /*
20228 * It's enough to flush this one mapping.
20229diff -urNp linux-3.0.3/arch/x86/mm/pgtable.c linux-3.0.3/arch/x86/mm/pgtable.c
20230--- linux-3.0.3/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20231+++ linux-3.0.3/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20232@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20233 list_del(&page->lru);
20234 }
20235
20236-#define UNSHARED_PTRS_PER_PGD \
20237- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20238+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20239+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20240
20241+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20242+{
20243+ while (count--)
20244+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20245+}
20246+#endif
20247+
20248+#ifdef CONFIG_PAX_PER_CPU_PGD
20249+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20250+{
20251+ while (count--)
20252+
20253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20254+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20255+#else
20256+ *dst++ = *src++;
20257+#endif
20258
20259+}
20260+#endif
20261+
20262+#ifdef CONFIG_X86_64
20263+#define pxd_t pud_t
20264+#define pyd_t pgd_t
20265+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20266+#define pxd_free(mm, pud) pud_free((mm), (pud))
20267+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20268+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20269+#define PYD_SIZE PGDIR_SIZE
20270+#else
20271+#define pxd_t pmd_t
20272+#define pyd_t pud_t
20273+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20274+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20275+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20276+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20277+#define PYD_SIZE PUD_SIZE
20278+#endif
20279+
20280+#ifdef CONFIG_PAX_PER_CPU_PGD
20281+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20282+static inline void pgd_dtor(pgd_t *pgd) {}
20283+#else
20284 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20285 {
20286 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20287@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20288 pgd_list_del(pgd);
20289 spin_unlock(&pgd_lock);
20290 }
20291+#endif
20292
20293 /*
20294 * List of all pgd's needed for non-PAE so it can invalidate entries
20295@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20296 * -- wli
20297 */
20298
20299-#ifdef CONFIG_X86_PAE
20300+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20301 /*
20302 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20303 * updating the top-level pagetable entries to guarantee the
20304@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20305 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20306 * and initialize the kernel pmds here.
20307 */
20308-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20309+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20310
20311 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20312 {
20313@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20314 */
20315 flush_tlb_mm(mm);
20316 }
20317+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20318+#define PREALLOCATED_PXDS USER_PGD_PTRS
20319 #else /* !CONFIG_X86_PAE */
20320
20321 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20322-#define PREALLOCATED_PMDS 0
20323+#define PREALLOCATED_PXDS 0
20324
20325 #endif /* CONFIG_X86_PAE */
20326
20327-static void free_pmds(pmd_t *pmds[])
20328+static void free_pxds(pxd_t *pxds[])
20329 {
20330 int i;
20331
20332- for(i = 0; i < PREALLOCATED_PMDS; i++)
20333- if (pmds[i])
20334- free_page((unsigned long)pmds[i]);
20335+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20336+ if (pxds[i])
20337+ free_page((unsigned long)pxds[i]);
20338 }
20339
20340-static int preallocate_pmds(pmd_t *pmds[])
20341+static int preallocate_pxds(pxd_t *pxds[])
20342 {
20343 int i;
20344 bool failed = false;
20345
20346- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20347- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20348- if (pmd == NULL)
20349+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20350+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20351+ if (pxd == NULL)
20352 failed = true;
20353- pmds[i] = pmd;
20354+ pxds[i] = pxd;
20355 }
20356
20357 if (failed) {
20358- free_pmds(pmds);
20359+ free_pxds(pxds);
20360 return -ENOMEM;
20361 }
20362
20363@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20364 * preallocate which never got a corresponding vma will need to be
20365 * freed manually.
20366 */
20367-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20368+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20369 {
20370 int i;
20371
20372- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20373+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20374 pgd_t pgd = pgdp[i];
20375
20376 if (pgd_val(pgd) != 0) {
20377- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20378+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20379
20380- pgdp[i] = native_make_pgd(0);
20381+ set_pgd(pgdp + i, native_make_pgd(0));
20382
20383- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20384- pmd_free(mm, pmd);
20385+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20386+ pxd_free(mm, pxd);
20387 }
20388 }
20389 }
20390
20391-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20392+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20393 {
20394- pud_t *pud;
20395+ pyd_t *pyd;
20396 unsigned long addr;
20397 int i;
20398
20399- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20400+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20401 return;
20402
20403- pud = pud_offset(pgd, 0);
20404+#ifdef CONFIG_X86_64
20405+ pyd = pyd_offset(mm, 0L);
20406+#else
20407+ pyd = pyd_offset(pgd, 0L);
20408+#endif
20409
20410- for (addr = i = 0; i < PREALLOCATED_PMDS;
20411- i++, pud++, addr += PUD_SIZE) {
20412- pmd_t *pmd = pmds[i];
20413+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20414+ i++, pyd++, addr += PYD_SIZE) {
20415+ pxd_t *pxd = pxds[i];
20416
20417 if (i >= KERNEL_PGD_BOUNDARY)
20418- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20419- sizeof(pmd_t) * PTRS_PER_PMD);
20420+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20421+ sizeof(pxd_t) * PTRS_PER_PMD);
20422
20423- pud_populate(mm, pud, pmd);
20424+ pyd_populate(mm, pyd, pxd);
20425 }
20426 }
20427
20428 pgd_t *pgd_alloc(struct mm_struct *mm)
20429 {
20430 pgd_t *pgd;
20431- pmd_t *pmds[PREALLOCATED_PMDS];
20432+ pxd_t *pxds[PREALLOCATED_PXDS];
20433
20434 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20435
20436@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20437
20438 mm->pgd = pgd;
20439
20440- if (preallocate_pmds(pmds) != 0)
20441+ if (preallocate_pxds(pxds) != 0)
20442 goto out_free_pgd;
20443
20444 if (paravirt_pgd_alloc(mm) != 0)
20445- goto out_free_pmds;
20446+ goto out_free_pxds;
20447
20448 /*
20449 * Make sure that pre-populating the pmds is atomic with
20450@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20451 spin_lock(&pgd_lock);
20452
20453 pgd_ctor(mm, pgd);
20454- pgd_prepopulate_pmd(mm, pgd, pmds);
20455+ pgd_prepopulate_pxd(mm, pgd, pxds);
20456
20457 spin_unlock(&pgd_lock);
20458
20459 return pgd;
20460
20461-out_free_pmds:
20462- free_pmds(pmds);
20463+out_free_pxds:
20464+ free_pxds(pxds);
20465 out_free_pgd:
20466 free_page((unsigned long)pgd);
20467 out:
20468@@ -295,7 +344,7 @@ out:
20469
20470 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20471 {
20472- pgd_mop_up_pmds(mm, pgd);
20473+ pgd_mop_up_pxds(mm, pgd);
20474 pgd_dtor(pgd);
20475 paravirt_pgd_free(mm, pgd);
20476 free_page((unsigned long)pgd);
20477diff -urNp linux-3.0.3/arch/x86/mm/setup_nx.c linux-3.0.3/arch/x86/mm/setup_nx.c
20478--- linux-3.0.3/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20479+++ linux-3.0.3/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20480@@ -5,8 +5,10 @@
20481 #include <asm/pgtable.h>
20482 #include <asm/proto.h>
20483
20484+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20485 static int disable_nx __cpuinitdata;
20486
20487+#ifndef CONFIG_PAX_PAGEEXEC
20488 /*
20489 * noexec = on|off
20490 *
20491@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20492 return 0;
20493 }
20494 early_param("noexec", noexec_setup);
20495+#endif
20496+
20497+#endif
20498
20499 void __cpuinit x86_configure_nx(void)
20500 {
20501+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20502 if (cpu_has_nx && !disable_nx)
20503 __supported_pte_mask |= _PAGE_NX;
20504 else
20505+#endif
20506 __supported_pte_mask &= ~_PAGE_NX;
20507 }
20508
20509diff -urNp linux-3.0.3/arch/x86/mm/tlb.c linux-3.0.3/arch/x86/mm/tlb.c
20510--- linux-3.0.3/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20511+++ linux-3.0.3/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20512@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20513 BUG();
20514 cpumask_clear_cpu(cpu,
20515 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20516+
20517+#ifndef CONFIG_PAX_PER_CPU_PGD
20518 load_cr3(swapper_pg_dir);
20519+#endif
20520+
20521 }
20522 EXPORT_SYMBOL_GPL(leave_mm);
20523
20524diff -urNp linux-3.0.3/arch/x86/net/bpf_jit_comp.c linux-3.0.3/arch/x86/net/bpf_jit_comp.c
20525--- linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20526+++ linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20527@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20528 module_free(NULL, image);
20529 return;
20530 }
20531+ pax_open_kernel();
20532 memcpy(image + proglen, temp, ilen);
20533+ pax_close_kernel();
20534 }
20535 proglen += ilen;
20536 addrs[i] = proglen;
20537@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20538 break;
20539 }
20540 if (proglen == oldproglen) {
20541- image = module_alloc(max_t(unsigned int,
20542+ image = module_alloc_exec(max_t(unsigned int,
20543 proglen,
20544 sizeof(struct work_struct)));
20545 if (!image)
20546diff -urNp linux-3.0.3/arch/x86/oprofile/backtrace.c linux-3.0.3/arch/x86/oprofile/backtrace.c
20547--- linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20548+++ linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20549@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20550 {
20551 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20552
20553- if (!user_mode_vm(regs)) {
20554+ if (!user_mode(regs)) {
20555 unsigned long stack = kernel_stack_pointer(regs);
20556 if (depth)
20557 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20558diff -urNp linux-3.0.3/arch/x86/pci/mrst.c linux-3.0.3/arch/x86/pci/mrst.c
20559--- linux-3.0.3/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20560+++ linux-3.0.3/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20561@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20562 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20563 pci_mmcfg_late_init();
20564 pcibios_enable_irq = mrst_pci_irq_enable;
20565- pci_root_ops = pci_mrst_ops;
20566+ pax_open_kernel();
20567+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20568+ pax_close_kernel();
20569 /* Continue with standard init */
20570 return 1;
20571 }
20572diff -urNp linux-3.0.3/arch/x86/pci/pcbios.c linux-3.0.3/arch/x86/pci/pcbios.c
20573--- linux-3.0.3/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20574+++ linux-3.0.3/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20575@@ -79,50 +79,93 @@ union bios32 {
20576 static struct {
20577 unsigned long address;
20578 unsigned short segment;
20579-} bios32_indirect = { 0, __KERNEL_CS };
20580+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20581
20582 /*
20583 * Returns the entry point for the given service, NULL on error
20584 */
20585
20586-static unsigned long bios32_service(unsigned long service)
20587+static unsigned long __devinit bios32_service(unsigned long service)
20588 {
20589 unsigned char return_code; /* %al */
20590 unsigned long address; /* %ebx */
20591 unsigned long length; /* %ecx */
20592 unsigned long entry; /* %edx */
20593 unsigned long flags;
20594+ struct desc_struct d, *gdt;
20595
20596 local_irq_save(flags);
20597- __asm__("lcall *(%%edi); cld"
20598+
20599+ gdt = get_cpu_gdt_table(smp_processor_id());
20600+
20601+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20602+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20603+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20604+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20605+
20606+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20607 : "=a" (return_code),
20608 "=b" (address),
20609 "=c" (length),
20610 "=d" (entry)
20611 : "0" (service),
20612 "1" (0),
20613- "D" (&bios32_indirect));
20614+ "D" (&bios32_indirect),
20615+ "r"(__PCIBIOS_DS)
20616+ : "memory");
20617+
20618+ pax_open_kernel();
20619+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20620+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20621+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20622+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20623+ pax_close_kernel();
20624+
20625 local_irq_restore(flags);
20626
20627 switch (return_code) {
20628- case 0:
20629- return address + entry;
20630- case 0x80: /* Not present */
20631- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20632- return 0;
20633- default: /* Shouldn't happen */
20634- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20635- service, return_code);
20636+ case 0: {
20637+ int cpu;
20638+ unsigned char flags;
20639+
20640+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20641+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20642+ printk(KERN_WARNING "bios32_service: not valid\n");
20643 return 0;
20644+ }
20645+ address = address + PAGE_OFFSET;
20646+ length += 16UL; /* some BIOSs underreport this... */
20647+ flags = 4;
20648+ if (length >= 64*1024*1024) {
20649+ length >>= PAGE_SHIFT;
20650+ flags |= 8;
20651+ }
20652+
20653+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20654+ gdt = get_cpu_gdt_table(cpu);
20655+ pack_descriptor(&d, address, length, 0x9b, flags);
20656+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20657+ pack_descriptor(&d, address, length, 0x93, flags);
20658+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20659+ }
20660+ return entry;
20661+ }
20662+ case 0x80: /* Not present */
20663+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20664+ return 0;
20665+ default: /* Shouldn't happen */
20666+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20667+ service, return_code);
20668+ return 0;
20669 }
20670 }
20671
20672 static struct {
20673 unsigned long address;
20674 unsigned short segment;
20675-} pci_indirect = { 0, __KERNEL_CS };
20676+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20677
20678-static int pci_bios_present;
20679+static int pci_bios_present __read_only;
20680
20681 static int __devinit check_pcibios(void)
20682 {
20683@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20684 unsigned long flags, pcibios_entry;
20685
20686 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20687- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20688+ pci_indirect.address = pcibios_entry;
20689
20690 local_irq_save(flags);
20691- __asm__(
20692- "lcall *(%%edi); cld\n\t"
20693+ __asm__("movw %w6, %%ds\n\t"
20694+ "lcall *%%ss:(%%edi); cld\n\t"
20695+ "push %%ss\n\t"
20696+ "pop %%ds\n\t"
20697 "jc 1f\n\t"
20698 "xor %%ah, %%ah\n"
20699 "1:"
20700@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20701 "=b" (ebx),
20702 "=c" (ecx)
20703 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20704- "D" (&pci_indirect)
20705+ "D" (&pci_indirect),
20706+ "r" (__PCIBIOS_DS)
20707 : "memory");
20708 local_irq_restore(flags);
20709
20710@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20711
20712 switch (len) {
20713 case 1:
20714- __asm__("lcall *(%%esi); cld\n\t"
20715+ __asm__("movw %w6, %%ds\n\t"
20716+ "lcall *%%ss:(%%esi); cld\n\t"
20717+ "push %%ss\n\t"
20718+ "pop %%ds\n\t"
20719 "jc 1f\n\t"
20720 "xor %%ah, %%ah\n"
20721 "1:"
20722@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20723 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20724 "b" (bx),
20725 "D" ((long)reg),
20726- "S" (&pci_indirect));
20727+ "S" (&pci_indirect),
20728+ "r" (__PCIBIOS_DS));
20729 /*
20730 * Zero-extend the result beyond 8 bits, do not trust the
20731 * BIOS having done it:
20732@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20733 *value &= 0xff;
20734 break;
20735 case 2:
20736- __asm__("lcall *(%%esi); cld\n\t"
20737+ __asm__("movw %w6, %%ds\n\t"
20738+ "lcall *%%ss:(%%esi); cld\n\t"
20739+ "push %%ss\n\t"
20740+ "pop %%ds\n\t"
20741 "jc 1f\n\t"
20742 "xor %%ah, %%ah\n"
20743 "1:"
20744@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20745 : "1" (PCIBIOS_READ_CONFIG_WORD),
20746 "b" (bx),
20747 "D" ((long)reg),
20748- "S" (&pci_indirect));
20749+ "S" (&pci_indirect),
20750+ "r" (__PCIBIOS_DS));
20751 /*
20752 * Zero-extend the result beyond 16 bits, do not trust the
20753 * BIOS having done it:
20754@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20755 *value &= 0xffff;
20756 break;
20757 case 4:
20758- __asm__("lcall *(%%esi); cld\n\t"
20759+ __asm__("movw %w6, %%ds\n\t"
20760+ "lcall *%%ss:(%%esi); cld\n\t"
20761+ "push %%ss\n\t"
20762+ "pop %%ds\n\t"
20763 "jc 1f\n\t"
20764 "xor %%ah, %%ah\n"
20765 "1:"
20766@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20767 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20768 "b" (bx),
20769 "D" ((long)reg),
20770- "S" (&pci_indirect));
20771+ "S" (&pci_indirect),
20772+ "r" (__PCIBIOS_DS));
20773 break;
20774 }
20775
20776@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20777
20778 switch (len) {
20779 case 1:
20780- __asm__("lcall *(%%esi); cld\n\t"
20781+ __asm__("movw %w6, %%ds\n\t"
20782+ "lcall *%%ss:(%%esi); cld\n\t"
20783+ "push %%ss\n\t"
20784+ "pop %%ds\n\t"
20785 "jc 1f\n\t"
20786 "xor %%ah, %%ah\n"
20787 "1:"
20788@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20789 "c" (value),
20790 "b" (bx),
20791 "D" ((long)reg),
20792- "S" (&pci_indirect));
20793+ "S" (&pci_indirect),
20794+ "r" (__PCIBIOS_DS));
20795 break;
20796 case 2:
20797- __asm__("lcall *(%%esi); cld\n\t"
20798+ __asm__("movw %w6, %%ds\n\t"
20799+ "lcall *%%ss:(%%esi); cld\n\t"
20800+ "push %%ss\n\t"
20801+ "pop %%ds\n\t"
20802 "jc 1f\n\t"
20803 "xor %%ah, %%ah\n"
20804 "1:"
20805@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20806 "c" (value),
20807 "b" (bx),
20808 "D" ((long)reg),
20809- "S" (&pci_indirect));
20810+ "S" (&pci_indirect),
20811+ "r" (__PCIBIOS_DS));
20812 break;
20813 case 4:
20814- __asm__("lcall *(%%esi); cld\n\t"
20815+ __asm__("movw %w6, %%ds\n\t"
20816+ "lcall *%%ss:(%%esi); cld\n\t"
20817+ "push %%ss\n\t"
20818+ "pop %%ds\n\t"
20819 "jc 1f\n\t"
20820 "xor %%ah, %%ah\n"
20821 "1:"
20822@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20823 "c" (value),
20824 "b" (bx),
20825 "D" ((long)reg),
20826- "S" (&pci_indirect));
20827+ "S" (&pci_indirect),
20828+ "r" (__PCIBIOS_DS));
20829 break;
20830 }
20831
20832@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20833
20834 DBG("PCI: Fetching IRQ routing table... ");
20835 __asm__("push %%es\n\t"
20836+ "movw %w8, %%ds\n\t"
20837 "push %%ds\n\t"
20838 "pop %%es\n\t"
20839- "lcall *(%%esi); cld\n\t"
20840+ "lcall *%%ss:(%%esi); cld\n\t"
20841 "pop %%es\n\t"
20842+ "push %%ss\n\t"
20843+ "pop %%ds\n"
20844 "jc 1f\n\t"
20845 "xor %%ah, %%ah\n"
20846 "1:"
20847@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20848 "1" (0),
20849 "D" ((long) &opt),
20850 "S" (&pci_indirect),
20851- "m" (opt)
20852+ "m" (opt),
20853+ "r" (__PCIBIOS_DS)
20854 : "memory");
20855 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20856 if (ret & 0xff00)
20857@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20858 {
20859 int ret;
20860
20861- __asm__("lcall *(%%esi); cld\n\t"
20862+ __asm__("movw %w5, %%ds\n\t"
20863+ "lcall *%%ss:(%%esi); cld\n\t"
20864+ "push %%ss\n\t"
20865+ "pop %%ds\n"
20866 "jc 1f\n\t"
20867 "xor %%ah, %%ah\n"
20868 "1:"
20869@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20870 : "0" (PCIBIOS_SET_PCI_HW_INT),
20871 "b" ((dev->bus->number << 8) | dev->devfn),
20872 "c" ((irq << 8) | (pin + 10)),
20873- "S" (&pci_indirect));
20874+ "S" (&pci_indirect),
20875+ "r" (__PCIBIOS_DS));
20876 return !(ret & 0xff00);
20877 }
20878 EXPORT_SYMBOL(pcibios_set_irq_routing);
20879diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_32.c linux-3.0.3/arch/x86/platform/efi/efi_32.c
20880--- linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20881+++ linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20882@@ -38,70 +38,37 @@
20883 */
20884
20885 static unsigned long efi_rt_eflags;
20886-static pgd_t efi_bak_pg_dir_pointer[2];
20887+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20888
20889-void efi_call_phys_prelog(void)
20890+void __init efi_call_phys_prelog(void)
20891 {
20892- unsigned long cr4;
20893- unsigned long temp;
20894 struct desc_ptr gdt_descr;
20895
20896 local_irq_save(efi_rt_eflags);
20897
20898- /*
20899- * If I don't have PAE, I should just duplicate two entries in page
20900- * directory. If I have PAE, I just need to duplicate one entry in
20901- * page directory.
20902- */
20903- cr4 = read_cr4_safe();
20904-
20905- if (cr4 & X86_CR4_PAE) {
20906- efi_bak_pg_dir_pointer[0].pgd =
20907- swapper_pg_dir[pgd_index(0)].pgd;
20908- swapper_pg_dir[0].pgd =
20909- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20910- } else {
20911- efi_bak_pg_dir_pointer[0].pgd =
20912- swapper_pg_dir[pgd_index(0)].pgd;
20913- efi_bak_pg_dir_pointer[1].pgd =
20914- swapper_pg_dir[pgd_index(0x400000)].pgd;
20915- swapper_pg_dir[pgd_index(0)].pgd =
20916- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20917- temp = PAGE_OFFSET + 0x400000;
20918- swapper_pg_dir[pgd_index(0x400000)].pgd =
20919- swapper_pg_dir[pgd_index(temp)].pgd;
20920- }
20921+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20922+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20923+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20924
20925 /*
20926 * After the lock is released, the original page table is restored.
20927 */
20928 __flush_tlb_all();
20929
20930- gdt_descr.address = __pa(get_cpu_gdt_table(0));
20931+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20932 gdt_descr.size = GDT_SIZE - 1;
20933 load_gdt(&gdt_descr);
20934 }
20935
20936-void efi_call_phys_epilog(void)
20937+void __init efi_call_phys_epilog(void)
20938 {
20939- unsigned long cr4;
20940 struct desc_ptr gdt_descr;
20941
20942- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20943+ gdt_descr.address = get_cpu_gdt_table(0);
20944 gdt_descr.size = GDT_SIZE - 1;
20945 load_gdt(&gdt_descr);
20946
20947- cr4 = read_cr4_safe();
20948-
20949- if (cr4 & X86_CR4_PAE) {
20950- swapper_pg_dir[pgd_index(0)].pgd =
20951- efi_bak_pg_dir_pointer[0].pgd;
20952- } else {
20953- swapper_pg_dir[pgd_index(0)].pgd =
20954- efi_bak_pg_dir_pointer[0].pgd;
20955- swapper_pg_dir[pgd_index(0x400000)].pgd =
20956- efi_bak_pg_dir_pointer[1].pgd;
20957- }
20958+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20959
20960 /*
20961 * After the lock is released, the original page table is restored.
20962diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S
20963--- linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20964+++ linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20965@@ -6,6 +6,7 @@
20966 */
20967
20968 #include <linux/linkage.h>
20969+#include <linux/init.h>
20970 #include <asm/page_types.h>
20971
20972 /*
20973@@ -20,7 +21,7 @@
20974 * service functions will comply with gcc calling convention, too.
20975 */
20976
20977-.text
20978+__INIT
20979 ENTRY(efi_call_phys)
20980 /*
20981 * 0. The function can only be called in Linux kernel. So CS has been
20982@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20983 * The mapping of lower virtual memory has been created in prelog and
20984 * epilog.
20985 */
20986- movl $1f, %edx
20987- subl $__PAGE_OFFSET, %edx
20988- jmp *%edx
20989+ jmp 1f-__PAGE_OFFSET
20990 1:
20991
20992 /*
20993@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20994 * parameter 2, ..., param n. To make things easy, we save the return
20995 * address of efi_call_phys in a global variable.
20996 */
20997- popl %edx
20998- movl %edx, saved_return_addr
20999- /* get the function pointer into ECX*/
21000- popl %ecx
21001- movl %ecx, efi_rt_function_ptr
21002- movl $2f, %edx
21003- subl $__PAGE_OFFSET, %edx
21004- pushl %edx
21005+ popl (saved_return_addr)
21006+ popl (efi_rt_function_ptr)
21007
21008 /*
21009 * 3. Clear PG bit in %CR0.
21010@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21011 /*
21012 * 5. Call the physical function.
21013 */
21014- jmp *%ecx
21015+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21016
21017-2:
21018 /*
21019 * 6. After EFI runtime service returns, control will return to
21020 * following instruction. We'd better readjust stack pointer first.
21021@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21022 movl %cr0, %edx
21023 orl $0x80000000, %edx
21024 movl %edx, %cr0
21025- jmp 1f
21026-1:
21027+
21028 /*
21029 * 8. Now restore the virtual mode from flat mode by
21030 * adding EIP with PAGE_OFFSET.
21031 */
21032- movl $1f, %edx
21033- jmp *%edx
21034+ jmp 1f+__PAGE_OFFSET
21035 1:
21036
21037 /*
21038 * 9. Balance the stack. And because EAX contain the return value,
21039 * we'd better not clobber it.
21040 */
21041- leal efi_rt_function_ptr, %edx
21042- movl (%edx), %ecx
21043- pushl %ecx
21044+ pushl (efi_rt_function_ptr)
21045
21046 /*
21047- * 10. Push the saved return address onto the stack and return.
21048+ * 10. Return to the saved return address.
21049 */
21050- leal saved_return_addr, %edx
21051- movl (%edx), %ecx
21052- pushl %ecx
21053- ret
21054+ jmpl *(saved_return_addr)
21055 ENDPROC(efi_call_phys)
21056 .previous
21057
21058-.data
21059+__INITDATA
21060 saved_return_addr:
21061 .long 0
21062 efi_rt_function_ptr:
21063diff -urNp linux-3.0.3/arch/x86/platform/mrst/mrst.c linux-3.0.3/arch/x86/platform/mrst/mrst.c
21064--- linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21065+++ linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21066@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21067 }
21068
21069 /* Reboot and power off are handled by the SCU on a MID device */
21070-static void mrst_power_off(void)
21071+static __noreturn void mrst_power_off(void)
21072 {
21073 intel_scu_ipc_simple_command(0xf1, 1);
21074+ BUG();
21075 }
21076
21077-static void mrst_reboot(void)
21078+static __noreturn void mrst_reboot(void)
21079 {
21080 intel_scu_ipc_simple_command(0xf1, 0);
21081+ BUG();
21082 }
21083
21084 /*
21085diff -urNp linux-3.0.3/arch/x86/platform/uv/tlb_uv.c linux-3.0.3/arch/x86/platform/uv/tlb_uv.c
21086--- linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21087+++ linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21088@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21089 cpumask_t mask;
21090 struct reset_args reset_args;
21091
21092+ pax_track_stack();
21093+
21094 reset_args.sender = sender;
21095 cpus_clear(mask);
21096 /* find a single cpu for each uvhub in this distribution mask */
21097diff -urNp linux-3.0.3/arch/x86/power/cpu.c linux-3.0.3/arch/x86/power/cpu.c
21098--- linux-3.0.3/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21099+++ linux-3.0.3/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21100@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21101 static void fix_processor_context(void)
21102 {
21103 int cpu = smp_processor_id();
21104- struct tss_struct *t = &per_cpu(init_tss, cpu);
21105+ struct tss_struct *t = init_tss + cpu;
21106
21107 set_tss_desc(cpu, t); /*
21108 * This just modifies memory; should not be
21109@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21110 */
21111
21112 #ifdef CONFIG_X86_64
21113+ pax_open_kernel();
21114 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21115+ pax_close_kernel();
21116
21117 syscall_init(); /* This sets MSR_*STAR and related */
21118 #endif
21119diff -urNp linux-3.0.3/arch/x86/vdso/Makefile linux-3.0.3/arch/x86/vdso/Makefile
21120--- linux-3.0.3/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21121+++ linux-3.0.3/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21122@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21123 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21124 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21125
21126-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21127+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21128 GCOV_PROFILE := n
21129
21130 #
21131diff -urNp linux-3.0.3/arch/x86/vdso/vdso32-setup.c linux-3.0.3/arch/x86/vdso/vdso32-setup.c
21132--- linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21133+++ linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21134@@ -25,6 +25,7 @@
21135 #include <asm/tlbflush.h>
21136 #include <asm/vdso.h>
21137 #include <asm/proto.h>
21138+#include <asm/mman.h>
21139
21140 enum {
21141 VDSO_DISABLED = 0,
21142@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21143 void enable_sep_cpu(void)
21144 {
21145 int cpu = get_cpu();
21146- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21147+ struct tss_struct *tss = init_tss + cpu;
21148
21149 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21150 put_cpu();
21151@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21152 gate_vma.vm_start = FIXADDR_USER_START;
21153 gate_vma.vm_end = FIXADDR_USER_END;
21154 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21155- gate_vma.vm_page_prot = __P101;
21156+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21157 /*
21158 * Make sure the vDSO gets into every core dump.
21159 * Dumping its contents makes post-mortem fully interpretable later
21160@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21161 if (compat)
21162 addr = VDSO_HIGH_BASE;
21163 else {
21164- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21165+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21166 if (IS_ERR_VALUE(addr)) {
21167 ret = addr;
21168 goto up_fail;
21169 }
21170 }
21171
21172- current->mm->context.vdso = (void *)addr;
21173+ current->mm->context.vdso = addr;
21174
21175 if (compat_uses_vma || !compat) {
21176 /*
21177@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21178 }
21179
21180 current_thread_info()->sysenter_return =
21181- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21182+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21183
21184 up_fail:
21185 if (ret)
21186- current->mm->context.vdso = NULL;
21187+ current->mm->context.vdso = 0;
21188
21189 up_write(&mm->mmap_sem);
21190
21191@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21192
21193 const char *arch_vma_name(struct vm_area_struct *vma)
21194 {
21195- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21196+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21197 return "[vdso]";
21198+
21199+#ifdef CONFIG_PAX_SEGMEXEC
21200+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21201+ return "[vdso]";
21202+#endif
21203+
21204 return NULL;
21205 }
21206
21207@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21208 * Check to see if the corresponding task was created in compat vdso
21209 * mode.
21210 */
21211- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21212+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21213 return &gate_vma;
21214 return NULL;
21215 }
21216diff -urNp linux-3.0.3/arch/x86/vdso/vma.c linux-3.0.3/arch/x86/vdso/vma.c
21217--- linux-3.0.3/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21218+++ linux-3.0.3/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21219@@ -15,18 +15,19 @@
21220 #include <asm/proto.h>
21221 #include <asm/vdso.h>
21222
21223-unsigned int __read_mostly vdso_enabled = 1;
21224-
21225 extern char vdso_start[], vdso_end[];
21226 extern unsigned short vdso_sync_cpuid;
21227+extern char __vsyscall_0;
21228
21229 static struct page **vdso_pages;
21230+static struct page *vsyscall_page;
21231 static unsigned vdso_size;
21232
21233 static int __init init_vdso_vars(void)
21234 {
21235- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21236- int i;
21237+ size_t nbytes = vdso_end - vdso_start;
21238+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21239+ size_t i;
21240
21241 vdso_size = npages << PAGE_SHIFT;
21242 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21243@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21244 goto oom;
21245 for (i = 0; i < npages; i++) {
21246 struct page *p;
21247- p = alloc_page(GFP_KERNEL);
21248+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21249 if (!p)
21250 goto oom;
21251 vdso_pages[i] = p;
21252- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21253+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21254+ nbytes -= PAGE_SIZE;
21255 }
21256+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21257
21258 return 0;
21259
21260 oom:
21261- printk("Cannot allocate vdso\n");
21262- vdso_enabled = 0;
21263- return -ENOMEM;
21264+ panic("Cannot allocate vdso\n");
21265 }
21266 subsys_initcall(init_vdso_vars);
21267
21268@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21269 unsigned long addr;
21270 int ret;
21271
21272- if (!vdso_enabled)
21273- return 0;
21274-
21275 down_write(&mm->mmap_sem);
21276- addr = vdso_addr(mm->start_stack, vdso_size);
21277- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21278+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21279+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21280 if (IS_ERR_VALUE(addr)) {
21281 ret = addr;
21282 goto up_fail;
21283 }
21284
21285- current->mm->context.vdso = (void *)addr;
21286+ mm->context.vdso = addr + PAGE_SIZE;
21287
21288- ret = install_special_mapping(mm, addr, vdso_size,
21289+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
21290 VM_READ|VM_EXEC|
21291- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21292+ VM_MAYREAD|VM_MAYEXEC|
21293 VM_ALWAYSDUMP,
21294- vdso_pages);
21295+ &vsyscall_page);
21296 if (ret) {
21297- current->mm->context.vdso = NULL;
21298+ mm->context.vdso = 0;
21299 goto up_fail;
21300 }
21301
21302+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21303+ VM_READ|VM_EXEC|
21304+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21305+ VM_ALWAYSDUMP,
21306+ vdso_pages);
21307+ if (ret)
21308+ mm->context.vdso = 0;
21309+
21310 up_fail:
21311 up_write(&mm->mmap_sem);
21312 return ret;
21313 }
21314-
21315-static __init int vdso_setup(char *s)
21316-{
21317- vdso_enabled = simple_strtoul(s, NULL, 0);
21318- return 0;
21319-}
21320-__setup("vdso=", vdso_setup);
21321diff -urNp linux-3.0.3/arch/x86/xen/enlighten.c linux-3.0.3/arch/x86/xen/enlighten.c
21322--- linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:44:40.000000000 -0400
21323+++ linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:47:55.000000000 -0400
21324@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21325
21326 struct shared_info xen_dummy_shared_info;
21327
21328-void *xen_initial_gdt;
21329-
21330 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21331 __read_mostly int xen_have_vector_callback;
21332 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21333@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21334 #endif
21335 };
21336
21337-static void xen_reboot(int reason)
21338+static __noreturn void xen_reboot(int reason)
21339 {
21340 struct sched_shutdown r = { .reason = reason };
21341
21342@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21343 BUG();
21344 }
21345
21346-static void xen_restart(char *msg)
21347+static __noreturn void xen_restart(char *msg)
21348 {
21349 xen_reboot(SHUTDOWN_reboot);
21350 }
21351
21352-static void xen_emergency_restart(void)
21353+static __noreturn void xen_emergency_restart(void)
21354 {
21355 xen_reboot(SHUTDOWN_reboot);
21356 }
21357
21358-static void xen_machine_halt(void)
21359+static __noreturn void xen_machine_halt(void)
21360 {
21361 xen_reboot(SHUTDOWN_poweroff);
21362 }
21363@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21364 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21365
21366 /* Work out if we support NX */
21367- x86_configure_nx();
21368+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21369+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21370+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21371+ unsigned l, h;
21372+
21373+ __supported_pte_mask |= _PAGE_NX;
21374+ rdmsr(MSR_EFER, l, h);
21375+ l |= EFER_NX;
21376+ wrmsr(MSR_EFER, l, h);
21377+ }
21378+#endif
21379
21380 xen_setup_features();
21381
21382@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21383
21384 machine_ops = xen_machine_ops;
21385
21386- /*
21387- * The only reliable way to retain the initial address of the
21388- * percpu gdt_page is to remember it here, so we can go and
21389- * mark it RW later, when the initial percpu area is freed.
21390- */
21391- xen_initial_gdt = &per_cpu(gdt_page, 0);
21392-
21393 xen_smp_init();
21394
21395 #ifdef CONFIG_ACPI_NUMA
21396diff -urNp linux-3.0.3/arch/x86/xen/mmu.c linux-3.0.3/arch/x86/xen/mmu.c
21397--- linux-3.0.3/arch/x86/xen/mmu.c 2011-07-21 22:17:23.000000000 -0400
21398+++ linux-3.0.3/arch/x86/xen/mmu.c 2011-08-24 18:10:12.000000000 -0400
21399@@ -1679,6 +1679,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21400 convert_pfn_mfn(init_level4_pgt);
21401 convert_pfn_mfn(level3_ident_pgt);
21402 convert_pfn_mfn(level3_kernel_pgt);
21403+ convert_pfn_mfn(level3_vmalloc_pgt);
21404+ convert_pfn_mfn(level3_vmemmap_pgt);
21405
21406 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21407 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21408@@ -1697,7 +1699,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21409 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21410 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21411 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21412+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21413+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21414 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21415+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21416 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21417 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21418
21419@@ -1909,6 +1914,7 @@ static void __init xen_post_allocator_in
21420 pv_mmu_ops.set_pud = xen_set_pud;
21421 #if PAGETABLE_LEVELS == 4
21422 pv_mmu_ops.set_pgd = xen_set_pgd;
21423+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21424 #endif
21425
21426 /* This will work as long as patching hasn't happened yet
21427@@ -1990,6 +1996,7 @@ static const struct pv_mmu_ops xen_mmu_o
21428 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21429 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21430 .set_pgd = xen_set_pgd_hyper,
21431+ .set_pgd_batched = xen_set_pgd_hyper,
21432
21433 .alloc_pud = xen_alloc_pmd_init,
21434 .release_pud = xen_release_pmd_init,
21435diff -urNp linux-3.0.3/arch/x86/xen/smp.c linux-3.0.3/arch/x86/xen/smp.c
21436--- linux-3.0.3/arch/x86/xen/smp.c 2011-07-21 22:17:23.000000000 -0400
21437+++ linux-3.0.3/arch/x86/xen/smp.c 2011-08-23 21:47:55.000000000 -0400
21438@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21439 {
21440 BUG_ON(smp_processor_id() != 0);
21441 native_smp_prepare_boot_cpu();
21442-
21443- /* We've switched to the "real" per-cpu gdt, so make sure the
21444- old memory can be recycled */
21445- make_lowmem_page_readwrite(xen_initial_gdt);
21446-
21447 xen_filter_cpu_maps();
21448 xen_setup_vcpu_info_placement();
21449 }
21450@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21451 gdt = get_cpu_gdt_table(cpu);
21452
21453 ctxt->flags = VGCF_IN_KERNEL;
21454- ctxt->user_regs.ds = __USER_DS;
21455- ctxt->user_regs.es = __USER_DS;
21456+ ctxt->user_regs.ds = __KERNEL_DS;
21457+ ctxt->user_regs.es = __KERNEL_DS;
21458 ctxt->user_regs.ss = __KERNEL_DS;
21459 #ifdef CONFIG_X86_32
21460 ctxt->user_regs.fs = __KERNEL_PERCPU;
21461- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21462+ savesegment(gs, ctxt->user_regs.gs);
21463 #else
21464 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21465 #endif
21466@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21467 int rc;
21468
21469 per_cpu(current_task, cpu) = idle;
21470+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21471 #ifdef CONFIG_X86_32
21472 irq_ctx_init(cpu);
21473 #else
21474 clear_tsk_thread_flag(idle, TIF_FORK);
21475- per_cpu(kernel_stack, cpu) =
21476- (unsigned long)task_stack_page(idle) -
21477- KERNEL_STACK_OFFSET + THREAD_SIZE;
21478+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21479 #endif
21480 xen_setup_runstate_info(cpu);
21481 xen_setup_timer(cpu);
21482diff -urNp linux-3.0.3/arch/x86/xen/xen-asm_32.S linux-3.0.3/arch/x86/xen/xen-asm_32.S
21483--- linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21484+++ linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21485@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21486 ESP_OFFSET=4 # bytes pushed onto stack
21487
21488 /*
21489- * Store vcpu_info pointer for easy access. Do it this way to
21490- * avoid having to reload %fs
21491+ * Store vcpu_info pointer for easy access.
21492 */
21493 #ifdef CONFIG_SMP
21494- GET_THREAD_INFO(%eax)
21495- movl TI_cpu(%eax), %eax
21496- movl __per_cpu_offset(,%eax,4), %eax
21497- mov xen_vcpu(%eax), %eax
21498+ push %fs
21499+ mov $(__KERNEL_PERCPU), %eax
21500+ mov %eax, %fs
21501+ mov PER_CPU_VAR(xen_vcpu), %eax
21502+ pop %fs
21503 #else
21504 movl xen_vcpu, %eax
21505 #endif
21506diff -urNp linux-3.0.3/arch/x86/xen/xen-head.S linux-3.0.3/arch/x86/xen/xen-head.S
21507--- linux-3.0.3/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21508+++ linux-3.0.3/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21509@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21510 #ifdef CONFIG_X86_32
21511 mov %esi,xen_start_info
21512 mov $init_thread_union+THREAD_SIZE,%esp
21513+#ifdef CONFIG_SMP
21514+ movl $cpu_gdt_table,%edi
21515+ movl $__per_cpu_load,%eax
21516+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21517+ rorl $16,%eax
21518+ movb %al,__KERNEL_PERCPU + 4(%edi)
21519+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21520+ movl $__per_cpu_end - 1,%eax
21521+ subl $__per_cpu_start,%eax
21522+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21523+#endif
21524 #else
21525 mov %rsi,xen_start_info
21526 mov $init_thread_union+THREAD_SIZE,%rsp
21527diff -urNp linux-3.0.3/arch/x86/xen/xen-ops.h linux-3.0.3/arch/x86/xen/xen-ops.h
21528--- linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21529+++ linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21530@@ -10,8 +10,6 @@
21531 extern const char xen_hypervisor_callback[];
21532 extern const char xen_failsafe_callback[];
21533
21534-extern void *xen_initial_gdt;
21535-
21536 struct trap_info;
21537 void xen_copy_trap_info(struct trap_info *traps);
21538
21539diff -urNp linux-3.0.3/block/blk-iopoll.c linux-3.0.3/block/blk-iopoll.c
21540--- linux-3.0.3/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21541+++ linux-3.0.3/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21542@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21543 }
21544 EXPORT_SYMBOL(blk_iopoll_complete);
21545
21546-static void blk_iopoll_softirq(struct softirq_action *h)
21547+static void blk_iopoll_softirq(void)
21548 {
21549 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21550 int rearm = 0, budget = blk_iopoll_budget;
21551diff -urNp linux-3.0.3/block/blk-map.c linux-3.0.3/block/blk-map.c
21552--- linux-3.0.3/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21553+++ linux-3.0.3/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21554@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21555 if (!len || !kbuf)
21556 return -EINVAL;
21557
21558- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21559+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21560 if (do_copy)
21561 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21562 else
21563diff -urNp linux-3.0.3/block/blk-softirq.c linux-3.0.3/block/blk-softirq.c
21564--- linux-3.0.3/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21565+++ linux-3.0.3/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21566@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21567 * Softirq action handler - move entries to local list and loop over them
21568 * while passing them to the queue registered handler.
21569 */
21570-static void blk_done_softirq(struct softirq_action *h)
21571+static void blk_done_softirq(void)
21572 {
21573 struct list_head *cpu_list, local_list;
21574
21575diff -urNp linux-3.0.3/block/bsg.c linux-3.0.3/block/bsg.c
21576--- linux-3.0.3/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21577+++ linux-3.0.3/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21578@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21579 struct sg_io_v4 *hdr, struct bsg_device *bd,
21580 fmode_t has_write_perm)
21581 {
21582+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21583+ unsigned char *cmdptr;
21584+
21585 if (hdr->request_len > BLK_MAX_CDB) {
21586 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21587 if (!rq->cmd)
21588 return -ENOMEM;
21589- }
21590+ cmdptr = rq->cmd;
21591+ } else
21592+ cmdptr = tmpcmd;
21593
21594- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21595+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21596 hdr->request_len))
21597 return -EFAULT;
21598
21599+ if (cmdptr != rq->cmd)
21600+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21601+
21602 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21603 if (blk_verify_command(rq->cmd, has_write_perm))
21604 return -EPERM;
21605diff -urNp linux-3.0.3/block/scsi_ioctl.c linux-3.0.3/block/scsi_ioctl.c
21606--- linux-3.0.3/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21607+++ linux-3.0.3/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21608@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21609 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21610 struct sg_io_hdr *hdr, fmode_t mode)
21611 {
21612- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21613+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21614+ unsigned char *cmdptr;
21615+
21616+ if (rq->cmd != rq->__cmd)
21617+ cmdptr = rq->cmd;
21618+ else
21619+ cmdptr = tmpcmd;
21620+
21621+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21622 return -EFAULT;
21623+
21624+ if (cmdptr != rq->cmd)
21625+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21626+
21627 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21628 return -EPERM;
21629
21630@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21631 int err;
21632 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21633 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21634+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21635+ unsigned char *cmdptr;
21636
21637 if (!sic)
21638 return -EINVAL;
21639@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21640 */
21641 err = -EFAULT;
21642 rq->cmd_len = cmdlen;
21643- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21644+
21645+ if (rq->cmd != rq->__cmd)
21646+ cmdptr = rq->cmd;
21647+ else
21648+ cmdptr = tmpcmd;
21649+
21650+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21651 goto error;
21652
21653+ if (rq->cmd != cmdptr)
21654+ memcpy(rq->cmd, cmdptr, cmdlen);
21655+
21656 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21657 goto error;
21658
21659diff -urNp linux-3.0.3/crypto/cryptd.c linux-3.0.3/crypto/cryptd.c
21660--- linux-3.0.3/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21661+++ linux-3.0.3/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21662@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21663
21664 struct cryptd_blkcipher_request_ctx {
21665 crypto_completion_t complete;
21666-};
21667+} __no_const;
21668
21669 struct cryptd_hash_ctx {
21670 struct crypto_shash *child;
21671@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21672
21673 struct cryptd_aead_request_ctx {
21674 crypto_completion_t complete;
21675-};
21676+} __no_const;
21677
21678 static void cryptd_queue_worker(struct work_struct *work);
21679
21680diff -urNp linux-3.0.3/crypto/gf128mul.c linux-3.0.3/crypto/gf128mul.c
21681--- linux-3.0.3/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21682+++ linux-3.0.3/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21683@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21684 for (i = 0; i < 7; ++i)
21685 gf128mul_x_lle(&p[i + 1], &p[i]);
21686
21687- memset(r, 0, sizeof(r));
21688+ memset(r, 0, sizeof(*r));
21689 for (i = 0;;) {
21690 u8 ch = ((u8 *)b)[15 - i];
21691
21692@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21693 for (i = 0; i < 7; ++i)
21694 gf128mul_x_bbe(&p[i + 1], &p[i]);
21695
21696- memset(r, 0, sizeof(r));
21697+ memset(r, 0, sizeof(*r));
21698 for (i = 0;;) {
21699 u8 ch = ((u8 *)b)[i];
21700
21701diff -urNp linux-3.0.3/crypto/serpent.c linux-3.0.3/crypto/serpent.c
21702--- linux-3.0.3/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21703+++ linux-3.0.3/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21704@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21705 u32 r0,r1,r2,r3,r4;
21706 int i;
21707
21708+ pax_track_stack();
21709+
21710 /* Copy key, add padding */
21711
21712 for (i = 0; i < keylen; ++i)
21713diff -urNp linux-3.0.3/Documentation/dontdiff linux-3.0.3/Documentation/dontdiff
21714--- linux-3.0.3/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21715+++ linux-3.0.3/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21716@@ -5,6 +5,7 @@
21717 *.cis
21718 *.cpio
21719 *.csp
21720+*.dbg
21721 *.dsp
21722 *.dvi
21723 *.elf
21724@@ -48,9 +49,11 @@
21725 *.tab.h
21726 *.tex
21727 *.ver
21728+*.vim
21729 *.xml
21730 *.xz
21731 *_MODULES
21732+*_reg_safe.h
21733 *_vga16.c
21734 *~
21735 \#*#
21736@@ -70,6 +73,7 @@ Kerntypes
21737 Module.markers
21738 Module.symvers
21739 PENDING
21740+PERF*
21741 SCCS
21742 System.map*
21743 TAGS
21744@@ -98,6 +102,8 @@ bzImage*
21745 capability_names.h
21746 capflags.c
21747 classlist.h*
21748+clut_vga16.c
21749+common-cmds.h
21750 comp*.log
21751 compile.h*
21752 conf
21753@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21754 gconf
21755 gconf.glade.h
21756 gen-devlist
21757+gen-kdb_cmds.c
21758 gen_crc32table
21759 gen_init_cpio
21760 generated
21761 genheaders
21762 genksyms
21763 *_gray256.c
21764+hash
21765 hpet_example
21766 hugepage-mmap
21767 hugepage-shm
21768@@ -146,7 +154,6 @@ int32.c
21769 int4.c
21770 int8.c
21771 kallsyms
21772-kconfig
21773 keywords.c
21774 ksym.c*
21775 ksym.h*
21776@@ -154,7 +161,6 @@ kxgettext
21777 lkc_defs.h
21778 lex.c
21779 lex.*.c
21780-linux
21781 logo_*.c
21782 logo_*_clut224.c
21783 logo_*_mono.c
21784@@ -174,6 +180,7 @@ mkboot
21785 mkbugboot
21786 mkcpustr
21787 mkdep
21788+mkpiggy
21789 mkprep
21790 mkregtable
21791 mktables
21792@@ -209,6 +216,7 @@ r300_reg_safe.h
21793 r420_reg_safe.h
21794 r600_reg_safe.h
21795 recordmcount
21796+regdb.c
21797 relocs
21798 rlim_names.h
21799 rn50_reg_safe.h
21800@@ -219,6 +227,7 @@ setup
21801 setup.bin
21802 setup.elf
21803 sImage
21804+slabinfo
21805 sm_tbl*
21806 split-include
21807 syscalltab.h
21808@@ -246,7 +255,9 @@ vmlinux
21809 vmlinux-*
21810 vmlinux.aout
21811 vmlinux.bin.all
21812+vmlinux.bin.bz2
21813 vmlinux.lds
21814+vmlinux.relocs
21815 vmlinuz
21816 voffset.h
21817 vsyscall.lds
21818@@ -254,6 +265,7 @@ vsyscall_32.lds
21819 wanxlfw.inc
21820 uImage
21821 unifdef
21822+utsrelease.h
21823 wakeup.bin
21824 wakeup.elf
21825 wakeup.lds
21826diff -urNp linux-3.0.3/Documentation/kernel-parameters.txt linux-3.0.3/Documentation/kernel-parameters.txt
21827--- linux-3.0.3/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21828+++ linux-3.0.3/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21829@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21830 the specified number of seconds. This is to be used if
21831 your oopses keep scrolling off the screen.
21832
21833+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21834+ virtualization environments that don't cope well with the
21835+ expand down segment used by UDEREF on X86-32 or the frequent
21836+ page table updates on X86-64.
21837+
21838+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21839+
21840 pcbit= [HW,ISDN]
21841
21842 pcd. [PARIDE]
21843diff -urNp linux-3.0.3/drivers/acpi/apei/cper.c linux-3.0.3/drivers/acpi/apei/cper.c
21844--- linux-3.0.3/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21845+++ linux-3.0.3/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21846@@ -38,12 +38,12 @@
21847 */
21848 u64 cper_next_record_id(void)
21849 {
21850- static atomic64_t seq;
21851+ static atomic64_unchecked_t seq;
21852
21853- if (!atomic64_read(&seq))
21854- atomic64_set(&seq, ((u64)get_seconds()) << 32);
21855+ if (!atomic64_read_unchecked(&seq))
21856+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21857
21858- return atomic64_inc_return(&seq);
21859+ return atomic64_inc_return_unchecked(&seq);
21860 }
21861 EXPORT_SYMBOL_GPL(cper_next_record_id);
21862
21863diff -urNp linux-3.0.3/drivers/acpi/ec_sys.c linux-3.0.3/drivers/acpi/ec_sys.c
21864--- linux-3.0.3/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21865+++ linux-3.0.3/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21866@@ -11,6 +11,7 @@
21867 #include <linux/kernel.h>
21868 #include <linux/acpi.h>
21869 #include <linux/debugfs.h>
21870+#include <asm/uaccess.h>
21871 #include "internal.h"
21872
21873 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21874@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21875 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21876 */
21877 unsigned int size = EC_SPACE_SIZE;
21878- u8 *data = (u8 *) buf;
21879+ u8 data;
21880 loff_t init_off = *off;
21881 int err = 0;
21882
21883@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21884 size = count;
21885
21886 while (size) {
21887- err = ec_read(*off, &data[*off - init_off]);
21888+ err = ec_read(*off, &data);
21889 if (err)
21890 return err;
21891+ if (put_user(data, &buf[*off - init_off]))
21892+ return -EFAULT;
21893 *off += 1;
21894 size--;
21895 }
21896@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21897
21898 unsigned int size = count;
21899 loff_t init_off = *off;
21900- u8 *data = (u8 *) buf;
21901 int err = 0;
21902
21903 if (*off >= EC_SPACE_SIZE)
21904@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21905 }
21906
21907 while (size) {
21908- u8 byte_write = data[*off - init_off];
21909+ u8 byte_write;
21910+ if (get_user(byte_write, &buf[*off - init_off]))
21911+ return -EFAULT;
21912 err = ec_write(*off, byte_write);
21913 if (err)
21914 return err;
21915diff -urNp linux-3.0.3/drivers/acpi/proc.c linux-3.0.3/drivers/acpi/proc.c
21916--- linux-3.0.3/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21917+++ linux-3.0.3/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21918@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21919 size_t count, loff_t * ppos)
21920 {
21921 struct list_head *node, *next;
21922- char strbuf[5];
21923- char str[5] = "";
21924- unsigned int len = count;
21925-
21926- if (len > 4)
21927- len = 4;
21928- if (len < 0)
21929- return -EFAULT;
21930+ char strbuf[5] = {0};
21931
21932- if (copy_from_user(strbuf, buffer, len))
21933+ if (count > 4)
21934+ count = 4;
21935+ if (copy_from_user(strbuf, buffer, count))
21936 return -EFAULT;
21937- strbuf[len] = '\0';
21938- sscanf(strbuf, "%s", str);
21939+ strbuf[count] = '\0';
21940
21941 mutex_lock(&acpi_device_lock);
21942 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21943@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21944 if (!dev->wakeup.flags.valid)
21945 continue;
21946
21947- if (!strncmp(dev->pnp.bus_id, str, 4)) {
21948+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21949 if (device_can_wakeup(&dev->dev)) {
21950 bool enable = !device_may_wakeup(&dev->dev);
21951 device_set_wakeup_enable(&dev->dev, enable);
21952diff -urNp linux-3.0.3/drivers/acpi/processor_driver.c linux-3.0.3/drivers/acpi/processor_driver.c
21953--- linux-3.0.3/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21954+++ linux-3.0.3/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21955@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21956 return 0;
21957 #endif
21958
21959- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21960+ BUG_ON(pr->id >= nr_cpu_ids);
21961
21962 /*
21963 * Buggy BIOS check
21964diff -urNp linux-3.0.3/drivers/ata/libata-core.c linux-3.0.3/drivers/ata/libata-core.c
21965--- linux-3.0.3/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21966+++ linux-3.0.3/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21967@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21968 struct ata_port *ap;
21969 unsigned int tag;
21970
21971- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21972+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21973 ap = qc->ap;
21974
21975 qc->flags = 0;
21976@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21977 struct ata_port *ap;
21978 struct ata_link *link;
21979
21980- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21981+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21982 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21983 ap = qc->ap;
21984 link = qc->dev->link;
21985@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21986 return;
21987
21988 spin_lock(&lock);
21989+ pax_open_kernel();
21990
21991 for (cur = ops->inherits; cur; cur = cur->inherits) {
21992 void **inherit = (void **)cur;
21993@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
21994 if (IS_ERR(*pp))
21995 *pp = NULL;
21996
21997- ops->inherits = NULL;
21998+ *(struct ata_port_operations **)&ops->inherits = NULL;
21999
22000+ pax_close_kernel();
22001 spin_unlock(&lock);
22002 }
22003
22004diff -urNp linux-3.0.3/drivers/ata/libata-eh.c linux-3.0.3/drivers/ata/libata-eh.c
22005--- linux-3.0.3/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22006+++ linux-3.0.3/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22007@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22008 {
22009 struct ata_link *link;
22010
22011+ pax_track_stack();
22012+
22013 ata_for_each_link(link, ap, HOST_FIRST)
22014 ata_eh_link_report(link);
22015 }
22016diff -urNp linux-3.0.3/drivers/ata/pata_arasan_cf.c linux-3.0.3/drivers/ata/pata_arasan_cf.c
22017--- linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22018+++ linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22019@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22020 /* Handle platform specific quirks */
22021 if (pdata->quirk) {
22022 if (pdata->quirk & CF_BROKEN_PIO) {
22023- ap->ops->set_piomode = NULL;
22024+ pax_open_kernel();
22025+ *(void **)&ap->ops->set_piomode = NULL;
22026+ pax_close_kernel();
22027 ap->pio_mask = 0;
22028 }
22029 if (pdata->quirk & CF_BROKEN_MWDMA)
22030diff -urNp linux-3.0.3/drivers/atm/adummy.c linux-3.0.3/drivers/atm/adummy.c
22031--- linux-3.0.3/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22032+++ linux-3.0.3/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22033@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22034 vcc->pop(vcc, skb);
22035 else
22036 dev_kfree_skb_any(skb);
22037- atomic_inc(&vcc->stats->tx);
22038+ atomic_inc_unchecked(&vcc->stats->tx);
22039
22040 return 0;
22041 }
22042diff -urNp linux-3.0.3/drivers/atm/ambassador.c linux-3.0.3/drivers/atm/ambassador.c
22043--- linux-3.0.3/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22044+++ linux-3.0.3/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22045@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22046 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22047
22048 // VC layer stats
22049- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22050+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22051
22052 // free the descriptor
22053 kfree (tx_descr);
22054@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22055 dump_skb ("<<<", vc, skb);
22056
22057 // VC layer stats
22058- atomic_inc(&atm_vcc->stats->rx);
22059+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22060 __net_timestamp(skb);
22061 // end of our responsibility
22062 atm_vcc->push (atm_vcc, skb);
22063@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22064 } else {
22065 PRINTK (KERN_INFO, "dropped over-size frame");
22066 // should we count this?
22067- atomic_inc(&atm_vcc->stats->rx_drop);
22068+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22069 }
22070
22071 } else {
22072@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22073 }
22074
22075 if (check_area (skb->data, skb->len)) {
22076- atomic_inc(&atm_vcc->stats->tx_err);
22077+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22078 return -ENOMEM; // ?
22079 }
22080
22081diff -urNp linux-3.0.3/drivers/atm/atmtcp.c linux-3.0.3/drivers/atm/atmtcp.c
22082--- linux-3.0.3/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22083+++ linux-3.0.3/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22084@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22085 if (vcc->pop) vcc->pop(vcc,skb);
22086 else dev_kfree_skb(skb);
22087 if (dev_data) return 0;
22088- atomic_inc(&vcc->stats->tx_err);
22089+ atomic_inc_unchecked(&vcc->stats->tx_err);
22090 return -ENOLINK;
22091 }
22092 size = skb->len+sizeof(struct atmtcp_hdr);
22093@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22094 if (!new_skb) {
22095 if (vcc->pop) vcc->pop(vcc,skb);
22096 else dev_kfree_skb(skb);
22097- atomic_inc(&vcc->stats->tx_err);
22098+ atomic_inc_unchecked(&vcc->stats->tx_err);
22099 return -ENOBUFS;
22100 }
22101 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22102@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22103 if (vcc->pop) vcc->pop(vcc,skb);
22104 else dev_kfree_skb(skb);
22105 out_vcc->push(out_vcc,new_skb);
22106- atomic_inc(&vcc->stats->tx);
22107- atomic_inc(&out_vcc->stats->rx);
22108+ atomic_inc_unchecked(&vcc->stats->tx);
22109+ atomic_inc_unchecked(&out_vcc->stats->rx);
22110 return 0;
22111 }
22112
22113@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22114 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22115 read_unlock(&vcc_sklist_lock);
22116 if (!out_vcc) {
22117- atomic_inc(&vcc->stats->tx_err);
22118+ atomic_inc_unchecked(&vcc->stats->tx_err);
22119 goto done;
22120 }
22121 skb_pull(skb,sizeof(struct atmtcp_hdr));
22122@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22123 __net_timestamp(new_skb);
22124 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22125 out_vcc->push(out_vcc,new_skb);
22126- atomic_inc(&vcc->stats->tx);
22127- atomic_inc(&out_vcc->stats->rx);
22128+ atomic_inc_unchecked(&vcc->stats->tx);
22129+ atomic_inc_unchecked(&out_vcc->stats->rx);
22130 done:
22131 if (vcc->pop) vcc->pop(vcc,skb);
22132 else dev_kfree_skb(skb);
22133diff -urNp linux-3.0.3/drivers/atm/eni.c linux-3.0.3/drivers/atm/eni.c
22134--- linux-3.0.3/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22135+++ linux-3.0.3/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22136@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22137 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22138 vcc->dev->number);
22139 length = 0;
22140- atomic_inc(&vcc->stats->rx_err);
22141+ atomic_inc_unchecked(&vcc->stats->rx_err);
22142 }
22143 else {
22144 length = ATM_CELL_SIZE-1; /* no HEC */
22145@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22146 size);
22147 }
22148 eff = length = 0;
22149- atomic_inc(&vcc->stats->rx_err);
22150+ atomic_inc_unchecked(&vcc->stats->rx_err);
22151 }
22152 else {
22153 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22154@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22155 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22156 vcc->dev->number,vcc->vci,length,size << 2,descr);
22157 length = eff = 0;
22158- atomic_inc(&vcc->stats->rx_err);
22159+ atomic_inc_unchecked(&vcc->stats->rx_err);
22160 }
22161 }
22162 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22163@@ -771,7 +771,7 @@ rx_dequeued++;
22164 vcc->push(vcc,skb);
22165 pushed++;
22166 }
22167- atomic_inc(&vcc->stats->rx);
22168+ atomic_inc_unchecked(&vcc->stats->rx);
22169 }
22170 wake_up(&eni_dev->rx_wait);
22171 }
22172@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22173 PCI_DMA_TODEVICE);
22174 if (vcc->pop) vcc->pop(vcc,skb);
22175 else dev_kfree_skb_irq(skb);
22176- atomic_inc(&vcc->stats->tx);
22177+ atomic_inc_unchecked(&vcc->stats->tx);
22178 wake_up(&eni_dev->tx_wait);
22179 dma_complete++;
22180 }
22181diff -urNp linux-3.0.3/drivers/atm/firestream.c linux-3.0.3/drivers/atm/firestream.c
22182--- linux-3.0.3/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22183+++ linux-3.0.3/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22184@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22185 }
22186 }
22187
22188- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22189+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22190
22191 fs_dprintk (FS_DEBUG_TXMEM, "i");
22192 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22193@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22194 #endif
22195 skb_put (skb, qe->p1 & 0xffff);
22196 ATM_SKB(skb)->vcc = atm_vcc;
22197- atomic_inc(&atm_vcc->stats->rx);
22198+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22199 __net_timestamp(skb);
22200 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22201 atm_vcc->push (atm_vcc, skb);
22202@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22203 kfree (pe);
22204 }
22205 if (atm_vcc)
22206- atomic_inc(&atm_vcc->stats->rx_drop);
22207+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22208 break;
22209 case 0x1f: /* Reassembly abort: no buffers. */
22210 /* Silently increment error counter. */
22211 if (atm_vcc)
22212- atomic_inc(&atm_vcc->stats->rx_drop);
22213+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22214 break;
22215 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22216 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22217diff -urNp linux-3.0.3/drivers/atm/fore200e.c linux-3.0.3/drivers/atm/fore200e.c
22218--- linux-3.0.3/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22219+++ linux-3.0.3/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22220@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22221 #endif
22222 /* check error condition */
22223 if (*entry->status & STATUS_ERROR)
22224- atomic_inc(&vcc->stats->tx_err);
22225+ atomic_inc_unchecked(&vcc->stats->tx_err);
22226 else
22227- atomic_inc(&vcc->stats->tx);
22228+ atomic_inc_unchecked(&vcc->stats->tx);
22229 }
22230 }
22231
22232@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22233 if (skb == NULL) {
22234 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22235
22236- atomic_inc(&vcc->stats->rx_drop);
22237+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22238 return -ENOMEM;
22239 }
22240
22241@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22242
22243 dev_kfree_skb_any(skb);
22244
22245- atomic_inc(&vcc->stats->rx_drop);
22246+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22247 return -ENOMEM;
22248 }
22249
22250 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22251
22252 vcc->push(vcc, skb);
22253- atomic_inc(&vcc->stats->rx);
22254+ atomic_inc_unchecked(&vcc->stats->rx);
22255
22256 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22257
22258@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22259 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22260 fore200e->atm_dev->number,
22261 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22262- atomic_inc(&vcc->stats->rx_err);
22263+ atomic_inc_unchecked(&vcc->stats->rx_err);
22264 }
22265 }
22266
22267@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22268 goto retry_here;
22269 }
22270
22271- atomic_inc(&vcc->stats->tx_err);
22272+ atomic_inc_unchecked(&vcc->stats->tx_err);
22273
22274 fore200e->tx_sat++;
22275 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22276diff -urNp linux-3.0.3/drivers/atm/he.c linux-3.0.3/drivers/atm/he.c
22277--- linux-3.0.3/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22278+++ linux-3.0.3/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22279@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22280
22281 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22282 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22283- atomic_inc(&vcc->stats->rx_drop);
22284+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22285 goto return_host_buffers;
22286 }
22287
22288@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22289 RBRQ_LEN_ERR(he_dev->rbrq_head)
22290 ? "LEN_ERR" : "",
22291 vcc->vpi, vcc->vci);
22292- atomic_inc(&vcc->stats->rx_err);
22293+ atomic_inc_unchecked(&vcc->stats->rx_err);
22294 goto return_host_buffers;
22295 }
22296
22297@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22298 vcc->push(vcc, skb);
22299 spin_lock(&he_dev->global_lock);
22300
22301- atomic_inc(&vcc->stats->rx);
22302+ atomic_inc_unchecked(&vcc->stats->rx);
22303
22304 return_host_buffers:
22305 ++pdus_assembled;
22306@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22307 tpd->vcc->pop(tpd->vcc, tpd->skb);
22308 else
22309 dev_kfree_skb_any(tpd->skb);
22310- atomic_inc(&tpd->vcc->stats->tx_err);
22311+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22312 }
22313 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22314 return;
22315@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22316 vcc->pop(vcc, skb);
22317 else
22318 dev_kfree_skb_any(skb);
22319- atomic_inc(&vcc->stats->tx_err);
22320+ atomic_inc_unchecked(&vcc->stats->tx_err);
22321 return -EINVAL;
22322 }
22323
22324@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22325 vcc->pop(vcc, skb);
22326 else
22327 dev_kfree_skb_any(skb);
22328- atomic_inc(&vcc->stats->tx_err);
22329+ atomic_inc_unchecked(&vcc->stats->tx_err);
22330 return -EINVAL;
22331 }
22332 #endif
22333@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22334 vcc->pop(vcc, skb);
22335 else
22336 dev_kfree_skb_any(skb);
22337- atomic_inc(&vcc->stats->tx_err);
22338+ atomic_inc_unchecked(&vcc->stats->tx_err);
22339 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22340 return -ENOMEM;
22341 }
22342@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22343 vcc->pop(vcc, skb);
22344 else
22345 dev_kfree_skb_any(skb);
22346- atomic_inc(&vcc->stats->tx_err);
22347+ atomic_inc_unchecked(&vcc->stats->tx_err);
22348 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22349 return -ENOMEM;
22350 }
22351@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22352 __enqueue_tpd(he_dev, tpd, cid);
22353 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22354
22355- atomic_inc(&vcc->stats->tx);
22356+ atomic_inc_unchecked(&vcc->stats->tx);
22357
22358 return 0;
22359 }
22360diff -urNp linux-3.0.3/drivers/atm/horizon.c linux-3.0.3/drivers/atm/horizon.c
22361--- linux-3.0.3/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22362+++ linux-3.0.3/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22363@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22364 {
22365 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22366 // VC layer stats
22367- atomic_inc(&vcc->stats->rx);
22368+ atomic_inc_unchecked(&vcc->stats->rx);
22369 __net_timestamp(skb);
22370 // end of our responsibility
22371 vcc->push (vcc, skb);
22372@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22373 dev->tx_iovec = NULL;
22374
22375 // VC layer stats
22376- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22377+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22378
22379 // free the skb
22380 hrz_kfree_skb (skb);
22381diff -urNp linux-3.0.3/drivers/atm/idt77252.c linux-3.0.3/drivers/atm/idt77252.c
22382--- linux-3.0.3/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22383+++ linux-3.0.3/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22384@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22385 else
22386 dev_kfree_skb(skb);
22387
22388- atomic_inc(&vcc->stats->tx);
22389+ atomic_inc_unchecked(&vcc->stats->tx);
22390 }
22391
22392 atomic_dec(&scq->used);
22393@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22394 if ((sb = dev_alloc_skb(64)) == NULL) {
22395 printk("%s: Can't allocate buffers for aal0.\n",
22396 card->name);
22397- atomic_add(i, &vcc->stats->rx_drop);
22398+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22399 break;
22400 }
22401 if (!atm_charge(vcc, sb->truesize)) {
22402 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22403 card->name);
22404- atomic_add(i - 1, &vcc->stats->rx_drop);
22405+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22406 dev_kfree_skb(sb);
22407 break;
22408 }
22409@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22410 ATM_SKB(sb)->vcc = vcc;
22411 __net_timestamp(sb);
22412 vcc->push(vcc, sb);
22413- atomic_inc(&vcc->stats->rx);
22414+ atomic_inc_unchecked(&vcc->stats->rx);
22415
22416 cell += ATM_CELL_PAYLOAD;
22417 }
22418@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22419 "(CDC: %08x)\n",
22420 card->name, len, rpp->len, readl(SAR_REG_CDC));
22421 recycle_rx_pool_skb(card, rpp);
22422- atomic_inc(&vcc->stats->rx_err);
22423+ atomic_inc_unchecked(&vcc->stats->rx_err);
22424 return;
22425 }
22426 if (stat & SAR_RSQE_CRC) {
22427 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22428 recycle_rx_pool_skb(card, rpp);
22429- atomic_inc(&vcc->stats->rx_err);
22430+ atomic_inc_unchecked(&vcc->stats->rx_err);
22431 return;
22432 }
22433 if (skb_queue_len(&rpp->queue) > 1) {
22434@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22435 RXPRINTK("%s: Can't alloc RX skb.\n",
22436 card->name);
22437 recycle_rx_pool_skb(card, rpp);
22438- atomic_inc(&vcc->stats->rx_err);
22439+ atomic_inc_unchecked(&vcc->stats->rx_err);
22440 return;
22441 }
22442 if (!atm_charge(vcc, skb->truesize)) {
22443@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22444 __net_timestamp(skb);
22445
22446 vcc->push(vcc, skb);
22447- atomic_inc(&vcc->stats->rx);
22448+ atomic_inc_unchecked(&vcc->stats->rx);
22449
22450 return;
22451 }
22452@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22453 __net_timestamp(skb);
22454
22455 vcc->push(vcc, skb);
22456- atomic_inc(&vcc->stats->rx);
22457+ atomic_inc_unchecked(&vcc->stats->rx);
22458
22459 if (skb->truesize > SAR_FB_SIZE_3)
22460 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22461@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22462 if (vcc->qos.aal != ATM_AAL0) {
22463 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22464 card->name, vpi, vci);
22465- atomic_inc(&vcc->stats->rx_drop);
22466+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22467 goto drop;
22468 }
22469
22470 if ((sb = dev_alloc_skb(64)) == NULL) {
22471 printk("%s: Can't allocate buffers for AAL0.\n",
22472 card->name);
22473- atomic_inc(&vcc->stats->rx_err);
22474+ atomic_inc_unchecked(&vcc->stats->rx_err);
22475 goto drop;
22476 }
22477
22478@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22479 ATM_SKB(sb)->vcc = vcc;
22480 __net_timestamp(sb);
22481 vcc->push(vcc, sb);
22482- atomic_inc(&vcc->stats->rx);
22483+ atomic_inc_unchecked(&vcc->stats->rx);
22484
22485 drop:
22486 skb_pull(queue, 64);
22487@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22488
22489 if (vc == NULL) {
22490 printk("%s: NULL connection in send().\n", card->name);
22491- atomic_inc(&vcc->stats->tx_err);
22492+ atomic_inc_unchecked(&vcc->stats->tx_err);
22493 dev_kfree_skb(skb);
22494 return -EINVAL;
22495 }
22496 if (!test_bit(VCF_TX, &vc->flags)) {
22497 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22498- atomic_inc(&vcc->stats->tx_err);
22499+ atomic_inc_unchecked(&vcc->stats->tx_err);
22500 dev_kfree_skb(skb);
22501 return -EINVAL;
22502 }
22503@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22504 break;
22505 default:
22506 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22507- atomic_inc(&vcc->stats->tx_err);
22508+ atomic_inc_unchecked(&vcc->stats->tx_err);
22509 dev_kfree_skb(skb);
22510 return -EINVAL;
22511 }
22512
22513 if (skb_shinfo(skb)->nr_frags != 0) {
22514 printk("%s: No scatter-gather yet.\n", card->name);
22515- atomic_inc(&vcc->stats->tx_err);
22516+ atomic_inc_unchecked(&vcc->stats->tx_err);
22517 dev_kfree_skb(skb);
22518 return -EINVAL;
22519 }
22520@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22521
22522 err = queue_skb(card, vc, skb, oam);
22523 if (err) {
22524- atomic_inc(&vcc->stats->tx_err);
22525+ atomic_inc_unchecked(&vcc->stats->tx_err);
22526 dev_kfree_skb(skb);
22527 return err;
22528 }
22529@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22530 skb = dev_alloc_skb(64);
22531 if (!skb) {
22532 printk("%s: Out of memory in send_oam().\n", card->name);
22533- atomic_inc(&vcc->stats->tx_err);
22534+ atomic_inc_unchecked(&vcc->stats->tx_err);
22535 return -ENOMEM;
22536 }
22537 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22538diff -urNp linux-3.0.3/drivers/atm/iphase.c linux-3.0.3/drivers/atm/iphase.c
22539--- linux-3.0.3/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22540+++ linux-3.0.3/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22541@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22542 status = (u_short) (buf_desc_ptr->desc_mode);
22543 if (status & (RX_CER | RX_PTE | RX_OFL))
22544 {
22545- atomic_inc(&vcc->stats->rx_err);
22546+ atomic_inc_unchecked(&vcc->stats->rx_err);
22547 IF_ERR(printk("IA: bad packet, dropping it");)
22548 if (status & RX_CER) {
22549 IF_ERR(printk(" cause: packet CRC error\n");)
22550@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22551 len = dma_addr - buf_addr;
22552 if (len > iadev->rx_buf_sz) {
22553 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22554- atomic_inc(&vcc->stats->rx_err);
22555+ atomic_inc_unchecked(&vcc->stats->rx_err);
22556 goto out_free_desc;
22557 }
22558
22559@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22560 ia_vcc = INPH_IA_VCC(vcc);
22561 if (ia_vcc == NULL)
22562 {
22563- atomic_inc(&vcc->stats->rx_err);
22564+ atomic_inc_unchecked(&vcc->stats->rx_err);
22565 dev_kfree_skb_any(skb);
22566 atm_return(vcc, atm_guess_pdu2truesize(len));
22567 goto INCR_DLE;
22568@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22569 if ((length > iadev->rx_buf_sz) || (length >
22570 (skb->len - sizeof(struct cpcs_trailer))))
22571 {
22572- atomic_inc(&vcc->stats->rx_err);
22573+ atomic_inc_unchecked(&vcc->stats->rx_err);
22574 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22575 length, skb->len);)
22576 dev_kfree_skb_any(skb);
22577@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22578
22579 IF_RX(printk("rx_dle_intr: skb push");)
22580 vcc->push(vcc,skb);
22581- atomic_inc(&vcc->stats->rx);
22582+ atomic_inc_unchecked(&vcc->stats->rx);
22583 iadev->rx_pkt_cnt++;
22584 }
22585 INCR_DLE:
22586@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22587 {
22588 struct k_sonet_stats *stats;
22589 stats = &PRIV(_ia_dev[board])->sonet_stats;
22590- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22591- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22592- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22593- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22594- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22595- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22596- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22597- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22598- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22599+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22600+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22601+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22602+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22603+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22604+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22605+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22606+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22607+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22608 }
22609 ia_cmds.status = 0;
22610 break;
22611@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22612 if ((desc == 0) || (desc > iadev->num_tx_desc))
22613 {
22614 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22615- atomic_inc(&vcc->stats->tx);
22616+ atomic_inc_unchecked(&vcc->stats->tx);
22617 if (vcc->pop)
22618 vcc->pop(vcc, skb);
22619 else
22620@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22621 ATM_DESC(skb) = vcc->vci;
22622 skb_queue_tail(&iadev->tx_dma_q, skb);
22623
22624- atomic_inc(&vcc->stats->tx);
22625+ atomic_inc_unchecked(&vcc->stats->tx);
22626 iadev->tx_pkt_cnt++;
22627 /* Increment transaction counter */
22628 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22629
22630 #if 0
22631 /* add flow control logic */
22632- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22633+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22634 if (iavcc->vc_desc_cnt > 10) {
22635 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22636 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22637diff -urNp linux-3.0.3/drivers/atm/lanai.c linux-3.0.3/drivers/atm/lanai.c
22638--- linux-3.0.3/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22639+++ linux-3.0.3/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22640@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22641 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22642 lanai_endtx(lanai, lvcc);
22643 lanai_free_skb(lvcc->tx.atmvcc, skb);
22644- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22645+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22646 }
22647
22648 /* Try to fill the buffer - don't call unless there is backlog */
22649@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22650 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22651 __net_timestamp(skb);
22652 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22653- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22654+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22655 out:
22656 lvcc->rx.buf.ptr = end;
22657 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22658@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22659 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22660 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22661 lanai->stats.service_rxnotaal5++;
22662- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22663+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22664 return 0;
22665 }
22666 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22667@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22668 int bytes;
22669 read_unlock(&vcc_sklist_lock);
22670 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22671- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22672+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22673 lvcc->stats.x.aal5.service_trash++;
22674 bytes = (SERVICE_GET_END(s) * 16) -
22675 (((unsigned long) lvcc->rx.buf.ptr) -
22676@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22677 }
22678 if (s & SERVICE_STREAM) {
22679 read_unlock(&vcc_sklist_lock);
22680- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22681+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22682 lvcc->stats.x.aal5.service_stream++;
22683 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22684 "PDU on VCI %d!\n", lanai->number, vci);
22685@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22686 return 0;
22687 }
22688 DPRINTK("got rx crc error on vci %d\n", vci);
22689- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22690+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22691 lvcc->stats.x.aal5.service_rxcrc++;
22692 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22693 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22694diff -urNp linux-3.0.3/drivers/atm/nicstar.c linux-3.0.3/drivers/atm/nicstar.c
22695--- linux-3.0.3/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22696+++ linux-3.0.3/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22697@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22698 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22699 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22700 card->index);
22701- atomic_inc(&vcc->stats->tx_err);
22702+ atomic_inc_unchecked(&vcc->stats->tx_err);
22703 dev_kfree_skb_any(skb);
22704 return -EINVAL;
22705 }
22706@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22707 if (!vc->tx) {
22708 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22709 card->index);
22710- atomic_inc(&vcc->stats->tx_err);
22711+ atomic_inc_unchecked(&vcc->stats->tx_err);
22712 dev_kfree_skb_any(skb);
22713 return -EINVAL;
22714 }
22715@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22716 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22717 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22718 card->index);
22719- atomic_inc(&vcc->stats->tx_err);
22720+ atomic_inc_unchecked(&vcc->stats->tx_err);
22721 dev_kfree_skb_any(skb);
22722 return -EINVAL;
22723 }
22724
22725 if (skb_shinfo(skb)->nr_frags != 0) {
22726 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22727- atomic_inc(&vcc->stats->tx_err);
22728+ atomic_inc_unchecked(&vcc->stats->tx_err);
22729 dev_kfree_skb_any(skb);
22730 return -EINVAL;
22731 }
22732@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22733 }
22734
22735 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22736- atomic_inc(&vcc->stats->tx_err);
22737+ atomic_inc_unchecked(&vcc->stats->tx_err);
22738 dev_kfree_skb_any(skb);
22739 return -EIO;
22740 }
22741- atomic_inc(&vcc->stats->tx);
22742+ atomic_inc_unchecked(&vcc->stats->tx);
22743
22744 return 0;
22745 }
22746@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22747 printk
22748 ("nicstar%d: Can't allocate buffers for aal0.\n",
22749 card->index);
22750- atomic_add(i, &vcc->stats->rx_drop);
22751+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22752 break;
22753 }
22754 if (!atm_charge(vcc, sb->truesize)) {
22755 RXPRINTK
22756 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22757 card->index);
22758- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22759+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22760 dev_kfree_skb_any(sb);
22761 break;
22762 }
22763@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22764 ATM_SKB(sb)->vcc = vcc;
22765 __net_timestamp(sb);
22766 vcc->push(vcc, sb);
22767- atomic_inc(&vcc->stats->rx);
22768+ atomic_inc_unchecked(&vcc->stats->rx);
22769 cell += ATM_CELL_PAYLOAD;
22770 }
22771
22772@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22773 if (iovb == NULL) {
22774 printk("nicstar%d: Out of iovec buffers.\n",
22775 card->index);
22776- atomic_inc(&vcc->stats->rx_drop);
22777+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22778 recycle_rx_buf(card, skb);
22779 return;
22780 }
22781@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22782 small or large buffer itself. */
22783 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22784 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22785- atomic_inc(&vcc->stats->rx_err);
22786+ atomic_inc_unchecked(&vcc->stats->rx_err);
22787 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22788 NS_MAX_IOVECS);
22789 NS_PRV_IOVCNT(iovb) = 0;
22790@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22791 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22792 card->index);
22793 which_list(card, skb);
22794- atomic_inc(&vcc->stats->rx_err);
22795+ atomic_inc_unchecked(&vcc->stats->rx_err);
22796 recycle_rx_buf(card, skb);
22797 vc->rx_iov = NULL;
22798 recycle_iov_buf(card, iovb);
22799@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22800 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22801 card->index);
22802 which_list(card, skb);
22803- atomic_inc(&vcc->stats->rx_err);
22804+ atomic_inc_unchecked(&vcc->stats->rx_err);
22805 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22806 NS_PRV_IOVCNT(iovb));
22807 vc->rx_iov = NULL;
22808@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22809 printk(" - PDU size mismatch.\n");
22810 else
22811 printk(".\n");
22812- atomic_inc(&vcc->stats->rx_err);
22813+ atomic_inc_unchecked(&vcc->stats->rx_err);
22814 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22815 NS_PRV_IOVCNT(iovb));
22816 vc->rx_iov = NULL;
22817@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22818 /* skb points to a small buffer */
22819 if (!atm_charge(vcc, skb->truesize)) {
22820 push_rxbufs(card, skb);
22821- atomic_inc(&vcc->stats->rx_drop);
22822+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22823 } else {
22824 skb_put(skb, len);
22825 dequeue_sm_buf(card, skb);
22826@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22827 ATM_SKB(skb)->vcc = vcc;
22828 __net_timestamp(skb);
22829 vcc->push(vcc, skb);
22830- atomic_inc(&vcc->stats->rx);
22831+ atomic_inc_unchecked(&vcc->stats->rx);
22832 }
22833 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22834 struct sk_buff *sb;
22835@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22836 if (len <= NS_SMBUFSIZE) {
22837 if (!atm_charge(vcc, sb->truesize)) {
22838 push_rxbufs(card, sb);
22839- atomic_inc(&vcc->stats->rx_drop);
22840+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22841 } else {
22842 skb_put(sb, len);
22843 dequeue_sm_buf(card, sb);
22844@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22845 ATM_SKB(sb)->vcc = vcc;
22846 __net_timestamp(sb);
22847 vcc->push(vcc, sb);
22848- atomic_inc(&vcc->stats->rx);
22849+ atomic_inc_unchecked(&vcc->stats->rx);
22850 }
22851
22852 push_rxbufs(card, skb);
22853@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22854
22855 if (!atm_charge(vcc, skb->truesize)) {
22856 push_rxbufs(card, skb);
22857- atomic_inc(&vcc->stats->rx_drop);
22858+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22859 } else {
22860 dequeue_lg_buf(card, skb);
22861 #ifdef NS_USE_DESTRUCTORS
22862@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22863 ATM_SKB(skb)->vcc = vcc;
22864 __net_timestamp(skb);
22865 vcc->push(vcc, skb);
22866- atomic_inc(&vcc->stats->rx);
22867+ atomic_inc_unchecked(&vcc->stats->rx);
22868 }
22869
22870 push_rxbufs(card, sb);
22871@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22872 printk
22873 ("nicstar%d: Out of huge buffers.\n",
22874 card->index);
22875- atomic_inc(&vcc->stats->rx_drop);
22876+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22877 recycle_iovec_rx_bufs(card,
22878 (struct iovec *)
22879 iovb->data,
22880@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22881 card->hbpool.count++;
22882 } else
22883 dev_kfree_skb_any(hb);
22884- atomic_inc(&vcc->stats->rx_drop);
22885+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22886 } else {
22887 /* Copy the small buffer to the huge buffer */
22888 sb = (struct sk_buff *)iov->iov_base;
22889@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22890 #endif /* NS_USE_DESTRUCTORS */
22891 __net_timestamp(hb);
22892 vcc->push(vcc, hb);
22893- atomic_inc(&vcc->stats->rx);
22894+ atomic_inc_unchecked(&vcc->stats->rx);
22895 }
22896 }
22897
22898diff -urNp linux-3.0.3/drivers/atm/solos-pci.c linux-3.0.3/drivers/atm/solos-pci.c
22899--- linux-3.0.3/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22900+++ linux-3.0.3/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22901@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22902 }
22903 atm_charge(vcc, skb->truesize);
22904 vcc->push(vcc, skb);
22905- atomic_inc(&vcc->stats->rx);
22906+ atomic_inc_unchecked(&vcc->stats->rx);
22907 break;
22908
22909 case PKT_STATUS:
22910@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22911 char msg[500];
22912 char item[10];
22913
22914+ pax_track_stack();
22915+
22916 len = buf->len;
22917 for (i = 0; i < len; i++){
22918 if(i % 8 == 0)
22919@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22920 vcc = SKB_CB(oldskb)->vcc;
22921
22922 if (vcc) {
22923- atomic_inc(&vcc->stats->tx);
22924+ atomic_inc_unchecked(&vcc->stats->tx);
22925 solos_pop(vcc, oldskb);
22926 } else
22927 dev_kfree_skb_irq(oldskb);
22928diff -urNp linux-3.0.3/drivers/atm/suni.c linux-3.0.3/drivers/atm/suni.c
22929--- linux-3.0.3/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22930+++ linux-3.0.3/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22931@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22932
22933
22934 #define ADD_LIMITED(s,v) \
22935- atomic_add((v),&stats->s); \
22936- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22937+ atomic_add_unchecked((v),&stats->s); \
22938+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22939
22940
22941 static void suni_hz(unsigned long from_timer)
22942diff -urNp linux-3.0.3/drivers/atm/uPD98402.c linux-3.0.3/drivers/atm/uPD98402.c
22943--- linux-3.0.3/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22944+++ linux-3.0.3/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22945@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22946 struct sonet_stats tmp;
22947 int error = 0;
22948
22949- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22950+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22951 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22952 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22953 if (zero && !error) {
22954@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22955
22956
22957 #define ADD_LIMITED(s,v) \
22958- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22959- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22960- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22961+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22962+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22963+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22964
22965
22966 static void stat_event(struct atm_dev *dev)
22967@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22968 if (reason & uPD98402_INT_PFM) stat_event(dev);
22969 if (reason & uPD98402_INT_PCO) {
22970 (void) GET(PCOCR); /* clear interrupt cause */
22971- atomic_add(GET(HECCT),
22972+ atomic_add_unchecked(GET(HECCT),
22973 &PRIV(dev)->sonet_stats.uncorr_hcs);
22974 }
22975 if ((reason & uPD98402_INT_RFO) &&
22976@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22977 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22978 uPD98402_INT_LOS),PIMR); /* enable them */
22979 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22980- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22981- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22982- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22983+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22984+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22985+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22986 return 0;
22987 }
22988
22989diff -urNp linux-3.0.3/drivers/atm/zatm.c linux-3.0.3/drivers/atm/zatm.c
22990--- linux-3.0.3/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
22991+++ linux-3.0.3/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
22992@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22993 }
22994 if (!size) {
22995 dev_kfree_skb_irq(skb);
22996- if (vcc) atomic_inc(&vcc->stats->rx_err);
22997+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
22998 continue;
22999 }
23000 if (!atm_charge(vcc,skb->truesize)) {
23001@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23002 skb->len = size;
23003 ATM_SKB(skb)->vcc = vcc;
23004 vcc->push(vcc,skb);
23005- atomic_inc(&vcc->stats->rx);
23006+ atomic_inc_unchecked(&vcc->stats->rx);
23007 }
23008 zout(pos & 0xffff,MTA(mbx));
23009 #if 0 /* probably a stupid idea */
23010@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23011 skb_queue_head(&zatm_vcc->backlog,skb);
23012 break;
23013 }
23014- atomic_inc(&vcc->stats->tx);
23015+ atomic_inc_unchecked(&vcc->stats->tx);
23016 wake_up(&zatm_vcc->tx_wait);
23017 }
23018
23019diff -urNp linux-3.0.3/drivers/base/power/wakeup.c linux-3.0.3/drivers/base/power/wakeup.c
23020--- linux-3.0.3/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23021+++ linux-3.0.3/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23022@@ -29,14 +29,14 @@ bool events_check_enabled;
23023 * They need to be modified together atomically, so it's better to use one
23024 * atomic variable to hold them both.
23025 */
23026-static atomic_t combined_event_count = ATOMIC_INIT(0);
23027+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23028
23029 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23030 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23031
23032 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23033 {
23034- unsigned int comb = atomic_read(&combined_event_count);
23035+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23036
23037 *cnt = (comb >> IN_PROGRESS_BITS);
23038 *inpr = comb & MAX_IN_PROGRESS;
23039@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23040 ws->last_time = ktime_get();
23041
23042 /* Increment the counter of events in progress. */
23043- atomic_inc(&combined_event_count);
23044+ atomic_inc_unchecked(&combined_event_count);
23045 }
23046
23047 /**
23048@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23049 * Increment the counter of registered wakeup events and decrement the
23050 * couter of wakeup events in progress simultaneously.
23051 */
23052- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23053+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23054 }
23055
23056 /**
23057diff -urNp linux-3.0.3/drivers/block/cciss.c linux-3.0.3/drivers/block/cciss.c
23058--- linux-3.0.3/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23059+++ linux-3.0.3/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23060@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23061 int err;
23062 u32 cp;
23063
23064+ memset(&arg64, 0, sizeof(arg64));
23065+
23066 err = 0;
23067 err |=
23068 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23069@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23070 while (!list_empty(&h->reqQ)) {
23071 c = list_entry(h->reqQ.next, CommandList_struct, list);
23072 /* can't do anything if fifo is full */
23073- if ((h->access.fifo_full(h))) {
23074+ if ((h->access->fifo_full(h))) {
23075 dev_warn(&h->pdev->dev, "fifo full\n");
23076 break;
23077 }
23078@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23079 h->Qdepth--;
23080
23081 /* Tell the controller execute command */
23082- h->access.submit_command(h, c);
23083+ h->access->submit_command(h, c);
23084
23085 /* Put job onto the completed Q */
23086 addQ(&h->cmpQ, c);
23087@@ -3422,17 +3424,17 @@ startio:
23088
23089 static inline unsigned long get_next_completion(ctlr_info_t *h)
23090 {
23091- return h->access.command_completed(h);
23092+ return h->access->command_completed(h);
23093 }
23094
23095 static inline int interrupt_pending(ctlr_info_t *h)
23096 {
23097- return h->access.intr_pending(h);
23098+ return h->access->intr_pending(h);
23099 }
23100
23101 static inline long interrupt_not_for_us(ctlr_info_t *h)
23102 {
23103- return ((h->access.intr_pending(h) == 0) ||
23104+ return ((h->access->intr_pending(h) == 0) ||
23105 (h->interrupts_enabled == 0));
23106 }
23107
23108@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23109 u32 a;
23110
23111 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23112- return h->access.command_completed(h);
23113+ return h->access->command_completed(h);
23114
23115 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23116 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23117@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23118 trans_support & CFGTBL_Trans_use_short_tags);
23119
23120 /* Change the access methods to the performant access methods */
23121- h->access = SA5_performant_access;
23122+ h->access = &SA5_performant_access;
23123 h->transMethod = CFGTBL_Trans_Performant;
23124
23125 return;
23126@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23127 if (prod_index < 0)
23128 return -ENODEV;
23129 h->product_name = products[prod_index].product_name;
23130- h->access = *(products[prod_index].access);
23131+ h->access = products[prod_index].access;
23132
23133 if (cciss_board_disabled(h)) {
23134 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23135@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23136 }
23137
23138 /* make sure the board interrupts are off */
23139- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23140+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23141 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23142 if (rc)
23143 goto clean2;
23144@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23145 * fake ones to scoop up any residual completions.
23146 */
23147 spin_lock_irqsave(&h->lock, flags);
23148- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23149+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23150 spin_unlock_irqrestore(&h->lock, flags);
23151 free_irq(h->intr[PERF_MODE_INT], h);
23152 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23153@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23154 dev_info(&h->pdev->dev, "Board READY.\n");
23155 dev_info(&h->pdev->dev,
23156 "Waiting for stale completions to drain.\n");
23157- h->access.set_intr_mask(h, CCISS_INTR_ON);
23158+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23159 msleep(10000);
23160- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23161+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23162
23163 rc = controller_reset_failed(h->cfgtable);
23164 if (rc)
23165@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23166 cciss_scsi_setup(h);
23167
23168 /* Turn the interrupts on so we can service requests */
23169- h->access.set_intr_mask(h, CCISS_INTR_ON);
23170+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23171
23172 /* Get the firmware version */
23173 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23174@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23175 kfree(flush_buf);
23176 if (return_code != IO_OK)
23177 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23178- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23179+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23180 free_irq(h->intr[PERF_MODE_INT], h);
23181 }
23182
23183diff -urNp linux-3.0.3/drivers/block/cciss.h linux-3.0.3/drivers/block/cciss.h
23184--- linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23185+++ linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23186@@ -100,7 +100,7 @@ struct ctlr_info
23187 /* information about each logical volume */
23188 drive_info_struct *drv[CISS_MAX_LUN];
23189
23190- struct access_method access;
23191+ struct access_method *access;
23192
23193 /* queue and queue Info */
23194 struct list_head reqQ;
23195diff -urNp linux-3.0.3/drivers/block/cpqarray.c linux-3.0.3/drivers/block/cpqarray.c
23196--- linux-3.0.3/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23197+++ linux-3.0.3/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23198@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23199 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23200 goto Enomem4;
23201 }
23202- hba[i]->access.set_intr_mask(hba[i], 0);
23203+ hba[i]->access->set_intr_mask(hba[i], 0);
23204 if (request_irq(hba[i]->intr, do_ida_intr,
23205 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23206 {
23207@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23208 add_timer(&hba[i]->timer);
23209
23210 /* Enable IRQ now that spinlock and rate limit timer are set up */
23211- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23212+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23213
23214 for(j=0; j<NWD; j++) {
23215 struct gendisk *disk = ida_gendisk[i][j];
23216@@ -694,7 +694,7 @@ DBGINFO(
23217 for(i=0; i<NR_PRODUCTS; i++) {
23218 if (board_id == products[i].board_id) {
23219 c->product_name = products[i].product_name;
23220- c->access = *(products[i].access);
23221+ c->access = products[i].access;
23222 break;
23223 }
23224 }
23225@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23226 hba[ctlr]->intr = intr;
23227 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23228 hba[ctlr]->product_name = products[j].product_name;
23229- hba[ctlr]->access = *(products[j].access);
23230+ hba[ctlr]->access = products[j].access;
23231 hba[ctlr]->ctlr = ctlr;
23232 hba[ctlr]->board_id = board_id;
23233 hba[ctlr]->pci_dev = NULL; /* not PCI */
23234@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23235 struct scatterlist tmp_sg[SG_MAX];
23236 int i, dir, seg;
23237
23238+ pax_track_stack();
23239+
23240 queue_next:
23241 creq = blk_peek_request(q);
23242 if (!creq)
23243@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23244
23245 while((c = h->reqQ) != NULL) {
23246 /* Can't do anything if we're busy */
23247- if (h->access.fifo_full(h) == 0)
23248+ if (h->access->fifo_full(h) == 0)
23249 return;
23250
23251 /* Get the first entry from the request Q */
23252@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23253 h->Qdepth--;
23254
23255 /* Tell the controller to do our bidding */
23256- h->access.submit_command(h, c);
23257+ h->access->submit_command(h, c);
23258
23259 /* Get onto the completion Q */
23260 addQ(&h->cmpQ, c);
23261@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23262 unsigned long flags;
23263 __u32 a,a1;
23264
23265- istat = h->access.intr_pending(h);
23266+ istat = h->access->intr_pending(h);
23267 /* Is this interrupt for us? */
23268 if (istat == 0)
23269 return IRQ_NONE;
23270@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23271 */
23272 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23273 if (istat & FIFO_NOT_EMPTY) {
23274- while((a = h->access.command_completed(h))) {
23275+ while((a = h->access->command_completed(h))) {
23276 a1 = a; a &= ~3;
23277 if ((c = h->cmpQ) == NULL)
23278 {
23279@@ -1449,11 +1451,11 @@ static int sendcmd(
23280 /*
23281 * Disable interrupt
23282 */
23283- info_p->access.set_intr_mask(info_p, 0);
23284+ info_p->access->set_intr_mask(info_p, 0);
23285 /* Make sure there is room in the command FIFO */
23286 /* Actually it should be completely empty at this time. */
23287 for (i = 200000; i > 0; i--) {
23288- temp = info_p->access.fifo_full(info_p);
23289+ temp = info_p->access->fifo_full(info_p);
23290 if (temp != 0) {
23291 break;
23292 }
23293@@ -1466,7 +1468,7 @@ DBG(
23294 /*
23295 * Send the cmd
23296 */
23297- info_p->access.submit_command(info_p, c);
23298+ info_p->access->submit_command(info_p, c);
23299 complete = pollcomplete(ctlr);
23300
23301 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23302@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23303 * we check the new geometry. Then turn interrupts back on when
23304 * we're done.
23305 */
23306- host->access.set_intr_mask(host, 0);
23307+ host->access->set_intr_mask(host, 0);
23308 getgeometry(ctlr);
23309- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23310+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23311
23312 for(i=0; i<NWD; i++) {
23313 struct gendisk *disk = ida_gendisk[ctlr][i];
23314@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23315 /* Wait (up to 2 seconds) for a command to complete */
23316
23317 for (i = 200000; i > 0; i--) {
23318- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23319+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23320 if (done == 0) {
23321 udelay(10); /* a short fixed delay */
23322 } else
23323diff -urNp linux-3.0.3/drivers/block/cpqarray.h linux-3.0.3/drivers/block/cpqarray.h
23324--- linux-3.0.3/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23325+++ linux-3.0.3/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23326@@ -99,7 +99,7 @@ struct ctlr_info {
23327 drv_info_t drv[NWD];
23328 struct proc_dir_entry *proc;
23329
23330- struct access_method access;
23331+ struct access_method *access;
23332
23333 cmdlist_t *reqQ;
23334 cmdlist_t *cmpQ;
23335diff -urNp linux-3.0.3/drivers/block/DAC960.c linux-3.0.3/drivers/block/DAC960.c
23336--- linux-3.0.3/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23337+++ linux-3.0.3/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23338@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23339 unsigned long flags;
23340 int Channel, TargetID;
23341
23342+ pax_track_stack();
23343+
23344 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23345 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23346 sizeof(DAC960_SCSI_Inquiry_T) +
23347diff -urNp linux-3.0.3/drivers/block/drbd/drbd_int.h linux-3.0.3/drivers/block/drbd/drbd_int.h
23348--- linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23349+++ linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23350@@ -737,7 +737,7 @@ struct drbd_request;
23351 struct drbd_epoch {
23352 struct list_head list;
23353 unsigned int barrier_nr;
23354- atomic_t epoch_size; /* increased on every request added. */
23355+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23356 atomic_t active; /* increased on every req. added, and dec on every finished. */
23357 unsigned long flags;
23358 };
23359@@ -1109,7 +1109,7 @@ struct drbd_conf {
23360 void *int_dig_in;
23361 void *int_dig_vv;
23362 wait_queue_head_t seq_wait;
23363- atomic_t packet_seq;
23364+ atomic_unchecked_t packet_seq;
23365 unsigned int peer_seq;
23366 spinlock_t peer_seq_lock;
23367 unsigned int minor;
23368diff -urNp linux-3.0.3/drivers/block/drbd/drbd_main.c linux-3.0.3/drivers/block/drbd/drbd_main.c
23369--- linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23370+++ linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23371@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23372 p.sector = sector;
23373 p.block_id = block_id;
23374 p.blksize = blksize;
23375- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23376+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23377
23378 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23379 return false;
23380@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23381 p.sector = cpu_to_be64(req->sector);
23382 p.block_id = (unsigned long)req;
23383 p.seq_num = cpu_to_be32(req->seq_num =
23384- atomic_add_return(1, &mdev->packet_seq));
23385+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23386
23387 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23388
23389@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23390 atomic_set(&mdev->unacked_cnt, 0);
23391 atomic_set(&mdev->local_cnt, 0);
23392 atomic_set(&mdev->net_cnt, 0);
23393- atomic_set(&mdev->packet_seq, 0);
23394+ atomic_set_unchecked(&mdev->packet_seq, 0);
23395 atomic_set(&mdev->pp_in_use, 0);
23396 atomic_set(&mdev->pp_in_use_by_net, 0);
23397 atomic_set(&mdev->rs_sect_in, 0);
23398@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23399 mdev->receiver.t_state);
23400
23401 /* no need to lock it, I'm the only thread alive */
23402- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23403- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23404+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23405+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23406 mdev->al_writ_cnt =
23407 mdev->bm_writ_cnt =
23408 mdev->read_cnt =
23409diff -urNp linux-3.0.3/drivers/block/drbd/drbd_nl.c linux-3.0.3/drivers/block/drbd/drbd_nl.c
23410--- linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23411+++ linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23412@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23413 module_put(THIS_MODULE);
23414 }
23415
23416-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23417+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23418
23419 static unsigned short *
23420 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23421@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23422 cn_reply->id.idx = CN_IDX_DRBD;
23423 cn_reply->id.val = CN_VAL_DRBD;
23424
23425- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23426+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23427 cn_reply->ack = 0; /* not used here. */
23428 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23429 (int)((char *)tl - (char *)reply->tag_list);
23430@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23431 cn_reply->id.idx = CN_IDX_DRBD;
23432 cn_reply->id.val = CN_VAL_DRBD;
23433
23434- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23435+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23436 cn_reply->ack = 0; /* not used here. */
23437 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23438 (int)((char *)tl - (char *)reply->tag_list);
23439@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23440 cn_reply->id.idx = CN_IDX_DRBD;
23441 cn_reply->id.val = CN_VAL_DRBD;
23442
23443- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23444+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23445 cn_reply->ack = 0; // not used here.
23446 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23447 (int)((char*)tl - (char*)reply->tag_list);
23448@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23449 cn_reply->id.idx = CN_IDX_DRBD;
23450 cn_reply->id.val = CN_VAL_DRBD;
23451
23452- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23453+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23454 cn_reply->ack = 0; /* not used here. */
23455 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23456 (int)((char *)tl - (char *)reply->tag_list);
23457diff -urNp linux-3.0.3/drivers/block/drbd/drbd_receiver.c linux-3.0.3/drivers/block/drbd/drbd_receiver.c
23458--- linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23459+++ linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23460@@ -894,7 +894,7 @@ retry:
23461 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23462 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23463
23464- atomic_set(&mdev->packet_seq, 0);
23465+ atomic_set_unchecked(&mdev->packet_seq, 0);
23466 mdev->peer_seq = 0;
23467
23468 drbd_thread_start(&mdev->asender);
23469@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23470 do {
23471 next_epoch = NULL;
23472
23473- epoch_size = atomic_read(&epoch->epoch_size);
23474+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23475
23476 switch (ev & ~EV_CLEANUP) {
23477 case EV_PUT:
23478@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23479 rv = FE_DESTROYED;
23480 } else {
23481 epoch->flags = 0;
23482- atomic_set(&epoch->epoch_size, 0);
23483+ atomic_set_unchecked(&epoch->epoch_size, 0);
23484 /* atomic_set(&epoch->active, 0); is already zero */
23485 if (rv == FE_STILL_LIVE)
23486 rv = FE_RECYCLED;
23487@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23488 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23489 drbd_flush(mdev);
23490
23491- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23492+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23493 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23494 if (epoch)
23495 break;
23496 }
23497
23498 epoch = mdev->current_epoch;
23499- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23500+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23501
23502 D_ASSERT(atomic_read(&epoch->active) == 0);
23503 D_ASSERT(epoch->flags == 0);
23504@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23505 }
23506
23507 epoch->flags = 0;
23508- atomic_set(&epoch->epoch_size, 0);
23509+ atomic_set_unchecked(&epoch->epoch_size, 0);
23510 atomic_set(&epoch->active, 0);
23511
23512 spin_lock(&mdev->epoch_lock);
23513- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23514+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23515 list_add(&epoch->list, &mdev->current_epoch->list);
23516 mdev->current_epoch = epoch;
23517 mdev->epochs++;
23518@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23519 spin_unlock(&mdev->peer_seq_lock);
23520
23521 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23522- atomic_inc(&mdev->current_epoch->epoch_size);
23523+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23524 return drbd_drain_block(mdev, data_size);
23525 }
23526
23527@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23528
23529 spin_lock(&mdev->epoch_lock);
23530 e->epoch = mdev->current_epoch;
23531- atomic_inc(&e->epoch->epoch_size);
23532+ atomic_inc_unchecked(&e->epoch->epoch_size);
23533 atomic_inc(&e->epoch->active);
23534 spin_unlock(&mdev->epoch_lock);
23535
23536@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23537 D_ASSERT(list_empty(&mdev->done_ee));
23538
23539 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23540- atomic_set(&mdev->current_epoch->epoch_size, 0);
23541+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23542 D_ASSERT(list_empty(&mdev->current_epoch->list));
23543 }
23544
23545diff -urNp linux-3.0.3/drivers/block/nbd.c linux-3.0.3/drivers/block/nbd.c
23546--- linux-3.0.3/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23547+++ linux-3.0.3/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23548@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23549 struct kvec iov;
23550 sigset_t blocked, oldset;
23551
23552+ pax_track_stack();
23553+
23554 if (unlikely(!sock)) {
23555 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23556 lo->disk->disk_name, (send ? "send" : "recv"));
23557@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23558 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23559 unsigned int cmd, unsigned long arg)
23560 {
23561+ pax_track_stack();
23562+
23563 switch (cmd) {
23564 case NBD_DISCONNECT: {
23565 struct request sreq;
23566diff -urNp linux-3.0.3/drivers/char/agp/frontend.c linux-3.0.3/drivers/char/agp/frontend.c
23567--- linux-3.0.3/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23568+++ linux-3.0.3/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23569@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23570 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23571 return -EFAULT;
23572
23573- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23574+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23575 return -EFAULT;
23576
23577 client = agp_find_client_by_pid(reserve.pid);
23578diff -urNp linux-3.0.3/drivers/char/briq_panel.c linux-3.0.3/drivers/char/briq_panel.c
23579--- linux-3.0.3/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23580+++ linux-3.0.3/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23581@@ -9,6 +9,7 @@
23582 #include <linux/types.h>
23583 #include <linux/errno.h>
23584 #include <linux/tty.h>
23585+#include <linux/mutex.h>
23586 #include <linux/timer.h>
23587 #include <linux/kernel.h>
23588 #include <linux/wait.h>
23589@@ -34,6 +35,7 @@ static int vfd_is_open;
23590 static unsigned char vfd[40];
23591 static int vfd_cursor;
23592 static unsigned char ledpb, led;
23593+static DEFINE_MUTEX(vfd_mutex);
23594
23595 static void update_vfd(void)
23596 {
23597@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23598 if (!vfd_is_open)
23599 return -EBUSY;
23600
23601+ mutex_lock(&vfd_mutex);
23602 for (;;) {
23603 char c;
23604 if (!indx)
23605 break;
23606- if (get_user(c, buf))
23607+ if (get_user(c, buf)) {
23608+ mutex_unlock(&vfd_mutex);
23609 return -EFAULT;
23610+ }
23611 if (esc) {
23612 set_led(c);
23613 esc = 0;
23614@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23615 buf++;
23616 }
23617 update_vfd();
23618+ mutex_unlock(&vfd_mutex);
23619
23620 return len;
23621 }
23622diff -urNp linux-3.0.3/drivers/char/genrtc.c linux-3.0.3/drivers/char/genrtc.c
23623--- linux-3.0.3/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23624+++ linux-3.0.3/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23625@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23626 switch (cmd) {
23627
23628 case RTC_PLL_GET:
23629+ memset(&pll, 0, sizeof(pll));
23630 if (get_rtc_pll(&pll))
23631 return -EINVAL;
23632 else
23633diff -urNp linux-3.0.3/drivers/char/hpet.c linux-3.0.3/drivers/char/hpet.c
23634--- linux-3.0.3/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23635+++ linux-3.0.3/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23636@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23637 }
23638
23639 static int
23640-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23641+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23642 struct hpet_info *info)
23643 {
23644 struct hpet_timer __iomem *timer;
23645diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c
23646--- linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23647+++ linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23648@@ -415,7 +415,7 @@ struct ipmi_smi {
23649 struct proc_dir_entry *proc_dir;
23650 char proc_dir_name[10];
23651
23652- atomic_t stats[IPMI_NUM_STATS];
23653+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23654
23655 /*
23656 * run_to_completion duplicate of smb_info, smi_info
23657@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23658
23659
23660 #define ipmi_inc_stat(intf, stat) \
23661- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23662+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23663 #define ipmi_get_stat(intf, stat) \
23664- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23665+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23666
23667 static int is_lan_addr(struct ipmi_addr *addr)
23668 {
23669@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23670 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23671 init_waitqueue_head(&intf->waitq);
23672 for (i = 0; i < IPMI_NUM_STATS; i++)
23673- atomic_set(&intf->stats[i], 0);
23674+ atomic_set_unchecked(&intf->stats[i], 0);
23675
23676 intf->proc_dir = NULL;
23677
23678@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23679 struct ipmi_smi_msg smi_msg;
23680 struct ipmi_recv_msg recv_msg;
23681
23682+ pax_track_stack();
23683+
23684 si = (struct ipmi_system_interface_addr *) &addr;
23685 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23686 si->channel = IPMI_BMC_CHANNEL;
23687diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c
23688--- linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23689+++ linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23690@@ -277,7 +277,7 @@ struct smi_info {
23691 unsigned char slave_addr;
23692
23693 /* Counters and things for the proc filesystem. */
23694- atomic_t stats[SI_NUM_STATS];
23695+ atomic_unchecked_t stats[SI_NUM_STATS];
23696
23697 struct task_struct *thread;
23698
23699@@ -286,9 +286,9 @@ struct smi_info {
23700 };
23701
23702 #define smi_inc_stat(smi, stat) \
23703- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23704+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23705 #define smi_get_stat(smi, stat) \
23706- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23707+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23708
23709 #define SI_MAX_PARMS 4
23710
23711@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23712 atomic_set(&new_smi->req_events, 0);
23713 new_smi->run_to_completion = 0;
23714 for (i = 0; i < SI_NUM_STATS; i++)
23715- atomic_set(&new_smi->stats[i], 0);
23716+ atomic_set_unchecked(&new_smi->stats[i], 0);
23717
23718 new_smi->interrupt_disabled = 1;
23719 atomic_set(&new_smi->stop_operation, 0);
23720diff -urNp linux-3.0.3/drivers/char/Kconfig linux-3.0.3/drivers/char/Kconfig
23721--- linux-3.0.3/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23722+++ linux-3.0.3/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23723@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23724
23725 config DEVKMEM
23726 bool "/dev/kmem virtual device support"
23727- default y
23728+ default n
23729+ depends on !GRKERNSEC_KMEM
23730 help
23731 Say Y here if you want to support the /dev/kmem device. The
23732 /dev/kmem device is rarely used, but can be used for certain
23733@@ -596,6 +597,7 @@ config DEVPORT
23734 bool
23735 depends on !M68K
23736 depends on ISA || PCI
23737+ depends on !GRKERNSEC_KMEM
23738 default y
23739
23740 source "drivers/s390/char/Kconfig"
23741diff -urNp linux-3.0.3/drivers/char/mem.c linux-3.0.3/drivers/char/mem.c
23742--- linux-3.0.3/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23743+++ linux-3.0.3/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23744@@ -18,6 +18,7 @@
23745 #include <linux/raw.h>
23746 #include <linux/tty.h>
23747 #include <linux/capability.h>
23748+#include <linux/security.h>
23749 #include <linux/ptrace.h>
23750 #include <linux/device.h>
23751 #include <linux/highmem.h>
23752@@ -34,6 +35,10 @@
23753 # include <linux/efi.h>
23754 #endif
23755
23756+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23757+extern struct file_operations grsec_fops;
23758+#endif
23759+
23760 static inline unsigned long size_inside_page(unsigned long start,
23761 unsigned long size)
23762 {
23763@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23764
23765 while (cursor < to) {
23766 if (!devmem_is_allowed(pfn)) {
23767+#ifdef CONFIG_GRKERNSEC_KMEM
23768+ gr_handle_mem_readwrite(from, to);
23769+#else
23770 printk(KERN_INFO
23771 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23772 current->comm, from, to);
23773+#endif
23774 return 0;
23775 }
23776 cursor += PAGE_SIZE;
23777@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23778 }
23779 return 1;
23780 }
23781+#elif defined(CONFIG_GRKERNSEC_KMEM)
23782+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23783+{
23784+ return 0;
23785+}
23786 #else
23787 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23788 {
23789@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23790
23791 while (count > 0) {
23792 unsigned long remaining;
23793+ char *temp;
23794
23795 sz = size_inside_page(p, count);
23796
23797@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23798 if (!ptr)
23799 return -EFAULT;
23800
23801- remaining = copy_to_user(buf, ptr, sz);
23802+#ifdef CONFIG_PAX_USERCOPY
23803+ temp = kmalloc(sz, GFP_KERNEL);
23804+ if (!temp) {
23805+ unxlate_dev_mem_ptr(p, ptr);
23806+ return -ENOMEM;
23807+ }
23808+ memcpy(temp, ptr, sz);
23809+#else
23810+ temp = ptr;
23811+#endif
23812+
23813+ remaining = copy_to_user(buf, temp, sz);
23814+
23815+#ifdef CONFIG_PAX_USERCOPY
23816+ kfree(temp);
23817+#endif
23818+
23819 unxlate_dev_mem_ptr(p, ptr);
23820 if (remaining)
23821 return -EFAULT;
23822@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23823 size_t count, loff_t *ppos)
23824 {
23825 unsigned long p = *ppos;
23826- ssize_t low_count, read, sz;
23827+ ssize_t low_count, read, sz, err = 0;
23828 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23829- int err = 0;
23830
23831 read = 0;
23832 if (p < (unsigned long) high_memory) {
23833@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23834 }
23835 #endif
23836 while (low_count > 0) {
23837+ char *temp;
23838+
23839 sz = size_inside_page(p, low_count);
23840
23841 /*
23842@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23843 */
23844 kbuf = xlate_dev_kmem_ptr((char *)p);
23845
23846- if (copy_to_user(buf, kbuf, sz))
23847+#ifdef CONFIG_PAX_USERCOPY
23848+ temp = kmalloc(sz, GFP_KERNEL);
23849+ if (!temp)
23850+ return -ENOMEM;
23851+ memcpy(temp, kbuf, sz);
23852+#else
23853+ temp = kbuf;
23854+#endif
23855+
23856+ err = copy_to_user(buf, temp, sz);
23857+
23858+#ifdef CONFIG_PAX_USERCOPY
23859+ kfree(temp);
23860+#endif
23861+
23862+ if (err)
23863 return -EFAULT;
23864 buf += sz;
23865 p += sz;
23866@@ -866,6 +913,9 @@ static const struct memdev {
23867 #ifdef CONFIG_CRASH_DUMP
23868 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23869 #endif
23870+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23871+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23872+#endif
23873 };
23874
23875 static int memory_open(struct inode *inode, struct file *filp)
23876diff -urNp linux-3.0.3/drivers/char/nvram.c linux-3.0.3/drivers/char/nvram.c
23877--- linux-3.0.3/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23878+++ linux-3.0.3/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23879@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23880
23881 spin_unlock_irq(&rtc_lock);
23882
23883- if (copy_to_user(buf, contents, tmp - contents))
23884+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23885 return -EFAULT;
23886
23887 *ppos = i;
23888diff -urNp linux-3.0.3/drivers/char/random.c linux-3.0.3/drivers/char/random.c
23889--- linux-3.0.3/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23890+++ linux-3.0.3/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23891@@ -261,8 +261,13 @@
23892 /*
23893 * Configuration information
23894 */
23895+#ifdef CONFIG_GRKERNSEC_RANDNET
23896+#define INPUT_POOL_WORDS 512
23897+#define OUTPUT_POOL_WORDS 128
23898+#else
23899 #define INPUT_POOL_WORDS 128
23900 #define OUTPUT_POOL_WORDS 32
23901+#endif
23902 #define SEC_XFER_SIZE 512
23903 #define EXTRACT_SIZE 10
23904
23905@@ -300,10 +305,17 @@ static struct poolinfo {
23906 int poolwords;
23907 int tap1, tap2, tap3, tap4, tap5;
23908 } poolinfo_table[] = {
23909+#ifdef CONFIG_GRKERNSEC_RANDNET
23910+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23911+ { 512, 411, 308, 208, 104, 1 },
23912+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23913+ { 128, 103, 76, 51, 25, 1 },
23914+#else
23915 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23916 { 128, 103, 76, 51, 25, 1 },
23917 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23918 { 32, 26, 20, 14, 7, 1 },
23919+#endif
23920 #if 0
23921 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23922 { 2048, 1638, 1231, 819, 411, 1 },
23923@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23924
23925 extract_buf(r, tmp);
23926 i = min_t(int, nbytes, EXTRACT_SIZE);
23927- if (copy_to_user(buf, tmp, i)) {
23928+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23929 ret = -EFAULT;
23930 break;
23931 }
23932@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23933 #include <linux/sysctl.h>
23934
23935 static int min_read_thresh = 8, min_write_thresh;
23936-static int max_read_thresh = INPUT_POOL_WORDS * 32;
23937+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23938 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23939 static char sysctl_bootid[16];
23940
23941diff -urNp linux-3.0.3/drivers/char/sonypi.c linux-3.0.3/drivers/char/sonypi.c
23942--- linux-3.0.3/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23943+++ linux-3.0.3/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23944@@ -55,6 +55,7 @@
23945 #include <asm/uaccess.h>
23946 #include <asm/io.h>
23947 #include <asm/system.h>
23948+#include <asm/local.h>
23949
23950 #include <linux/sonypi.h>
23951
23952@@ -491,7 +492,7 @@ static struct sonypi_device {
23953 spinlock_t fifo_lock;
23954 wait_queue_head_t fifo_proc_list;
23955 struct fasync_struct *fifo_async;
23956- int open_count;
23957+ local_t open_count;
23958 int model;
23959 struct input_dev *input_jog_dev;
23960 struct input_dev *input_key_dev;
23961@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23962 static int sonypi_misc_release(struct inode *inode, struct file *file)
23963 {
23964 mutex_lock(&sonypi_device.lock);
23965- sonypi_device.open_count--;
23966+ local_dec(&sonypi_device.open_count);
23967 mutex_unlock(&sonypi_device.lock);
23968 return 0;
23969 }
23970@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23971 {
23972 mutex_lock(&sonypi_device.lock);
23973 /* Flush input queue on first open */
23974- if (!sonypi_device.open_count)
23975+ if (!local_read(&sonypi_device.open_count))
23976 kfifo_reset(&sonypi_device.fifo);
23977- sonypi_device.open_count++;
23978+ local_inc(&sonypi_device.open_count);
23979 mutex_unlock(&sonypi_device.lock);
23980
23981 return 0;
23982diff -urNp linux-3.0.3/drivers/char/tpm/tpm_bios.c linux-3.0.3/drivers/char/tpm/tpm_bios.c
23983--- linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23984+++ linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23985@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23986 event = addr;
23987
23988 if ((event->event_type == 0 && event->event_size == 0) ||
23989- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23990+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
23991 return NULL;
23992
23993 return addr;
23994@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
23995 return NULL;
23996
23997 if ((event->event_type == 0 && event->event_size == 0) ||
23998- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
23999+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24000 return NULL;
24001
24002 (*pos)++;
24003@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24004 int i;
24005
24006 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24007- seq_putc(m, data[i]);
24008+ if (!seq_putc(m, data[i]))
24009+ return -EFAULT;
24010
24011 return 0;
24012 }
24013@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24014 log->bios_event_log_end = log->bios_event_log + len;
24015
24016 virt = acpi_os_map_memory(start, len);
24017+ if (!virt) {
24018+ kfree(log->bios_event_log);
24019+ log->bios_event_log = NULL;
24020+ return -EFAULT;
24021+ }
24022
24023 memcpy(log->bios_event_log, virt, len);
24024
24025diff -urNp linux-3.0.3/drivers/char/tpm/tpm.c linux-3.0.3/drivers/char/tpm/tpm.c
24026--- linux-3.0.3/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24027+++ linux-3.0.3/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24028@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24029 chip->vendor.req_complete_val)
24030 goto out_recv;
24031
24032- if ((status == chip->vendor.req_canceled)) {
24033+ if (status == chip->vendor.req_canceled) {
24034 dev_err(chip->dev, "Operation Canceled\n");
24035 rc = -ECANCELED;
24036 goto out;
24037@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24038
24039 struct tpm_chip *chip = dev_get_drvdata(dev);
24040
24041+ pax_track_stack();
24042+
24043 tpm_cmd.header.in = tpm_readpubek_header;
24044 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24045 "attempting to read the PUBEK");
24046diff -urNp linux-3.0.3/drivers/crypto/hifn_795x.c linux-3.0.3/drivers/crypto/hifn_795x.c
24047--- linux-3.0.3/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24048+++ linux-3.0.3/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24049@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24050 0xCA, 0x34, 0x2B, 0x2E};
24051 struct scatterlist sg;
24052
24053+ pax_track_stack();
24054+
24055 memset(src, 0, sizeof(src));
24056 memset(ctx.key, 0, sizeof(ctx.key));
24057
24058diff -urNp linux-3.0.3/drivers/crypto/padlock-aes.c linux-3.0.3/drivers/crypto/padlock-aes.c
24059--- linux-3.0.3/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24060+++ linux-3.0.3/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24061@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24062 struct crypto_aes_ctx gen_aes;
24063 int cpu;
24064
24065+ pax_track_stack();
24066+
24067 if (key_len % 8) {
24068 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24069 return -EINVAL;
24070diff -urNp linux-3.0.3/drivers/edac/edac_pci_sysfs.c linux-3.0.3/drivers/edac/edac_pci_sysfs.c
24071--- linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24072+++ linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24073@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24074 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24075 static int edac_pci_poll_msec = 1000; /* one second workq period */
24076
24077-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24078-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24079+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24080+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24081
24082 static struct kobject *edac_pci_top_main_kobj;
24083 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24084@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24085 edac_printk(KERN_CRIT, EDAC_PCI,
24086 "Signaled System Error on %s\n",
24087 pci_name(dev));
24088- atomic_inc(&pci_nonparity_count);
24089+ atomic_inc_unchecked(&pci_nonparity_count);
24090 }
24091
24092 if (status & (PCI_STATUS_PARITY)) {
24093@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24094 "Master Data Parity Error on %s\n",
24095 pci_name(dev));
24096
24097- atomic_inc(&pci_parity_count);
24098+ atomic_inc_unchecked(&pci_parity_count);
24099 }
24100
24101 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24102@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24103 "Detected Parity Error on %s\n",
24104 pci_name(dev));
24105
24106- atomic_inc(&pci_parity_count);
24107+ atomic_inc_unchecked(&pci_parity_count);
24108 }
24109 }
24110
24111@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24112 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24113 "Signaled System Error on %s\n",
24114 pci_name(dev));
24115- atomic_inc(&pci_nonparity_count);
24116+ atomic_inc_unchecked(&pci_nonparity_count);
24117 }
24118
24119 if (status & (PCI_STATUS_PARITY)) {
24120@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24121 "Master Data Parity Error on "
24122 "%s\n", pci_name(dev));
24123
24124- atomic_inc(&pci_parity_count);
24125+ atomic_inc_unchecked(&pci_parity_count);
24126 }
24127
24128 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24129@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24130 "Detected Parity Error on %s\n",
24131 pci_name(dev));
24132
24133- atomic_inc(&pci_parity_count);
24134+ atomic_inc_unchecked(&pci_parity_count);
24135 }
24136 }
24137 }
24138@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24139 if (!check_pci_errors)
24140 return;
24141
24142- before_count = atomic_read(&pci_parity_count);
24143+ before_count = atomic_read_unchecked(&pci_parity_count);
24144
24145 /* scan all PCI devices looking for a Parity Error on devices and
24146 * bridges.
24147@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24148 /* Only if operator has selected panic on PCI Error */
24149 if (edac_pci_get_panic_on_pe()) {
24150 /* If the count is different 'after' from 'before' */
24151- if (before_count != atomic_read(&pci_parity_count))
24152+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24153 panic("EDAC: PCI Parity Error");
24154 }
24155 }
24156diff -urNp linux-3.0.3/drivers/edac/i7core_edac.c linux-3.0.3/drivers/edac/i7core_edac.c
24157--- linux-3.0.3/drivers/edac/i7core_edac.c 2011-07-21 22:17:23.000000000 -0400
24158+++ linux-3.0.3/drivers/edac/i7core_edac.c 2011-08-23 21:47:55.000000000 -0400
24159@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24160 char *type, *optype, *err, *msg;
24161 unsigned long error = m->status & 0x1ff0000l;
24162 u32 optypenum = (m->status >> 4) & 0x07;
24163- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24164+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24165 u32 dimm = (m->misc >> 16) & 0x3;
24166 u32 channel = (m->misc >> 18) & 0x3;
24167 u32 syndrome = m->misc >> 32;
24168diff -urNp linux-3.0.3/drivers/edac/mce_amd.h linux-3.0.3/drivers/edac/mce_amd.h
24169--- linux-3.0.3/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24170+++ linux-3.0.3/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24171@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24172 bool (*dc_mce)(u16, u8);
24173 bool (*ic_mce)(u16, u8);
24174 bool (*nb_mce)(u16, u8);
24175-};
24176+} __no_const;
24177
24178 void amd_report_gart_errors(bool);
24179 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24180diff -urNp linux-3.0.3/drivers/firewire/core-card.c linux-3.0.3/drivers/firewire/core-card.c
24181--- linux-3.0.3/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24182+++ linux-3.0.3/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24183@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24184
24185 void fw_core_remove_card(struct fw_card *card)
24186 {
24187- struct fw_card_driver dummy_driver = dummy_driver_template;
24188+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24189
24190 card->driver->update_phy_reg(card, 4,
24191 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24192diff -urNp linux-3.0.3/drivers/firewire/core-cdev.c linux-3.0.3/drivers/firewire/core-cdev.c
24193--- linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24194+++ linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24195@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24196 int ret;
24197
24198 if ((request->channels == 0 && request->bandwidth == 0) ||
24199- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24200- request->bandwidth < 0)
24201+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24202 return -EINVAL;
24203
24204 r = kmalloc(sizeof(*r), GFP_KERNEL);
24205diff -urNp linux-3.0.3/drivers/firewire/core.h linux-3.0.3/drivers/firewire/core.h
24206--- linux-3.0.3/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24207+++ linux-3.0.3/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24208@@ -101,6 +101,7 @@ struct fw_card_driver {
24209
24210 int (*stop_iso)(struct fw_iso_context *ctx);
24211 };
24212+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24213
24214 void fw_card_initialize(struct fw_card *card,
24215 const struct fw_card_driver *driver, struct device *device);
24216diff -urNp linux-3.0.3/drivers/firewire/core-transaction.c linux-3.0.3/drivers/firewire/core-transaction.c
24217--- linux-3.0.3/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24218+++ linux-3.0.3/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24219@@ -37,6 +37,7 @@
24220 #include <linux/timer.h>
24221 #include <linux/types.h>
24222 #include <linux/workqueue.h>
24223+#include <linux/sched.h>
24224
24225 #include <asm/byteorder.h>
24226
24227@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24228 struct transaction_callback_data d;
24229 struct fw_transaction t;
24230
24231+ pax_track_stack();
24232+
24233 init_timer_on_stack(&t.split_timeout_timer);
24234 init_completion(&d.done);
24235 d.payload = payload;
24236diff -urNp linux-3.0.3/drivers/firmware/dmi_scan.c linux-3.0.3/drivers/firmware/dmi_scan.c
24237--- linux-3.0.3/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24238+++ linux-3.0.3/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24239@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24240 }
24241 }
24242 else {
24243- /*
24244- * no iounmap() for that ioremap(); it would be a no-op, but
24245- * it's so early in setup that sucker gets confused into doing
24246- * what it shouldn't if we actually call it.
24247- */
24248 p = dmi_ioremap(0xF0000, 0x10000);
24249 if (p == NULL)
24250 goto error;
24251diff -urNp linux-3.0.3/drivers/gpio/vr41xx_giu.c linux-3.0.3/drivers/gpio/vr41xx_giu.c
24252--- linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24253+++ linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24254@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24255 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24256 maskl, pendl, maskh, pendh);
24257
24258- atomic_inc(&irq_err_count);
24259+ atomic_inc_unchecked(&irq_err_count);
24260
24261 return -EINVAL;
24262 }
24263diff -urNp linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c
24264--- linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24265+++ linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24266@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24267 struct drm_crtc *tmp;
24268 int crtc_mask = 1;
24269
24270- WARN(!crtc, "checking null crtc?\n");
24271+ BUG_ON(!crtc);
24272
24273 dev = crtc->dev;
24274
24275@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24276 struct drm_encoder *encoder;
24277 bool ret = true;
24278
24279+ pax_track_stack();
24280+
24281 crtc->enabled = drm_helper_crtc_in_use(crtc);
24282 if (!crtc->enabled)
24283 return true;
24284diff -urNp linux-3.0.3/drivers/gpu/drm/drm_drv.c linux-3.0.3/drivers/gpu/drm/drm_drv.c
24285--- linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24286+++ linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24287@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24288
24289 dev = file_priv->minor->dev;
24290 atomic_inc(&dev->ioctl_count);
24291- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24292+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24293 ++file_priv->ioctl_count;
24294
24295 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24296diff -urNp linux-3.0.3/drivers/gpu/drm/drm_fops.c linux-3.0.3/drivers/gpu/drm/drm_fops.c
24297--- linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24298+++ linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24299@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24300 }
24301
24302 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24303- atomic_set(&dev->counts[i], 0);
24304+ atomic_set_unchecked(&dev->counts[i], 0);
24305
24306 dev->sigdata.lock = NULL;
24307
24308@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24309
24310 retcode = drm_open_helper(inode, filp, dev);
24311 if (!retcode) {
24312- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24313- if (!dev->open_count++)
24314+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24315+ if (local_inc_return(&dev->open_count) == 1)
24316 retcode = drm_setup(dev);
24317 }
24318 if (!retcode) {
24319@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24320
24321 mutex_lock(&drm_global_mutex);
24322
24323- DRM_DEBUG("open_count = %d\n", dev->open_count);
24324+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24325
24326 if (dev->driver->preclose)
24327 dev->driver->preclose(dev, file_priv);
24328@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24329 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24330 task_pid_nr(current),
24331 (long)old_encode_dev(file_priv->minor->device),
24332- dev->open_count);
24333+ local_read(&dev->open_count));
24334
24335 /* if the master has gone away we can't do anything with the lock */
24336 if (file_priv->minor->master)
24337@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24338 * End inline drm_release
24339 */
24340
24341- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24342- if (!--dev->open_count) {
24343+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24344+ if (local_dec_and_test(&dev->open_count)) {
24345 if (atomic_read(&dev->ioctl_count)) {
24346 DRM_ERROR("Device busy: %d\n",
24347 atomic_read(&dev->ioctl_count));
24348diff -urNp linux-3.0.3/drivers/gpu/drm/drm_global.c linux-3.0.3/drivers/gpu/drm/drm_global.c
24349--- linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24350+++ linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24351@@ -36,7 +36,7 @@
24352 struct drm_global_item {
24353 struct mutex mutex;
24354 void *object;
24355- int refcount;
24356+ atomic_t refcount;
24357 };
24358
24359 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24360@@ -49,7 +49,7 @@ void drm_global_init(void)
24361 struct drm_global_item *item = &glob[i];
24362 mutex_init(&item->mutex);
24363 item->object = NULL;
24364- item->refcount = 0;
24365+ atomic_set(&item->refcount, 0);
24366 }
24367 }
24368
24369@@ -59,7 +59,7 @@ void drm_global_release(void)
24370 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24371 struct drm_global_item *item = &glob[i];
24372 BUG_ON(item->object != NULL);
24373- BUG_ON(item->refcount != 0);
24374+ BUG_ON(atomic_read(&item->refcount) != 0);
24375 }
24376 }
24377
24378@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24379 void *object;
24380
24381 mutex_lock(&item->mutex);
24382- if (item->refcount == 0) {
24383+ if (atomic_read(&item->refcount) == 0) {
24384 item->object = kzalloc(ref->size, GFP_KERNEL);
24385 if (unlikely(item->object == NULL)) {
24386 ret = -ENOMEM;
24387@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24388 goto out_err;
24389
24390 }
24391- ++item->refcount;
24392+ atomic_inc(&item->refcount);
24393 ref->object = item->object;
24394 object = item->object;
24395 mutex_unlock(&item->mutex);
24396@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24397 struct drm_global_item *item = &glob[ref->global_type];
24398
24399 mutex_lock(&item->mutex);
24400- BUG_ON(item->refcount == 0);
24401+ BUG_ON(atomic_read(&item->refcount) == 0);
24402 BUG_ON(ref->object != item->object);
24403- if (--item->refcount == 0) {
24404+ if (atomic_dec_and_test(&item->refcount)) {
24405 ref->release(ref);
24406 item->object = NULL;
24407 }
24408diff -urNp linux-3.0.3/drivers/gpu/drm/drm_info.c linux-3.0.3/drivers/gpu/drm/drm_info.c
24409--- linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24410+++ linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24411@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24412 struct drm_local_map *map;
24413 struct drm_map_list *r_list;
24414
24415- /* Hardcoded from _DRM_FRAME_BUFFER,
24416- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24417- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24418- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24419+ static const char * const types[] = {
24420+ [_DRM_FRAME_BUFFER] = "FB",
24421+ [_DRM_REGISTERS] = "REG",
24422+ [_DRM_SHM] = "SHM",
24423+ [_DRM_AGP] = "AGP",
24424+ [_DRM_SCATTER_GATHER] = "SG",
24425+ [_DRM_CONSISTENT] = "PCI",
24426+ [_DRM_GEM] = "GEM" };
24427 const char *type;
24428 int i;
24429
24430@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24431 map = r_list->map;
24432 if (!map)
24433 continue;
24434- if (map->type < 0 || map->type > 5)
24435+ if (map->type >= ARRAY_SIZE(types))
24436 type = "??";
24437 else
24438 type = types[map->type];
24439@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24440 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24441 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24442 vma->vm_flags & VM_IO ? 'i' : '-',
24443+#ifdef CONFIG_GRKERNSEC_HIDESYM
24444+ 0);
24445+#else
24446 vma->vm_pgoff);
24447+#endif
24448
24449 #if defined(__i386__)
24450 pgprot = pgprot_val(vma->vm_page_prot);
24451diff -urNp linux-3.0.3/drivers/gpu/drm/drm_ioctl.c linux-3.0.3/drivers/gpu/drm/drm_ioctl.c
24452--- linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24453+++ linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24454@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24455 stats->data[i].value =
24456 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24457 else
24458- stats->data[i].value = atomic_read(&dev->counts[i]);
24459+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24460 stats->data[i].type = dev->types[i];
24461 }
24462
24463diff -urNp linux-3.0.3/drivers/gpu/drm/drm_lock.c linux-3.0.3/drivers/gpu/drm/drm_lock.c
24464--- linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24465+++ linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24466@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24467 if (drm_lock_take(&master->lock, lock->context)) {
24468 master->lock.file_priv = file_priv;
24469 master->lock.lock_time = jiffies;
24470- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24471+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24472 break; /* Got lock */
24473 }
24474
24475@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24476 return -EINVAL;
24477 }
24478
24479- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24480+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24481
24482 if (drm_lock_free(&master->lock, lock->context)) {
24483 /* FIXME: Should really bail out here. */
24484diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c
24485--- linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24486+++ linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24487@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24488 dma->buflist[vertex->idx],
24489 vertex->discard, vertex->used);
24490
24491- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24492- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24493+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24494+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24495 sarea_priv->last_enqueue = dev_priv->counter - 1;
24496 sarea_priv->last_dispatch = (int)hw_status[5];
24497
24498@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24499 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24500 mc->last_render);
24501
24502- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24503- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24504+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24505+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24506 sarea_priv->last_enqueue = dev_priv->counter - 1;
24507 sarea_priv->last_dispatch = (int)hw_status[5];
24508
24509diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h
24510--- linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24511+++ linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24512@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24513 int page_flipping;
24514
24515 wait_queue_head_t irq_queue;
24516- atomic_t irq_received;
24517- atomic_t irq_emitted;
24518+ atomic_unchecked_t irq_received;
24519+ atomic_unchecked_t irq_emitted;
24520
24521 int front_offset;
24522 } drm_i810_private_t;
24523diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c
24524--- linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24525+++ linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24526@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24527 I915_READ(GTIMR));
24528 }
24529 seq_printf(m, "Interrupts received: %d\n",
24530- atomic_read(&dev_priv->irq_received));
24531+ atomic_read_unchecked(&dev_priv->irq_received));
24532 for (i = 0; i < I915_NUM_RINGS; i++) {
24533 if (IS_GEN6(dev)) {
24534 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24535diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c
24536--- linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24537+++ linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24538@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24539 bool can_switch;
24540
24541 spin_lock(&dev->count_lock);
24542- can_switch = (dev->open_count == 0);
24543+ can_switch = (local_read(&dev->open_count) == 0);
24544 spin_unlock(&dev->count_lock);
24545 return can_switch;
24546 }
24547diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h
24548--- linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24549+++ linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24550@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24551 /* render clock increase/decrease */
24552 /* display clock increase/decrease */
24553 /* pll clock increase/decrease */
24554-};
24555+} __no_const;
24556
24557 struct intel_device_info {
24558 u8 gen;
24559@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24560 int current_page;
24561 int page_flipping;
24562
24563- atomic_t irq_received;
24564+ atomic_unchecked_t irq_received;
24565
24566 /* protects the irq masks */
24567 spinlock_t irq_lock;
24568@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24569 * will be page flipped away on the next vblank. When it
24570 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24571 */
24572- atomic_t pending_flip;
24573+ atomic_unchecked_t pending_flip;
24574 };
24575
24576 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24577@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24578 extern void intel_teardown_gmbus(struct drm_device *dev);
24579 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24580 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24581-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24582+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24583 {
24584 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24585 }
24586diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24587--- linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24588+++ linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24589@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24590 i915_gem_clflush_object(obj);
24591
24592 if (obj->base.pending_write_domain)
24593- cd->flips |= atomic_read(&obj->pending_flip);
24594+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24595
24596 /* The actual obj->write_domain will be updated with
24597 * pending_write_domain after we emit the accumulated flush for all
24598diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c
24599--- linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24600+++ linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24601@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24602 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24603 struct drm_i915_master_private *master_priv;
24604
24605- atomic_inc(&dev_priv->irq_received);
24606+ atomic_inc_unchecked(&dev_priv->irq_received);
24607
24608 /* disable master interrupt before clearing iir */
24609 de_ier = I915_READ(DEIER);
24610@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24611 struct drm_i915_master_private *master_priv;
24612 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24613
24614- atomic_inc(&dev_priv->irq_received);
24615+ atomic_inc_unchecked(&dev_priv->irq_received);
24616
24617 if (IS_GEN6(dev))
24618 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24619@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24620 int ret = IRQ_NONE, pipe;
24621 bool blc_event = false;
24622
24623- atomic_inc(&dev_priv->irq_received);
24624+ atomic_inc_unchecked(&dev_priv->irq_received);
24625
24626 iir = I915_READ(IIR);
24627
24628@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24629 {
24630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24631
24632- atomic_set(&dev_priv->irq_received, 0);
24633+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24634
24635 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24636 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24637@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24638 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24639 int pipe;
24640
24641- atomic_set(&dev_priv->irq_received, 0);
24642+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24643
24644 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24645 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24646diff -urNp linux-3.0.3/drivers/gpu/drm/i915/intel_display.c linux-3.0.3/drivers/gpu/drm/i915/intel_display.c
24647--- linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24648+++ linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24649@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24650
24651 wait_event(dev_priv->pending_flip_queue,
24652 atomic_read(&dev_priv->mm.wedged) ||
24653- atomic_read(&obj->pending_flip) == 0);
24654+ atomic_read_unchecked(&obj->pending_flip) == 0);
24655
24656 /* Big Hammer, we also need to ensure that any pending
24657 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24658@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24659 obj = to_intel_framebuffer(crtc->fb)->obj;
24660 dev_priv = crtc->dev->dev_private;
24661 wait_event(dev_priv->pending_flip_queue,
24662- atomic_read(&obj->pending_flip) == 0);
24663+ atomic_read_unchecked(&obj->pending_flip) == 0);
24664 }
24665
24666 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24667@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24668
24669 atomic_clear_mask(1 << intel_crtc->plane,
24670 &obj->pending_flip.counter);
24671- if (atomic_read(&obj->pending_flip) == 0)
24672+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24673 wake_up(&dev_priv->pending_flip_queue);
24674
24675 schedule_work(&work->work);
24676@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24677 /* Block clients from rendering to the new back buffer until
24678 * the flip occurs and the object is no longer visible.
24679 */
24680- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24681+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24682
24683 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24684 if (ret)
24685@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24686 return 0;
24687
24688 cleanup_pending:
24689- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24690+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24691 cleanup_objs:
24692 drm_gem_object_unreference(&work->old_fb_obj->base);
24693 drm_gem_object_unreference(&obj->base);
24694diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h
24695--- linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24696+++ linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24697@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24698 u32 clear_cmd;
24699 u32 maccess;
24700
24701- atomic_t vbl_received; /**< Number of vblanks received. */
24702+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24703 wait_queue_head_t fence_queue;
24704- atomic_t last_fence_retired;
24705+ atomic_unchecked_t last_fence_retired;
24706 u32 next_fence_to_post;
24707
24708 unsigned int fb_cpp;
24709diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c
24710--- linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24711+++ linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24712@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24713 if (crtc != 0)
24714 return 0;
24715
24716- return atomic_read(&dev_priv->vbl_received);
24717+ return atomic_read_unchecked(&dev_priv->vbl_received);
24718 }
24719
24720
24721@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24722 /* VBLANK interrupt */
24723 if (status & MGA_VLINEPEN) {
24724 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24725- atomic_inc(&dev_priv->vbl_received);
24726+ atomic_inc_unchecked(&dev_priv->vbl_received);
24727 drm_handle_vblank(dev, 0);
24728 handled = 1;
24729 }
24730@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24731 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24732 MGA_WRITE(MGA_PRIMEND, prim_end);
24733
24734- atomic_inc(&dev_priv->last_fence_retired);
24735+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24736 DRM_WAKEUP(&dev_priv->fence_queue);
24737 handled = 1;
24738 }
24739@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24740 * using fences.
24741 */
24742 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24743- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24744+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24745 - *sequence) <= (1 << 23)));
24746
24747 *sequence = cur_fence;
24748diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c
24749--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24750+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-23 21:47:55.000000000 -0400
24751@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24752 struct bit_table {
24753 const char id;
24754 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24755-};
24756+} __no_const;
24757
24758 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24759
24760diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h
24761--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24762+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24763@@ -227,7 +227,7 @@ struct nouveau_channel {
24764 struct list_head pending;
24765 uint32_t sequence;
24766 uint32_t sequence_ack;
24767- atomic_t last_sequence_irq;
24768+ atomic_unchecked_t last_sequence_irq;
24769 } fence;
24770
24771 /* DMA push buffer */
24772@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24773 u32 handle, u16 class);
24774 void (*set_tile_region)(struct drm_device *dev, int i);
24775 void (*tlb_flush)(struct drm_device *, int engine);
24776-};
24777+} __no_const;
24778
24779 struct nouveau_instmem_engine {
24780 void *priv;
24781@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24782 struct nouveau_mc_engine {
24783 int (*init)(struct drm_device *dev);
24784 void (*takedown)(struct drm_device *dev);
24785-};
24786+} __no_const;
24787
24788 struct nouveau_timer_engine {
24789 int (*init)(struct drm_device *dev);
24790 void (*takedown)(struct drm_device *dev);
24791 uint64_t (*read)(struct drm_device *dev);
24792-};
24793+} __no_const;
24794
24795 struct nouveau_fb_engine {
24796 int num_tiles;
24797@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24798 void (*put)(struct drm_device *, struct nouveau_mem **);
24799
24800 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24801-};
24802+} __no_const;
24803
24804 struct nouveau_engine {
24805 struct nouveau_instmem_engine instmem;
24806@@ -640,7 +640,7 @@ struct drm_nouveau_private {
24807 struct drm_global_reference mem_global_ref;
24808 struct ttm_bo_global_ref bo_global_ref;
24809 struct ttm_bo_device bdev;
24810- atomic_t validate_sequence;
24811+ atomic_unchecked_t validate_sequence;
24812 } ttm;
24813
24814 struct {
24815diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c
24816--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24817+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24818@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24819 if (USE_REFCNT(dev))
24820 sequence = nvchan_rd32(chan, 0x48);
24821 else
24822- sequence = atomic_read(&chan->fence.last_sequence_irq);
24823+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24824
24825 if (chan->fence.sequence_ack == sequence)
24826 goto out;
24827@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24828
24829 INIT_LIST_HEAD(&chan->fence.pending);
24830 spin_lock_init(&chan->fence.lock);
24831- atomic_set(&chan->fence.last_sequence_irq, 0);
24832+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24833 return 0;
24834 }
24835
24836diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c
24837--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24838+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24839@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24840 int trycnt = 0;
24841 int ret, i;
24842
24843- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24844+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24845 retry:
24846 if (++trycnt > 100000) {
24847 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24848diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c
24849--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24850+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24851@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24852 bool can_switch;
24853
24854 spin_lock(&dev->count_lock);
24855- can_switch = (dev->open_count == 0);
24856+ can_switch = (local_read(&dev->open_count) == 0);
24857 spin_unlock(&dev->count_lock);
24858 return can_switch;
24859 }
24860diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c
24861--- linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24862+++ linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24863@@ -560,7 +560,7 @@ static int
24864 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24865 u32 class, u32 mthd, u32 data)
24866 {
24867- atomic_set(&chan->fence.last_sequence_irq, data);
24868+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24869 return 0;
24870 }
24871
24872diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c
24873--- linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24874+++ linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24875@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24876
24877 /* GH: Simple idle check.
24878 */
24879- atomic_set(&dev_priv->idle_count, 0);
24880+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24881
24882 /* We don't support anything other than bus-mastering ring mode,
24883 * but the ring can be in either AGP or PCI space for the ring
24884diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h
24885--- linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24886+++ linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24887@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24888 int is_pci;
24889 unsigned long cce_buffers_offset;
24890
24891- atomic_t idle_count;
24892+ atomic_unchecked_t idle_count;
24893
24894 int page_flipping;
24895 int current_page;
24896 u32 crtc_offset;
24897 u32 crtc_offset_cntl;
24898
24899- atomic_t vbl_received;
24900+ atomic_unchecked_t vbl_received;
24901
24902 u32 color_fmt;
24903 unsigned int front_offset;
24904diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c
24905--- linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24906+++ linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24907@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24908 if (crtc != 0)
24909 return 0;
24910
24911- return atomic_read(&dev_priv->vbl_received);
24912+ return atomic_read_unchecked(&dev_priv->vbl_received);
24913 }
24914
24915 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24916@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24917 /* VBLANK interrupt */
24918 if (status & R128_CRTC_VBLANK_INT) {
24919 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24920- atomic_inc(&dev_priv->vbl_received);
24921+ atomic_inc_unchecked(&dev_priv->vbl_received);
24922 drm_handle_vblank(dev, 0);
24923 return IRQ_HANDLED;
24924 }
24925diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_state.c linux-3.0.3/drivers/gpu/drm/r128/r128_state.c
24926--- linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24927+++ linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24928@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24929
24930 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24931 {
24932- if (atomic_read(&dev_priv->idle_count) == 0)
24933+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24934 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24935 else
24936- atomic_set(&dev_priv->idle_count, 0);
24937+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24938 }
24939
24940 #endif
24941diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/atom.c linux-3.0.3/drivers/gpu/drm/radeon/atom.c
24942--- linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24943+++ linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24944@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24945 char name[512];
24946 int i;
24947
24948+ pax_track_stack();
24949+
24950 ctx->card = card;
24951 ctx->bios = bios;
24952
24953diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c
24954--- linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24955+++ linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24956@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24957 regex_t mask_rex;
24958 regmatch_t match[4];
24959 char buf[1024];
24960- size_t end;
24961+ long end;
24962 int len;
24963 int done = 0;
24964 int r;
24965 unsigned o;
24966 struct offset *offset;
24967 char last_reg_s[10];
24968- int last_reg;
24969+ unsigned long last_reg;
24970
24971 if (regcomp
24972 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24973diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c
24974--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24975+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24976@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24977 struct radeon_gpio_rec gpio;
24978 struct radeon_hpd hpd;
24979
24980+ pax_track_stack();
24981+
24982 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24983 return false;
24984
24985diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c
24986--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24987+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24988@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24989 bool can_switch;
24990
24991 spin_lock(&dev->count_lock);
24992- can_switch = (dev->open_count == 0);
24993+ can_switch = (local_read(&dev->open_count) == 0);
24994 spin_unlock(&dev->count_lock);
24995 return can_switch;
24996 }
24997diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c
24998--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
24999+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25000@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25001 uint32_t post_div;
25002 u32 pll_out_min, pll_out_max;
25003
25004+ pax_track_stack();
25005+
25006 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25007 freq = freq * 1000;
25008
25009diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h
25010--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25011+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25012@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25013
25014 /* SW interrupt */
25015 wait_queue_head_t swi_queue;
25016- atomic_t swi_emitted;
25017+ atomic_unchecked_t swi_emitted;
25018 int vblank_crtc;
25019 uint32_t irq_enable_reg;
25020 uint32_t r500_disp_irq_reg;
25021diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c
25022--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25023+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25024@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25025 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25026 return 0;
25027 }
25028- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25029+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25030 if (!rdev->cp.ready)
25031 /* FIXME: cp is not running assume everythings is done right
25032 * away
25033@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25034 return r;
25035 }
25036 radeon_fence_write(rdev, 0);
25037- atomic_set(&rdev->fence_drv.seq, 0);
25038+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25039 INIT_LIST_HEAD(&rdev->fence_drv.created);
25040 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25041 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25042diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon.h linux-3.0.3/drivers/gpu/drm/radeon/radeon.h
25043--- linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25044+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25045@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25046 */
25047 struct radeon_fence_driver {
25048 uint32_t scratch_reg;
25049- atomic_t seq;
25050+ atomic_unchecked_t seq;
25051 uint32_t last_seq;
25052 unsigned long last_jiffies;
25053 unsigned long last_timeout;
25054@@ -960,7 +960,7 @@ struct radeon_asic {
25055 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25056 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25057 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25058-};
25059+} __no_const;
25060
25061 /*
25062 * Asic structures
25063diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c
25064--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25065+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25066@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25067 request = compat_alloc_user_space(sizeof(*request));
25068 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25069 || __put_user(req32.param, &request->param)
25070- || __put_user((void __user *)(unsigned long)req32.value,
25071+ || __put_user((unsigned long)req32.value,
25072 &request->value))
25073 return -EFAULT;
25074
25075diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c
25076--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25077+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25078@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25079 unsigned int ret;
25080 RING_LOCALS;
25081
25082- atomic_inc(&dev_priv->swi_emitted);
25083- ret = atomic_read(&dev_priv->swi_emitted);
25084+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25085+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25086
25087 BEGIN_RING(4);
25088 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25089@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25090 drm_radeon_private_t *dev_priv =
25091 (drm_radeon_private_t *) dev->dev_private;
25092
25093- atomic_set(&dev_priv->swi_emitted, 0);
25094+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25095 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25096
25097 dev->max_vblank_count = 0x001fffff;
25098diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c
25099--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25100+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25101@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25102 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25103 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25104
25105- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25106+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25107 sarea_priv->nbox * sizeof(depth_boxes[0])))
25108 return -EFAULT;
25109
25110@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25111 {
25112 drm_radeon_private_t *dev_priv = dev->dev_private;
25113 drm_radeon_getparam_t *param = data;
25114- int value;
25115+ int value = 0;
25116
25117 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25118
25119diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c
25120--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25121+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25122@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25123 }
25124 if (unlikely(ttm_vm_ops == NULL)) {
25125 ttm_vm_ops = vma->vm_ops;
25126- radeon_ttm_vm_ops = *ttm_vm_ops;
25127- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25128+ pax_open_kernel();
25129+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25130+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25131+ pax_close_kernel();
25132 }
25133 vma->vm_ops = &radeon_ttm_vm_ops;
25134 return 0;
25135diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/rs690.c linux-3.0.3/drivers/gpu/drm/radeon/rs690.c
25136--- linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25137+++ linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25138@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25139 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25140 rdev->pm.sideport_bandwidth.full)
25141 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25142- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25143+ read_delay_latency.full = dfixed_const(800 * 1000);
25144 read_delay_latency.full = dfixed_div(read_delay_latency,
25145 rdev->pm.igp_sideport_mclk);
25146+ a.full = dfixed_const(370);
25147+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25148 } else {
25149 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25150 rdev->pm.k8_bandwidth.full)
25151diff -urNp linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
25152--- linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25153+++ linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25154@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25155 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25156 struct shrink_control *sc)
25157 {
25158- static atomic_t start_pool = ATOMIC_INIT(0);
25159+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25160 unsigned i;
25161- unsigned pool_offset = atomic_add_return(1, &start_pool);
25162+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25163 struct ttm_page_pool *pool;
25164 int shrink_pages = sc->nr_to_scan;
25165
25166diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_drv.h linux-3.0.3/drivers/gpu/drm/via/via_drv.h
25167--- linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25168+++ linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25169@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25170 typedef uint32_t maskarray_t[5];
25171
25172 typedef struct drm_via_irq {
25173- atomic_t irq_received;
25174+ atomic_unchecked_t irq_received;
25175 uint32_t pending_mask;
25176 uint32_t enable_mask;
25177 wait_queue_head_t irq_queue;
25178@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25179 struct timeval last_vblank;
25180 int last_vblank_valid;
25181 unsigned usec_per_vblank;
25182- atomic_t vbl_received;
25183+ atomic_unchecked_t vbl_received;
25184 drm_via_state_t hc_state;
25185 char pci_buf[VIA_PCI_BUF_SIZE];
25186 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25187diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_irq.c linux-3.0.3/drivers/gpu/drm/via/via_irq.c
25188--- linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25189+++ linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25190@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25191 if (crtc != 0)
25192 return 0;
25193
25194- return atomic_read(&dev_priv->vbl_received);
25195+ return atomic_read_unchecked(&dev_priv->vbl_received);
25196 }
25197
25198 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25199@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25200
25201 status = VIA_READ(VIA_REG_INTERRUPT);
25202 if (status & VIA_IRQ_VBLANK_PENDING) {
25203- atomic_inc(&dev_priv->vbl_received);
25204- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25205+ atomic_inc_unchecked(&dev_priv->vbl_received);
25206+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25207 do_gettimeofday(&cur_vblank);
25208 if (dev_priv->last_vblank_valid) {
25209 dev_priv->usec_per_vblank =
25210@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25211 dev_priv->last_vblank = cur_vblank;
25212 dev_priv->last_vblank_valid = 1;
25213 }
25214- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25215+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25216 DRM_DEBUG("US per vblank is: %u\n",
25217 dev_priv->usec_per_vblank);
25218 }
25219@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25220
25221 for (i = 0; i < dev_priv->num_irqs; ++i) {
25222 if (status & cur_irq->pending_mask) {
25223- atomic_inc(&cur_irq->irq_received);
25224+ atomic_inc_unchecked(&cur_irq->irq_received);
25225 DRM_WAKEUP(&cur_irq->irq_queue);
25226 handled = 1;
25227 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25228@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25229 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25230 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25231 masks[irq][4]));
25232- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25233+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25234 } else {
25235 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25236 (((cur_irq_sequence =
25237- atomic_read(&cur_irq->irq_received)) -
25238+ atomic_read_unchecked(&cur_irq->irq_received)) -
25239 *sequence) <= (1 << 23)));
25240 }
25241 *sequence = cur_irq_sequence;
25242@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25243 }
25244
25245 for (i = 0; i < dev_priv->num_irqs; ++i) {
25246- atomic_set(&cur_irq->irq_received, 0);
25247+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25248 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25249 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25250 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25251@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25252 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25253 case VIA_IRQ_RELATIVE:
25254 irqwait->request.sequence +=
25255- atomic_read(&cur_irq->irq_received);
25256+ atomic_read_unchecked(&cur_irq->irq_received);
25257 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25258 case VIA_IRQ_ABSOLUTE:
25259 break;
25260diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25261--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25262+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25263@@ -240,7 +240,7 @@ struct vmw_private {
25264 * Fencing and IRQs.
25265 */
25266
25267- atomic_t fence_seq;
25268+ atomic_unchecked_t fence_seq;
25269 wait_queue_head_t fence_queue;
25270 wait_queue_head_t fifo_queue;
25271 atomic_t fence_queue_waiters;
25272diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25273--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25274+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25275@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25276 while (!vmw_lag_lt(queue, us)) {
25277 spin_lock(&queue->lock);
25278 if (list_empty(&queue->head))
25279- sequence = atomic_read(&dev_priv->fence_seq);
25280+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25281 else {
25282 fence = list_first_entry(&queue->head,
25283 struct vmw_fence, head);
25284diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25285--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25286+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25287@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25288 (unsigned int) min,
25289 (unsigned int) fifo->capabilities);
25290
25291- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25292+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25293 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25294 vmw_fence_queue_init(&fifo->fence_queue);
25295 return vmw_fifo_send_fence(dev_priv, &dummy);
25296@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25297
25298 fm = vmw_fifo_reserve(dev_priv, bytes);
25299 if (unlikely(fm == NULL)) {
25300- *sequence = atomic_read(&dev_priv->fence_seq);
25301+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25302 ret = -ENOMEM;
25303 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25304 false, 3*HZ);
25305@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25306 }
25307
25308 do {
25309- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25310+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25311 } while (*sequence == 0);
25312
25313 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25314diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25315--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25316+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25317@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25318 * emitted. Then the fence is stale and signaled.
25319 */
25320
25321- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25322+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25323 > VMW_FENCE_WRAP);
25324
25325 return ret;
25326@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25327
25328 if (fifo_idle)
25329 down_read(&fifo_state->rwsem);
25330- signal_seq = atomic_read(&dev_priv->fence_seq);
25331+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25332 ret = 0;
25333
25334 for (;;) {
25335diff -urNp linux-3.0.3/drivers/hid/hid-core.c linux-3.0.3/drivers/hid/hid-core.c
25336--- linux-3.0.3/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25337+++ linux-3.0.3/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25338@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25339
25340 int hid_add_device(struct hid_device *hdev)
25341 {
25342- static atomic_t id = ATOMIC_INIT(0);
25343+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25344 int ret;
25345
25346 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25347@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25348 /* XXX hack, any other cleaner solution after the driver core
25349 * is converted to allow more than 20 bytes as the device name? */
25350 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25351- hdev->vendor, hdev->product, atomic_inc_return(&id));
25352+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25353
25354 hid_debug_register(hdev, dev_name(&hdev->dev));
25355 ret = device_add(&hdev->dev);
25356diff -urNp linux-3.0.3/drivers/hid/usbhid/hiddev.c linux-3.0.3/drivers/hid/usbhid/hiddev.c
25357--- linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25358+++ linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25359@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25360 break;
25361
25362 case HIDIOCAPPLICATION:
25363- if (arg < 0 || arg >= hid->maxapplication)
25364+ if (arg >= hid->maxapplication)
25365 break;
25366
25367 for (i = 0; i < hid->maxcollection; i++)
25368diff -urNp linux-3.0.3/drivers/hwmon/acpi_power_meter.c linux-3.0.3/drivers/hwmon/acpi_power_meter.c
25369--- linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25370+++ linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25371@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25372 return res;
25373
25374 temp /= 1000;
25375- if (temp < 0)
25376- return -EINVAL;
25377
25378 mutex_lock(&resource->lock);
25379 resource->trip[attr->index - 7] = temp;
25380diff -urNp linux-3.0.3/drivers/hwmon/sht15.c linux-3.0.3/drivers/hwmon/sht15.c
25381--- linux-3.0.3/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25382+++ linux-3.0.3/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25383@@ -166,7 +166,7 @@ struct sht15_data {
25384 int supply_uV;
25385 bool supply_uV_valid;
25386 struct work_struct update_supply_work;
25387- atomic_t interrupt_handled;
25388+ atomic_unchecked_t interrupt_handled;
25389 };
25390
25391 /**
25392@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25393 return ret;
25394
25395 gpio_direction_input(data->pdata->gpio_data);
25396- atomic_set(&data->interrupt_handled, 0);
25397+ atomic_set_unchecked(&data->interrupt_handled, 0);
25398
25399 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25400 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25401 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25402 /* Only relevant if the interrupt hasn't occurred. */
25403- if (!atomic_read(&data->interrupt_handled))
25404+ if (!atomic_read_unchecked(&data->interrupt_handled))
25405 schedule_work(&data->read_work);
25406 }
25407 ret = wait_event_timeout(data->wait_queue,
25408@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25409
25410 /* First disable the interrupt */
25411 disable_irq_nosync(irq);
25412- atomic_inc(&data->interrupt_handled);
25413+ atomic_inc_unchecked(&data->interrupt_handled);
25414 /* Then schedule a reading work struct */
25415 if (data->state != SHT15_READING_NOTHING)
25416 schedule_work(&data->read_work);
25417@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25418 * If not, then start the interrupt again - care here as could
25419 * have gone low in meantime so verify it hasn't!
25420 */
25421- atomic_set(&data->interrupt_handled, 0);
25422+ atomic_set_unchecked(&data->interrupt_handled, 0);
25423 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25424 /* If still not occurred or another handler has been scheduled */
25425 if (gpio_get_value(data->pdata->gpio_data)
25426- || atomic_read(&data->interrupt_handled))
25427+ || atomic_read_unchecked(&data->interrupt_handled))
25428 return;
25429 }
25430
25431diff -urNp linux-3.0.3/drivers/hwmon/w83791d.c linux-3.0.3/drivers/hwmon/w83791d.c
25432--- linux-3.0.3/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25433+++ linux-3.0.3/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25434@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25435 struct i2c_board_info *info);
25436 static int w83791d_remove(struct i2c_client *client);
25437
25438-static int w83791d_read(struct i2c_client *client, u8 register);
25439-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25440+static int w83791d_read(struct i2c_client *client, u8 reg);
25441+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25442 static struct w83791d_data *w83791d_update_device(struct device *dev);
25443
25444 #ifdef DEBUG
25445diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c
25446--- linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25447+++ linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25448@@ -43,7 +43,7 @@
25449 extern struct i2c_adapter amd756_smbus;
25450
25451 static struct i2c_adapter *s4882_adapter;
25452-static struct i2c_algorithm *s4882_algo;
25453+static i2c_algorithm_no_const *s4882_algo;
25454
25455 /* Wrapper access functions for multiplexed SMBus */
25456 static DEFINE_MUTEX(amd756_lock);
25457diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c
25458--- linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25459+++ linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25460@@ -41,7 +41,7 @@
25461 extern struct i2c_adapter *nforce2_smbus;
25462
25463 static struct i2c_adapter *s4985_adapter;
25464-static struct i2c_algorithm *s4985_algo;
25465+static i2c_algorithm_no_const *s4985_algo;
25466
25467 /* Wrapper access functions for multiplexed SMBus */
25468 static DEFINE_MUTEX(nforce2_lock);
25469diff -urNp linux-3.0.3/drivers/i2c/i2c-mux.c linux-3.0.3/drivers/i2c/i2c-mux.c
25470--- linux-3.0.3/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25471+++ linux-3.0.3/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25472@@ -28,7 +28,7 @@
25473 /* multiplexer per channel data */
25474 struct i2c_mux_priv {
25475 struct i2c_adapter adap;
25476- struct i2c_algorithm algo;
25477+ i2c_algorithm_no_const algo;
25478
25479 struct i2c_adapter *parent;
25480 void *mux_dev; /* the mux chip/device */
25481diff -urNp linux-3.0.3/drivers/ide/ide-cd.c linux-3.0.3/drivers/ide/ide-cd.c
25482--- linux-3.0.3/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25483+++ linux-3.0.3/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25484@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25485 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25486 if ((unsigned long)buf & alignment
25487 || blk_rq_bytes(rq) & q->dma_pad_mask
25488- || object_is_on_stack(buf))
25489+ || object_starts_on_stack(buf))
25490 drive->dma = 0;
25491 }
25492 }
25493diff -urNp linux-3.0.3/drivers/ide/ide-floppy.c linux-3.0.3/drivers/ide/ide-floppy.c
25494--- linux-3.0.3/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25495+++ linux-3.0.3/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25496@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25497 u8 pc_buf[256], header_len, desc_cnt;
25498 int i, rc = 1, blocks, length;
25499
25500+ pax_track_stack();
25501+
25502 ide_debug_log(IDE_DBG_FUNC, "enter");
25503
25504 drive->bios_cyl = 0;
25505diff -urNp linux-3.0.3/drivers/ide/setup-pci.c linux-3.0.3/drivers/ide/setup-pci.c
25506--- linux-3.0.3/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25507+++ linux-3.0.3/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25508@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25509 int ret, i, n_ports = dev2 ? 4 : 2;
25510 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25511
25512+ pax_track_stack();
25513+
25514 for (i = 0; i < n_ports / 2; i++) {
25515 ret = ide_setup_pci_controller(pdev[i], d, !i);
25516 if (ret < 0)
25517diff -urNp linux-3.0.3/drivers/infiniband/core/cm.c linux-3.0.3/drivers/infiniband/core/cm.c
25518--- linux-3.0.3/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25519+++ linux-3.0.3/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25520@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25521
25522 struct cm_counter_group {
25523 struct kobject obj;
25524- atomic_long_t counter[CM_ATTR_COUNT];
25525+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25526 };
25527
25528 struct cm_counter_attribute {
25529@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25530 struct ib_mad_send_buf *msg = NULL;
25531 int ret;
25532
25533- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25534+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25535 counter[CM_REQ_COUNTER]);
25536
25537 /* Quick state check to discard duplicate REQs. */
25538@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25539 if (!cm_id_priv)
25540 return;
25541
25542- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25543+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25544 counter[CM_REP_COUNTER]);
25545 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25546 if (ret)
25547@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25548 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25549 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25550 spin_unlock_irq(&cm_id_priv->lock);
25551- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25552+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25553 counter[CM_RTU_COUNTER]);
25554 goto out;
25555 }
25556@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25557 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25558 dreq_msg->local_comm_id);
25559 if (!cm_id_priv) {
25560- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25561+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25562 counter[CM_DREQ_COUNTER]);
25563 cm_issue_drep(work->port, work->mad_recv_wc);
25564 return -EINVAL;
25565@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25566 case IB_CM_MRA_REP_RCVD:
25567 break;
25568 case IB_CM_TIMEWAIT:
25569- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25570+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25571 counter[CM_DREQ_COUNTER]);
25572 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25573 goto unlock;
25574@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25575 cm_free_msg(msg);
25576 goto deref;
25577 case IB_CM_DREQ_RCVD:
25578- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25579+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25580 counter[CM_DREQ_COUNTER]);
25581 goto unlock;
25582 default:
25583@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25584 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25585 cm_id_priv->msg, timeout)) {
25586 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25587- atomic_long_inc(&work->port->
25588+ atomic_long_inc_unchecked(&work->port->
25589 counter_group[CM_RECV_DUPLICATES].
25590 counter[CM_MRA_COUNTER]);
25591 goto out;
25592@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25593 break;
25594 case IB_CM_MRA_REQ_RCVD:
25595 case IB_CM_MRA_REP_RCVD:
25596- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25597+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25598 counter[CM_MRA_COUNTER]);
25599 /* fall through */
25600 default:
25601@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25602 case IB_CM_LAP_IDLE:
25603 break;
25604 case IB_CM_MRA_LAP_SENT:
25605- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25606+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25607 counter[CM_LAP_COUNTER]);
25608 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25609 goto unlock;
25610@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25611 cm_free_msg(msg);
25612 goto deref;
25613 case IB_CM_LAP_RCVD:
25614- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25615+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25616 counter[CM_LAP_COUNTER]);
25617 goto unlock;
25618 default:
25619@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25620 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25621 if (cur_cm_id_priv) {
25622 spin_unlock_irq(&cm.lock);
25623- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625 counter[CM_SIDR_REQ_COUNTER]);
25626 goto out; /* Duplicate message. */
25627 }
25628@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25629 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25630 msg->retries = 1;
25631
25632- atomic_long_add(1 + msg->retries,
25633+ atomic_long_add_unchecked(1 + msg->retries,
25634 &port->counter_group[CM_XMIT].counter[attr_index]);
25635 if (msg->retries)
25636- atomic_long_add(msg->retries,
25637+ atomic_long_add_unchecked(msg->retries,
25638 &port->counter_group[CM_XMIT_RETRIES].
25639 counter[attr_index]);
25640
25641@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25642 }
25643
25644 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25645- atomic_long_inc(&port->counter_group[CM_RECV].
25646+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25647 counter[attr_id - CM_ATTR_ID_OFFSET]);
25648
25649 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25650@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25651 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25652
25653 return sprintf(buf, "%ld\n",
25654- atomic_long_read(&group->counter[cm_attr->index]));
25655+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25656 }
25657
25658 static const struct sysfs_ops cm_counter_ops = {
25659diff -urNp linux-3.0.3/drivers/infiniband/core/fmr_pool.c linux-3.0.3/drivers/infiniband/core/fmr_pool.c
25660--- linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25661+++ linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25662@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25663
25664 struct task_struct *thread;
25665
25666- atomic_t req_ser;
25667- atomic_t flush_ser;
25668+ atomic_unchecked_t req_ser;
25669+ atomic_unchecked_t flush_ser;
25670
25671 wait_queue_head_t force_wait;
25672 };
25673@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25674 struct ib_fmr_pool *pool = pool_ptr;
25675
25676 do {
25677- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25678+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25679 ib_fmr_batch_release(pool);
25680
25681- atomic_inc(&pool->flush_ser);
25682+ atomic_inc_unchecked(&pool->flush_ser);
25683 wake_up_interruptible(&pool->force_wait);
25684
25685 if (pool->flush_function)
25686@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25687 }
25688
25689 set_current_state(TASK_INTERRUPTIBLE);
25690- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25691+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25692 !kthread_should_stop())
25693 schedule();
25694 __set_current_state(TASK_RUNNING);
25695@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25696 pool->dirty_watermark = params->dirty_watermark;
25697 pool->dirty_len = 0;
25698 spin_lock_init(&pool->pool_lock);
25699- atomic_set(&pool->req_ser, 0);
25700- atomic_set(&pool->flush_ser, 0);
25701+ atomic_set_unchecked(&pool->req_ser, 0);
25702+ atomic_set_unchecked(&pool->flush_ser, 0);
25703 init_waitqueue_head(&pool->force_wait);
25704
25705 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25706@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25707 }
25708 spin_unlock_irq(&pool->pool_lock);
25709
25710- serial = atomic_inc_return(&pool->req_ser);
25711+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25712 wake_up_process(pool->thread);
25713
25714 if (wait_event_interruptible(pool->force_wait,
25715- atomic_read(&pool->flush_ser) - serial >= 0))
25716+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25717 return -EINTR;
25718
25719 return 0;
25720@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25721 } else {
25722 list_add_tail(&fmr->list, &pool->dirty_list);
25723 if (++pool->dirty_len >= pool->dirty_watermark) {
25724- atomic_inc(&pool->req_ser);
25725+ atomic_inc_unchecked(&pool->req_ser);
25726 wake_up_process(pool->thread);
25727 }
25728 }
25729diff -urNp linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c
25730--- linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25731+++ linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25732@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25733 int err;
25734 struct fw_ri_tpte tpt;
25735 u32 stag_idx;
25736- static atomic_t key;
25737+ static atomic_unchecked_t key;
25738
25739 if (c4iw_fatal_error(rdev))
25740 return -EIO;
25741@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25742 &rdev->resource.tpt_fifo_lock);
25743 if (!stag_idx)
25744 return -ENOMEM;
25745- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25746+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25747 }
25748 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25749 __func__, stag_state, type, pdid, stag_idx);
25750diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c
25751--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25752+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25753@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25754 struct infinipath_counters counters;
25755 struct ipath_devdata *dd;
25756
25757+ pax_track_stack();
25758+
25759 dd = file->f_path.dentry->d_inode->i_private;
25760 dd->ipath_f_read_counters(dd, &counters);
25761
25762diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c
25763--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25764+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25765@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25766 struct ib_atomic_eth *ateth;
25767 struct ipath_ack_entry *e;
25768 u64 vaddr;
25769- atomic64_t *maddr;
25770+ atomic64_unchecked_t *maddr;
25771 u64 sdata;
25772 u32 rkey;
25773 u8 next;
25774@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25775 IB_ACCESS_REMOTE_ATOMIC)))
25776 goto nack_acc_unlck;
25777 /* Perform atomic OP and save result. */
25778- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25779+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25780 sdata = be64_to_cpu(ateth->swap_data);
25781 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25782 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25783- (u64) atomic64_add_return(sdata, maddr) - sdata :
25784+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25785 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25786 be64_to_cpu(ateth->compare_data),
25787 sdata);
25788diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c
25789--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25790+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25791@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25792 unsigned long flags;
25793 struct ib_wc wc;
25794 u64 sdata;
25795- atomic64_t *maddr;
25796+ atomic64_unchecked_t *maddr;
25797 enum ib_wc_status send_status;
25798
25799 /*
25800@@ -382,11 +382,11 @@ again:
25801 IB_ACCESS_REMOTE_ATOMIC)))
25802 goto acc_err;
25803 /* Perform atomic OP and save result. */
25804- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25805+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25806 sdata = wqe->wr.wr.atomic.compare_add;
25807 *(u64 *) sqp->s_sge.sge.vaddr =
25808 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25809- (u64) atomic64_add_return(sdata, maddr) - sdata :
25810+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25811 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25812 sdata, wqe->wr.wr.atomic.swap);
25813 goto send_comp;
25814diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.c linux-3.0.3/drivers/infiniband/hw/nes/nes.c
25815--- linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25816+++ linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25817@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25818 LIST_HEAD(nes_adapter_list);
25819 static LIST_HEAD(nes_dev_list);
25820
25821-atomic_t qps_destroyed;
25822+atomic_unchecked_t qps_destroyed;
25823
25824 static unsigned int ee_flsh_adapter;
25825 static unsigned int sysfs_nonidx_addr;
25826@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25827 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25828 struct nes_adapter *nesadapter = nesdev->nesadapter;
25829
25830- atomic_inc(&qps_destroyed);
25831+ atomic_inc_unchecked(&qps_destroyed);
25832
25833 /* Free the control structures */
25834
25835diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c
25836--- linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25837+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25838@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25839 u32 cm_packets_retrans;
25840 u32 cm_packets_created;
25841 u32 cm_packets_received;
25842-atomic_t cm_listens_created;
25843-atomic_t cm_listens_destroyed;
25844+atomic_unchecked_t cm_listens_created;
25845+atomic_unchecked_t cm_listens_destroyed;
25846 u32 cm_backlog_drops;
25847-atomic_t cm_loopbacks;
25848-atomic_t cm_nodes_created;
25849-atomic_t cm_nodes_destroyed;
25850-atomic_t cm_accel_dropped_pkts;
25851-atomic_t cm_resets_recvd;
25852+atomic_unchecked_t cm_loopbacks;
25853+atomic_unchecked_t cm_nodes_created;
25854+atomic_unchecked_t cm_nodes_destroyed;
25855+atomic_unchecked_t cm_accel_dropped_pkts;
25856+atomic_unchecked_t cm_resets_recvd;
25857
25858 static inline int mini_cm_accelerated(struct nes_cm_core *,
25859 struct nes_cm_node *);
25860@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25861
25862 static struct nes_cm_core *g_cm_core;
25863
25864-atomic_t cm_connects;
25865-atomic_t cm_accepts;
25866-atomic_t cm_disconnects;
25867-atomic_t cm_closes;
25868-atomic_t cm_connecteds;
25869-atomic_t cm_connect_reqs;
25870-atomic_t cm_rejects;
25871+atomic_unchecked_t cm_connects;
25872+atomic_unchecked_t cm_accepts;
25873+atomic_unchecked_t cm_disconnects;
25874+atomic_unchecked_t cm_closes;
25875+atomic_unchecked_t cm_connecteds;
25876+atomic_unchecked_t cm_connect_reqs;
25877+atomic_unchecked_t cm_rejects;
25878
25879
25880 /**
25881@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25882 kfree(listener);
25883 listener = NULL;
25884 ret = 0;
25885- atomic_inc(&cm_listens_destroyed);
25886+ atomic_inc_unchecked(&cm_listens_destroyed);
25887 } else {
25888 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25889 }
25890@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25891 cm_node->rem_mac);
25892
25893 add_hte_node(cm_core, cm_node);
25894- atomic_inc(&cm_nodes_created);
25895+ atomic_inc_unchecked(&cm_nodes_created);
25896
25897 return cm_node;
25898 }
25899@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25900 }
25901
25902 atomic_dec(&cm_core->node_cnt);
25903- atomic_inc(&cm_nodes_destroyed);
25904+ atomic_inc_unchecked(&cm_nodes_destroyed);
25905 nesqp = cm_node->nesqp;
25906 if (nesqp) {
25907 nesqp->cm_node = NULL;
25908@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25909
25910 static void drop_packet(struct sk_buff *skb)
25911 {
25912- atomic_inc(&cm_accel_dropped_pkts);
25913+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25914 dev_kfree_skb_any(skb);
25915 }
25916
25917@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25918 {
25919
25920 int reset = 0; /* whether to send reset in case of err.. */
25921- atomic_inc(&cm_resets_recvd);
25922+ atomic_inc_unchecked(&cm_resets_recvd);
25923 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25924 " refcnt=%d\n", cm_node, cm_node->state,
25925 atomic_read(&cm_node->ref_count));
25926@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25927 rem_ref_cm_node(cm_node->cm_core, cm_node);
25928 return NULL;
25929 }
25930- atomic_inc(&cm_loopbacks);
25931+ atomic_inc_unchecked(&cm_loopbacks);
25932 loopbackremotenode->loopbackpartner = cm_node;
25933 loopbackremotenode->tcp_cntxt.rcv_wscale =
25934 NES_CM_DEFAULT_RCV_WND_SCALE;
25935@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25936 add_ref_cm_node(cm_node);
25937 } else if (cm_node->state == NES_CM_STATE_TSA) {
25938 rem_ref_cm_node(cm_core, cm_node);
25939- atomic_inc(&cm_accel_dropped_pkts);
25940+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25941 dev_kfree_skb_any(skb);
25942 break;
25943 }
25944@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25945
25946 if ((cm_id) && (cm_id->event_handler)) {
25947 if (issue_disconn) {
25948- atomic_inc(&cm_disconnects);
25949+ atomic_inc_unchecked(&cm_disconnects);
25950 cm_event.event = IW_CM_EVENT_DISCONNECT;
25951 cm_event.status = disconn_status;
25952 cm_event.local_addr = cm_id->local_addr;
25953@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25954 }
25955
25956 if (issue_close) {
25957- atomic_inc(&cm_closes);
25958+ atomic_inc_unchecked(&cm_closes);
25959 nes_disconnect(nesqp, 1);
25960
25961 cm_id->provider_data = nesqp;
25962@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25963
25964 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25965 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25966- atomic_inc(&cm_accepts);
25967+ atomic_inc_unchecked(&cm_accepts);
25968
25969 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25970 netdev_refcnt_read(nesvnic->netdev));
25971@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25972
25973 struct nes_cm_core *cm_core;
25974
25975- atomic_inc(&cm_rejects);
25976+ atomic_inc_unchecked(&cm_rejects);
25977 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25978 loopback = cm_node->loopbackpartner;
25979 cm_core = cm_node->cm_core;
25980@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25981 ntohl(cm_id->local_addr.sin_addr.s_addr),
25982 ntohs(cm_id->local_addr.sin_port));
25983
25984- atomic_inc(&cm_connects);
25985+ atomic_inc_unchecked(&cm_connects);
25986 nesqp->active_conn = 1;
25987
25988 /* cache the cm_id in the qp */
25989@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25990 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
25991 return err;
25992 }
25993- atomic_inc(&cm_listens_created);
25994+ atomic_inc_unchecked(&cm_listens_created);
25995 }
25996
25997 cm_id->add_ref(cm_id);
25998@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
25999 if (nesqp->destroyed) {
26000 return;
26001 }
26002- atomic_inc(&cm_connecteds);
26003+ atomic_inc_unchecked(&cm_connecteds);
26004 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26005 " local port 0x%04X. jiffies = %lu.\n",
26006 nesqp->hwqp.qp_id,
26007@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26008
26009 cm_id->add_ref(cm_id);
26010 ret = cm_id->event_handler(cm_id, &cm_event);
26011- atomic_inc(&cm_closes);
26012+ atomic_inc_unchecked(&cm_closes);
26013 cm_event.event = IW_CM_EVENT_CLOSE;
26014 cm_event.status = 0;
26015 cm_event.provider_data = cm_id->provider_data;
26016@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26017 return;
26018 cm_id = cm_node->cm_id;
26019
26020- atomic_inc(&cm_connect_reqs);
26021+ atomic_inc_unchecked(&cm_connect_reqs);
26022 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26023 cm_node, cm_id, jiffies);
26024
26025@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26026 return;
26027 cm_id = cm_node->cm_id;
26028
26029- atomic_inc(&cm_connect_reqs);
26030+ atomic_inc_unchecked(&cm_connect_reqs);
26031 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26032 cm_node, cm_id, jiffies);
26033
26034diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.h linux-3.0.3/drivers/infiniband/hw/nes/nes.h
26035--- linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26036+++ linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26037@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26038 extern unsigned int wqm_quanta;
26039 extern struct list_head nes_adapter_list;
26040
26041-extern atomic_t cm_connects;
26042-extern atomic_t cm_accepts;
26043-extern atomic_t cm_disconnects;
26044-extern atomic_t cm_closes;
26045-extern atomic_t cm_connecteds;
26046-extern atomic_t cm_connect_reqs;
26047-extern atomic_t cm_rejects;
26048-extern atomic_t mod_qp_timouts;
26049-extern atomic_t qps_created;
26050-extern atomic_t qps_destroyed;
26051-extern atomic_t sw_qps_destroyed;
26052+extern atomic_unchecked_t cm_connects;
26053+extern atomic_unchecked_t cm_accepts;
26054+extern atomic_unchecked_t cm_disconnects;
26055+extern atomic_unchecked_t cm_closes;
26056+extern atomic_unchecked_t cm_connecteds;
26057+extern atomic_unchecked_t cm_connect_reqs;
26058+extern atomic_unchecked_t cm_rejects;
26059+extern atomic_unchecked_t mod_qp_timouts;
26060+extern atomic_unchecked_t qps_created;
26061+extern atomic_unchecked_t qps_destroyed;
26062+extern atomic_unchecked_t sw_qps_destroyed;
26063 extern u32 mh_detected;
26064 extern u32 mh_pauses_sent;
26065 extern u32 cm_packets_sent;
26066@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26067 extern u32 cm_packets_received;
26068 extern u32 cm_packets_dropped;
26069 extern u32 cm_packets_retrans;
26070-extern atomic_t cm_listens_created;
26071-extern atomic_t cm_listens_destroyed;
26072+extern atomic_unchecked_t cm_listens_created;
26073+extern atomic_unchecked_t cm_listens_destroyed;
26074 extern u32 cm_backlog_drops;
26075-extern atomic_t cm_loopbacks;
26076-extern atomic_t cm_nodes_created;
26077-extern atomic_t cm_nodes_destroyed;
26078-extern atomic_t cm_accel_dropped_pkts;
26079-extern atomic_t cm_resets_recvd;
26080+extern atomic_unchecked_t cm_loopbacks;
26081+extern atomic_unchecked_t cm_nodes_created;
26082+extern atomic_unchecked_t cm_nodes_destroyed;
26083+extern atomic_unchecked_t cm_accel_dropped_pkts;
26084+extern atomic_unchecked_t cm_resets_recvd;
26085
26086 extern u32 int_mod_timer_init;
26087 extern u32 int_mod_cq_depth_256;
26088diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c
26089--- linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26090+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26091@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26092 target_stat_values[++index] = mh_detected;
26093 target_stat_values[++index] = mh_pauses_sent;
26094 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26095- target_stat_values[++index] = atomic_read(&cm_connects);
26096- target_stat_values[++index] = atomic_read(&cm_accepts);
26097- target_stat_values[++index] = atomic_read(&cm_disconnects);
26098- target_stat_values[++index] = atomic_read(&cm_connecteds);
26099- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26100- target_stat_values[++index] = atomic_read(&cm_rejects);
26101- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26102- target_stat_values[++index] = atomic_read(&qps_created);
26103- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26104- target_stat_values[++index] = atomic_read(&qps_destroyed);
26105- target_stat_values[++index] = atomic_read(&cm_closes);
26106+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26107+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26108+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26109+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26110+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26111+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26112+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26113+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26114+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26115+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26116+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26117 target_stat_values[++index] = cm_packets_sent;
26118 target_stat_values[++index] = cm_packets_bounced;
26119 target_stat_values[++index] = cm_packets_created;
26120 target_stat_values[++index] = cm_packets_received;
26121 target_stat_values[++index] = cm_packets_dropped;
26122 target_stat_values[++index] = cm_packets_retrans;
26123- target_stat_values[++index] = atomic_read(&cm_listens_created);
26124- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26125+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26126+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26127 target_stat_values[++index] = cm_backlog_drops;
26128- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26129- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26130- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26131- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26132- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26133+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26134+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26135+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26136+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26137+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26138 target_stat_values[++index] = nesadapter->free_4kpbl;
26139 target_stat_values[++index] = nesadapter->free_256pbl;
26140 target_stat_values[++index] = int_mod_timer_init;
26141diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c
26142--- linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26143+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26144@@ -46,9 +46,9 @@
26145
26146 #include <rdma/ib_umem.h>
26147
26148-atomic_t mod_qp_timouts;
26149-atomic_t qps_created;
26150-atomic_t sw_qps_destroyed;
26151+atomic_unchecked_t mod_qp_timouts;
26152+atomic_unchecked_t qps_created;
26153+atomic_unchecked_t sw_qps_destroyed;
26154
26155 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26156
26157@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26158 if (init_attr->create_flags)
26159 return ERR_PTR(-EINVAL);
26160
26161- atomic_inc(&qps_created);
26162+ atomic_inc_unchecked(&qps_created);
26163 switch (init_attr->qp_type) {
26164 case IB_QPT_RC:
26165 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26166@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26167 struct iw_cm_event cm_event;
26168 int ret;
26169
26170- atomic_inc(&sw_qps_destroyed);
26171+ atomic_inc_unchecked(&sw_qps_destroyed);
26172 nesqp->destroyed = 1;
26173
26174 /* Blow away the connection if it exists. */
26175diff -urNp linux-3.0.3/drivers/infiniband/hw/qib/qib.h linux-3.0.3/drivers/infiniband/hw/qib/qib.h
26176--- linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26177+++ linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26178@@ -51,6 +51,7 @@
26179 #include <linux/completion.h>
26180 #include <linux/kref.h>
26181 #include <linux/sched.h>
26182+#include <linux/slab.h>
26183
26184 #include "qib_common.h"
26185 #include "qib_verbs.h"
26186diff -urNp linux-3.0.3/drivers/input/gameport/gameport.c linux-3.0.3/drivers/input/gameport/gameport.c
26187--- linux-3.0.3/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26188+++ linux-3.0.3/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26189@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26190 */
26191 static void gameport_init_port(struct gameport *gameport)
26192 {
26193- static atomic_t gameport_no = ATOMIC_INIT(0);
26194+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26195
26196 __module_get(THIS_MODULE);
26197
26198 mutex_init(&gameport->drv_mutex);
26199 device_initialize(&gameport->dev);
26200 dev_set_name(&gameport->dev, "gameport%lu",
26201- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26202+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26203 gameport->dev.bus = &gameport_bus;
26204 gameport->dev.release = gameport_release_port;
26205 if (gameport->parent)
26206diff -urNp linux-3.0.3/drivers/input/input.c linux-3.0.3/drivers/input/input.c
26207--- linux-3.0.3/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26208+++ linux-3.0.3/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26209@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26210 */
26211 int input_register_device(struct input_dev *dev)
26212 {
26213- static atomic_t input_no = ATOMIC_INIT(0);
26214+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26215 struct input_handler *handler;
26216 const char *path;
26217 int error;
26218@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26219 dev->setkeycode = input_default_setkeycode;
26220
26221 dev_set_name(&dev->dev, "input%ld",
26222- (unsigned long) atomic_inc_return(&input_no) - 1);
26223+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26224
26225 error = device_add(&dev->dev);
26226 if (error)
26227diff -urNp linux-3.0.3/drivers/input/joystick/sidewinder.c linux-3.0.3/drivers/input/joystick/sidewinder.c
26228--- linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26229+++ linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26230@@ -30,6 +30,7 @@
26231 #include <linux/kernel.h>
26232 #include <linux/module.h>
26233 #include <linux/slab.h>
26234+#include <linux/sched.h>
26235 #include <linux/init.h>
26236 #include <linux/input.h>
26237 #include <linux/gameport.h>
26238@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26239 unsigned char buf[SW_LENGTH];
26240 int i;
26241
26242+ pax_track_stack();
26243+
26244 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26245
26246 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26247diff -urNp linux-3.0.3/drivers/input/joystick/xpad.c linux-3.0.3/drivers/input/joystick/xpad.c
26248--- linux-3.0.3/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26249+++ linux-3.0.3/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26250@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26251
26252 static int xpad_led_probe(struct usb_xpad *xpad)
26253 {
26254- static atomic_t led_seq = ATOMIC_INIT(0);
26255+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26256 long led_no;
26257 struct xpad_led *led;
26258 struct led_classdev *led_cdev;
26259@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26260 if (!led)
26261 return -ENOMEM;
26262
26263- led_no = (long)atomic_inc_return(&led_seq) - 1;
26264+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26265
26266 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26267 led->xpad = xpad;
26268diff -urNp linux-3.0.3/drivers/input/mousedev.c linux-3.0.3/drivers/input/mousedev.c
26269--- linux-3.0.3/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26270+++ linux-3.0.3/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26271@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26272
26273 spin_unlock_irq(&client->packet_lock);
26274
26275- if (copy_to_user(buffer, data, count))
26276+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26277 return -EFAULT;
26278
26279 return count;
26280diff -urNp linux-3.0.3/drivers/input/serio/serio.c linux-3.0.3/drivers/input/serio/serio.c
26281--- linux-3.0.3/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26282+++ linux-3.0.3/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26283@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26284 */
26285 static void serio_init_port(struct serio *serio)
26286 {
26287- static atomic_t serio_no = ATOMIC_INIT(0);
26288+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26289
26290 __module_get(THIS_MODULE);
26291
26292@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26293 mutex_init(&serio->drv_mutex);
26294 device_initialize(&serio->dev);
26295 dev_set_name(&serio->dev, "serio%ld",
26296- (long)atomic_inc_return(&serio_no) - 1);
26297+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26298 serio->dev.bus = &serio_bus;
26299 serio->dev.release = serio_release_port;
26300 serio->dev.groups = serio_device_attr_groups;
26301diff -urNp linux-3.0.3/drivers/isdn/capi/capi.c linux-3.0.3/drivers/isdn/capi/capi.c
26302--- linux-3.0.3/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26303+++ linux-3.0.3/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26304@@ -83,8 +83,8 @@ struct capiminor {
26305
26306 struct capi20_appl *ap;
26307 u32 ncci;
26308- atomic_t datahandle;
26309- atomic_t msgid;
26310+ atomic_unchecked_t datahandle;
26311+ atomic_unchecked_t msgid;
26312
26313 struct tty_port port;
26314 int ttyinstop;
26315@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26316 capimsg_setu16(s, 2, mp->ap->applid);
26317 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26318 capimsg_setu8 (s, 5, CAPI_RESP);
26319- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26320+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26321 capimsg_setu32(s, 8, mp->ncci);
26322 capimsg_setu16(s, 12, datahandle);
26323 }
26324@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26325 mp->outbytes -= len;
26326 spin_unlock_bh(&mp->outlock);
26327
26328- datahandle = atomic_inc_return(&mp->datahandle);
26329+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26330 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26331 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26332 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26333 capimsg_setu16(skb->data, 2, mp->ap->applid);
26334 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26335 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26336- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26337+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26338 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26339 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26340 capimsg_setu16(skb->data, 16, len); /* Data length */
26341diff -urNp linux-3.0.3/drivers/isdn/gigaset/common.c linux-3.0.3/drivers/isdn/gigaset/common.c
26342--- linux-3.0.3/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26343+++ linux-3.0.3/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26344@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26345 cs->commands_pending = 0;
26346 cs->cur_at_seq = 0;
26347 cs->gotfwver = -1;
26348- cs->open_count = 0;
26349+ local_set(&cs->open_count, 0);
26350 cs->dev = NULL;
26351 cs->tty = NULL;
26352 cs->tty_dev = NULL;
26353diff -urNp linux-3.0.3/drivers/isdn/gigaset/gigaset.h linux-3.0.3/drivers/isdn/gigaset/gigaset.h
26354--- linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26355+++ linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26356@@ -35,6 +35,7 @@
26357 #include <linux/tty_driver.h>
26358 #include <linux/list.h>
26359 #include <asm/atomic.h>
26360+#include <asm/local.h>
26361
26362 #define GIG_VERSION {0, 5, 0, 0}
26363 #define GIG_COMPAT {0, 4, 0, 0}
26364@@ -433,7 +434,7 @@ struct cardstate {
26365 spinlock_t cmdlock;
26366 unsigned curlen, cmdbytes;
26367
26368- unsigned open_count;
26369+ local_t open_count;
26370 struct tty_struct *tty;
26371 struct tasklet_struct if_wake_tasklet;
26372 unsigned control_state;
26373diff -urNp linux-3.0.3/drivers/isdn/gigaset/interface.c linux-3.0.3/drivers/isdn/gigaset/interface.c
26374--- linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26375+++ linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26376@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26377 }
26378 tty->driver_data = cs;
26379
26380- ++cs->open_count;
26381-
26382- if (cs->open_count == 1) {
26383+ if (local_inc_return(&cs->open_count) == 1) {
26384 spin_lock_irqsave(&cs->lock, flags);
26385 cs->tty = tty;
26386 spin_unlock_irqrestore(&cs->lock, flags);
26387@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26388
26389 if (!cs->connected)
26390 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26391- else if (!cs->open_count)
26392+ else if (!local_read(&cs->open_count))
26393 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26394 else {
26395- if (!--cs->open_count) {
26396+ if (!local_dec_return(&cs->open_count)) {
26397 spin_lock_irqsave(&cs->lock, flags);
26398 cs->tty = NULL;
26399 spin_unlock_irqrestore(&cs->lock, flags);
26400@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26401 if (!cs->connected) {
26402 gig_dbg(DEBUG_IF, "not connected");
26403 retval = -ENODEV;
26404- } else if (!cs->open_count)
26405+ } else if (!local_read(&cs->open_count))
26406 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26407 else {
26408 retval = 0;
26409@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26410 retval = -ENODEV;
26411 goto done;
26412 }
26413- if (!cs->open_count) {
26414+ if (!local_read(&cs->open_count)) {
26415 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26416 retval = -ENODEV;
26417 goto done;
26418@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26419 if (!cs->connected) {
26420 gig_dbg(DEBUG_IF, "not connected");
26421 retval = -ENODEV;
26422- } else if (!cs->open_count)
26423+ } else if (!local_read(&cs->open_count))
26424 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26425 else if (cs->mstate != MS_LOCKED) {
26426 dev_warn(cs->dev, "can't write to unlocked device\n");
26427@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26428
26429 if (!cs->connected)
26430 gig_dbg(DEBUG_IF, "not connected");
26431- else if (!cs->open_count)
26432+ else if (!local_read(&cs->open_count))
26433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26434 else if (cs->mstate != MS_LOCKED)
26435 dev_warn(cs->dev, "can't write to unlocked device\n");
26436@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26437
26438 if (!cs->connected)
26439 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26440- else if (!cs->open_count)
26441+ else if (!local_read(&cs->open_count))
26442 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26443 else
26444 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26445@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26446
26447 if (!cs->connected)
26448 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26449- else if (!cs->open_count)
26450+ else if (!local_read(&cs->open_count))
26451 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26452 else
26453 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26454@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26455 goto out;
26456 }
26457
26458- if (!cs->open_count) {
26459+ if (!local_read(&cs->open_count)) {
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461 goto out;
26462 }
26463diff -urNp linux-3.0.3/drivers/isdn/hardware/avm/b1.c linux-3.0.3/drivers/isdn/hardware/avm/b1.c
26464--- linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26465+++ linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26466@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26467 }
26468 if (left) {
26469 if (t4file->user) {
26470- if (copy_from_user(buf, dp, left))
26471+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26472 return -EFAULT;
26473 } else {
26474 memcpy(buf, dp, left);
26475@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26476 }
26477 if (left) {
26478 if (config->user) {
26479- if (copy_from_user(buf, dp, left))
26480+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26481 return -EFAULT;
26482 } else {
26483 memcpy(buf, dp, left);
26484diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c
26485--- linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26486+++ linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26487@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26488 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26489 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26490
26491+ pax_track_stack();
26492
26493 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26494 {
26495diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c
26496--- linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26497+++ linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26498@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26499 IDI_SYNC_REQ req;
26500 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26501
26502+ pax_track_stack();
26503+
26504 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26505
26506 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26507diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c
26508--- linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26509+++ linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26510@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26511 IDI_SYNC_REQ req;
26512 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26513
26514+ pax_track_stack();
26515+
26516 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26517
26518 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26519diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c
26520--- linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26521+++ linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26522@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26523 IDI_SYNC_REQ req;
26524 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26525
26526+ pax_track_stack();
26527+
26528 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26529
26530 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26531diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h
26532--- linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26533+++ linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26534@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26535 } diva_didd_add_adapter_t;
26536 typedef struct _diva_didd_remove_adapter {
26537 IDI_CALL p_request;
26538-} diva_didd_remove_adapter_t;
26539+} __no_const diva_didd_remove_adapter_t;
26540 typedef struct _diva_didd_read_adapter_array {
26541 void * buffer;
26542 dword length;
26543diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c
26544--- linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26545+++ linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26546@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26547 IDI_SYNC_REQ req;
26548 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26549
26550+ pax_track_stack();
26551+
26552 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26553
26554 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26555diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/message.c linux-3.0.3/drivers/isdn/hardware/eicon/message.c
26556--- linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26557+++ linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26558@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26559 dword d;
26560 word w;
26561
26562+ pax_track_stack();
26563+
26564 a = plci->adapter;
26565 Id = ((word)plci->Id<<8)|a->Id;
26566 PUT_WORD(&SS_Ind[4],0x0000);
26567@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26568 word j, n, w;
26569 dword d;
26570
26571+ pax_track_stack();
26572+
26573
26574 for(i=0;i<8;i++) bp_parms[i].length = 0;
26575 for(i=0;i<2;i++) global_config[i].length = 0;
26576@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26577 const byte llc3[] = {4,3,2,2,6,6,0};
26578 const byte header[] = {0,2,3,3,0,0,0};
26579
26580+ pax_track_stack();
26581+
26582 for(i=0;i<8;i++) bp_parms[i].length = 0;
26583 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26584 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26585@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26586 word appl_number_group_type[MAX_APPL];
26587 PLCI *auxplci;
26588
26589+ pax_track_stack();
26590+
26591 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26592
26593 if(!a->group_optimization_enabled)
26594diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c
26595--- linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26596+++ linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26597@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26598 IDI_SYNC_REQ req;
26599 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26600
26601+ pax_track_stack();
26602+
26603 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26604
26605 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26606diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h
26607--- linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26608+++ linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26609@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26610 typedef struct _diva_os_idi_adapter_interface {
26611 diva_init_card_proc_t cleanup_adapter_proc;
26612 diva_cmd_card_proc_t cmd_proc;
26613-} diva_os_idi_adapter_interface_t;
26614+} __no_const diva_os_idi_adapter_interface_t;
26615
26616 typedef struct _diva_os_xdi_adapter {
26617 struct list_head link;
26618diff -urNp linux-3.0.3/drivers/isdn/i4l/isdn_common.c linux-3.0.3/drivers/isdn/i4l/isdn_common.c
26619--- linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26620+++ linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26621@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26622 } iocpar;
26623 void __user *argp = (void __user *)arg;
26624
26625+ pax_track_stack();
26626+
26627 #define name iocpar.name
26628 #define bname iocpar.bname
26629 #define iocts iocpar.iocts
26630diff -urNp linux-3.0.3/drivers/isdn/icn/icn.c linux-3.0.3/drivers/isdn/icn/icn.c
26631--- linux-3.0.3/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26632+++ linux-3.0.3/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26633@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26634 if (count > len)
26635 count = len;
26636 if (user) {
26637- if (copy_from_user(msg, buf, count))
26638+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26639 return -EFAULT;
26640 } else
26641 memcpy(msg, buf, count);
26642diff -urNp linux-3.0.3/drivers/lguest/core.c linux-3.0.3/drivers/lguest/core.c
26643--- linux-3.0.3/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26644+++ linux-3.0.3/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26645@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26646 * it's worked so far. The end address needs +1 because __get_vm_area
26647 * allocates an extra guard page, so we need space for that.
26648 */
26649+
26650+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26651+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26652+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26653+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26654+#else
26655 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26656 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26657 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26658+#endif
26659+
26660 if (!switcher_vma) {
26661 err = -ENOMEM;
26662 printk("lguest: could not map switcher pages high\n");
26663@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26664 * Now the Switcher is mapped at the right address, we can't fail!
26665 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26666 */
26667- memcpy(switcher_vma->addr, start_switcher_text,
26668+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26669 end_switcher_text - start_switcher_text);
26670
26671 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26672diff -urNp linux-3.0.3/drivers/lguest/x86/core.c linux-3.0.3/drivers/lguest/x86/core.c
26673--- linux-3.0.3/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26674+++ linux-3.0.3/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26675@@ -59,7 +59,7 @@ static struct {
26676 /* Offset from where switcher.S was compiled to where we've copied it */
26677 static unsigned long switcher_offset(void)
26678 {
26679- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26680+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26681 }
26682
26683 /* This cpu's struct lguest_pages. */
26684@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26685 * These copies are pretty cheap, so we do them unconditionally: */
26686 /* Save the current Host top-level page directory.
26687 */
26688+
26689+#ifdef CONFIG_PAX_PER_CPU_PGD
26690+ pages->state.host_cr3 = read_cr3();
26691+#else
26692 pages->state.host_cr3 = __pa(current->mm->pgd);
26693+#endif
26694+
26695 /*
26696 * Set up the Guest's page tables to see this CPU's pages (and no
26697 * other CPU's pages).
26698@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26699 * compiled-in switcher code and the high-mapped copy we just made.
26700 */
26701 for (i = 0; i < IDT_ENTRIES; i++)
26702- default_idt_entries[i] += switcher_offset();
26703+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26704
26705 /*
26706 * Set up the Switcher's per-cpu areas.
26707@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26708 * it will be undisturbed when we switch. To change %cs and jump we
26709 * need this structure to feed to Intel's "lcall" instruction.
26710 */
26711- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26712+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26713 lguest_entry.segment = LGUEST_CS;
26714
26715 /*
26716diff -urNp linux-3.0.3/drivers/lguest/x86/switcher_32.S linux-3.0.3/drivers/lguest/x86/switcher_32.S
26717--- linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26718+++ linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26719@@ -87,6 +87,7 @@
26720 #include <asm/page.h>
26721 #include <asm/segment.h>
26722 #include <asm/lguest.h>
26723+#include <asm/processor-flags.h>
26724
26725 // We mark the start of the code to copy
26726 // It's placed in .text tho it's never run here
26727@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26728 // Changes type when we load it: damn Intel!
26729 // For after we switch over our page tables
26730 // That entry will be read-only: we'd crash.
26731+
26732+#ifdef CONFIG_PAX_KERNEXEC
26733+ mov %cr0, %edx
26734+ xor $X86_CR0_WP, %edx
26735+ mov %edx, %cr0
26736+#endif
26737+
26738 movl $(GDT_ENTRY_TSS*8), %edx
26739 ltr %dx
26740
26741@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26742 // Let's clear it again for our return.
26743 // The GDT descriptor of the Host
26744 // Points to the table after two "size" bytes
26745- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26746+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26747 // Clear "used" from type field (byte 5, bit 2)
26748- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26749+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26750+
26751+#ifdef CONFIG_PAX_KERNEXEC
26752+ mov %cr0, %eax
26753+ xor $X86_CR0_WP, %eax
26754+ mov %eax, %cr0
26755+#endif
26756
26757 // Once our page table's switched, the Guest is live!
26758 // The Host fades as we run this final step.
26759@@ -295,13 +309,12 @@ deliver_to_host:
26760 // I consulted gcc, and it gave
26761 // These instructions, which I gladly credit:
26762 leal (%edx,%ebx,8), %eax
26763- movzwl (%eax),%edx
26764- movl 4(%eax), %eax
26765- xorw %ax, %ax
26766- orl %eax, %edx
26767+ movl 4(%eax), %edx
26768+ movw (%eax), %dx
26769 // Now the address of the handler's in %edx
26770 // We call it now: its "iret" drops us home.
26771- jmp *%edx
26772+ ljmp $__KERNEL_CS, $1f
26773+1: jmp *%edx
26774
26775 // Every interrupt can come to us here
26776 // But we must truly tell each apart.
26777diff -urNp linux-3.0.3/drivers/md/dm.c linux-3.0.3/drivers/md/dm.c
26778--- linux-3.0.3/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26779+++ linux-3.0.3/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26780@@ -164,9 +164,9 @@ struct mapped_device {
26781 /*
26782 * Event handling.
26783 */
26784- atomic_t event_nr;
26785+ atomic_unchecked_t event_nr;
26786 wait_queue_head_t eventq;
26787- atomic_t uevent_seq;
26788+ atomic_unchecked_t uevent_seq;
26789 struct list_head uevent_list;
26790 spinlock_t uevent_lock; /* Protect access to uevent_list */
26791
26792@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26793 rwlock_init(&md->map_lock);
26794 atomic_set(&md->holders, 1);
26795 atomic_set(&md->open_count, 0);
26796- atomic_set(&md->event_nr, 0);
26797- atomic_set(&md->uevent_seq, 0);
26798+ atomic_set_unchecked(&md->event_nr, 0);
26799+ atomic_set_unchecked(&md->uevent_seq, 0);
26800 INIT_LIST_HEAD(&md->uevent_list);
26801 spin_lock_init(&md->uevent_lock);
26802
26803@@ -1977,7 +1977,7 @@ static void event_callback(void *context
26804
26805 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26806
26807- atomic_inc(&md->event_nr);
26808+ atomic_inc_unchecked(&md->event_nr);
26809 wake_up(&md->eventq);
26810 }
26811
26812@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26813
26814 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26815 {
26816- return atomic_add_return(1, &md->uevent_seq);
26817+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26818 }
26819
26820 uint32_t dm_get_event_nr(struct mapped_device *md)
26821 {
26822- return atomic_read(&md->event_nr);
26823+ return atomic_read_unchecked(&md->event_nr);
26824 }
26825
26826 int dm_wait_event(struct mapped_device *md, int event_nr)
26827 {
26828 return wait_event_interruptible(md->eventq,
26829- (event_nr != atomic_read(&md->event_nr)));
26830+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26831 }
26832
26833 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26834diff -urNp linux-3.0.3/drivers/md/dm-ioctl.c linux-3.0.3/drivers/md/dm-ioctl.c
26835--- linux-3.0.3/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26836+++ linux-3.0.3/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26837@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26838 cmd == DM_LIST_VERSIONS_CMD)
26839 return 0;
26840
26841- if ((cmd == DM_DEV_CREATE_CMD)) {
26842+ if (cmd == DM_DEV_CREATE_CMD) {
26843 if (!*param->name) {
26844 DMWARN("name not supplied when creating device");
26845 return -EINVAL;
26846diff -urNp linux-3.0.3/drivers/md/dm-raid1.c linux-3.0.3/drivers/md/dm-raid1.c
26847--- linux-3.0.3/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26848+++ linux-3.0.3/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26849@@ -40,7 +40,7 @@ enum dm_raid1_error {
26850
26851 struct mirror {
26852 struct mirror_set *ms;
26853- atomic_t error_count;
26854+ atomic_unchecked_t error_count;
26855 unsigned long error_type;
26856 struct dm_dev *dev;
26857 sector_t offset;
26858@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26859 struct mirror *m;
26860
26861 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26862- if (!atomic_read(&m->error_count))
26863+ if (!atomic_read_unchecked(&m->error_count))
26864 return m;
26865
26866 return NULL;
26867@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26868 * simple way to tell if a device has encountered
26869 * errors.
26870 */
26871- atomic_inc(&m->error_count);
26872+ atomic_inc_unchecked(&m->error_count);
26873
26874 if (test_and_set_bit(error_type, &m->error_type))
26875 return;
26876@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26877 struct mirror *m = get_default_mirror(ms);
26878
26879 do {
26880- if (likely(!atomic_read(&m->error_count)))
26881+ if (likely(!atomic_read_unchecked(&m->error_count)))
26882 return m;
26883
26884 if (m-- == ms->mirror)
26885@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26886 {
26887 struct mirror *default_mirror = get_default_mirror(m->ms);
26888
26889- return !atomic_read(&default_mirror->error_count);
26890+ return !atomic_read_unchecked(&default_mirror->error_count);
26891 }
26892
26893 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26894@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26895 */
26896 if (likely(region_in_sync(ms, region, 1)))
26897 m = choose_mirror(ms, bio->bi_sector);
26898- else if (m && atomic_read(&m->error_count))
26899+ else if (m && atomic_read_unchecked(&m->error_count))
26900 m = NULL;
26901
26902 if (likely(m))
26903@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26904 }
26905
26906 ms->mirror[mirror].ms = ms;
26907- atomic_set(&(ms->mirror[mirror].error_count), 0);
26908+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26909 ms->mirror[mirror].error_type = 0;
26910 ms->mirror[mirror].offset = offset;
26911
26912@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26913 */
26914 static char device_status_char(struct mirror *m)
26915 {
26916- if (!atomic_read(&(m->error_count)))
26917+ if (!atomic_read_unchecked(&(m->error_count)))
26918 return 'A';
26919
26920 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26921diff -urNp linux-3.0.3/drivers/md/dm-stripe.c linux-3.0.3/drivers/md/dm-stripe.c
26922--- linux-3.0.3/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26923+++ linux-3.0.3/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26924@@ -20,7 +20,7 @@ struct stripe {
26925 struct dm_dev *dev;
26926 sector_t physical_start;
26927
26928- atomic_t error_count;
26929+ atomic_unchecked_t error_count;
26930 };
26931
26932 struct stripe_c {
26933@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26934 kfree(sc);
26935 return r;
26936 }
26937- atomic_set(&(sc->stripe[i].error_count), 0);
26938+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26939 }
26940
26941 ti->private = sc;
26942@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26943 DMEMIT("%d ", sc->stripes);
26944 for (i = 0; i < sc->stripes; i++) {
26945 DMEMIT("%s ", sc->stripe[i].dev->name);
26946- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26947+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26948 'D' : 'A';
26949 }
26950 buffer[i] = '\0';
26951@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26952 */
26953 for (i = 0; i < sc->stripes; i++)
26954 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26955- atomic_inc(&(sc->stripe[i].error_count));
26956- if (atomic_read(&(sc->stripe[i].error_count)) <
26957+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
26958+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26959 DM_IO_ERROR_THRESHOLD)
26960 schedule_work(&sc->trigger_event);
26961 }
26962diff -urNp linux-3.0.3/drivers/md/dm-table.c linux-3.0.3/drivers/md/dm-table.c
26963--- linux-3.0.3/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26964+++ linux-3.0.3/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26965@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26966 if (!dev_size)
26967 return 0;
26968
26969- if ((start >= dev_size) || (start + len > dev_size)) {
26970+ if ((start >= dev_size) || (len > dev_size - start)) {
26971 DMWARN("%s: %s too small for target: "
26972 "start=%llu, len=%llu, dev_size=%llu",
26973 dm_device_name(ti->table->md), bdevname(bdev, b),
26974diff -urNp linux-3.0.3/drivers/md/md.c linux-3.0.3/drivers/md/md.c
26975--- linux-3.0.3/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26976+++ linux-3.0.3/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26977@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26978 * start build, activate spare
26979 */
26980 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26981-static atomic_t md_event_count;
26982+static atomic_unchecked_t md_event_count;
26983 void md_new_event(mddev_t *mddev)
26984 {
26985- atomic_inc(&md_event_count);
26986+ atomic_inc_unchecked(&md_event_count);
26987 wake_up(&md_event_waiters);
26988 }
26989 EXPORT_SYMBOL_GPL(md_new_event);
26990@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
26991 */
26992 static void md_new_event_inintr(mddev_t *mddev)
26993 {
26994- atomic_inc(&md_event_count);
26995+ atomic_inc_unchecked(&md_event_count);
26996 wake_up(&md_event_waiters);
26997 }
26998
26999@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27000
27001 rdev->preferred_minor = 0xffff;
27002 rdev->data_offset = le64_to_cpu(sb->data_offset);
27003- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27004+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27005
27006 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27007 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27008@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27009 else
27010 sb->resync_offset = cpu_to_le64(0);
27011
27012- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27013+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27014
27015 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27016 sb->size = cpu_to_le64(mddev->dev_sectors);
27017@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27018 static ssize_t
27019 errors_show(mdk_rdev_t *rdev, char *page)
27020 {
27021- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27022+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27023 }
27024
27025 static ssize_t
27026@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27027 char *e;
27028 unsigned long n = simple_strtoul(buf, &e, 10);
27029 if (*buf && (*e == 0 || *e == '\n')) {
27030- atomic_set(&rdev->corrected_errors, n);
27031+ atomic_set_unchecked(&rdev->corrected_errors, n);
27032 return len;
27033 }
27034 return -EINVAL;
27035@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27036 rdev->last_read_error.tv_sec = 0;
27037 rdev->last_read_error.tv_nsec = 0;
27038 atomic_set(&rdev->nr_pending, 0);
27039- atomic_set(&rdev->read_errors, 0);
27040- atomic_set(&rdev->corrected_errors, 0);
27041+ atomic_set_unchecked(&rdev->read_errors, 0);
27042+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27043
27044 INIT_LIST_HEAD(&rdev->same_set);
27045 init_waitqueue_head(&rdev->blocked_wait);
27046@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27047
27048 spin_unlock(&pers_lock);
27049 seq_printf(seq, "\n");
27050- mi->event = atomic_read(&md_event_count);
27051+ mi->event = atomic_read_unchecked(&md_event_count);
27052 return 0;
27053 }
27054 if (v == (void*)2) {
27055@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27056 chunk_kb ? "KB" : "B");
27057 if (bitmap->file) {
27058 seq_printf(seq, ", file: ");
27059- seq_path(seq, &bitmap->file->f_path, " \t\n");
27060+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27061 }
27062
27063 seq_printf(seq, "\n");
27064@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27065 else {
27066 struct seq_file *p = file->private_data;
27067 p->private = mi;
27068- mi->event = atomic_read(&md_event_count);
27069+ mi->event = atomic_read_unchecked(&md_event_count);
27070 }
27071 return error;
27072 }
27073@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27074 /* always allow read */
27075 mask = POLLIN | POLLRDNORM;
27076
27077- if (mi->event != atomic_read(&md_event_count))
27078+ if (mi->event != atomic_read_unchecked(&md_event_count))
27079 mask |= POLLERR | POLLPRI;
27080 return mask;
27081 }
27082@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27083 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27084 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27085 (int)part_stat_read(&disk->part0, sectors[1]) -
27086- atomic_read(&disk->sync_io);
27087+ atomic_read_unchecked(&disk->sync_io);
27088 /* sync IO will cause sync_io to increase before the disk_stats
27089 * as sync_io is counted when a request starts, and
27090 * disk_stats is counted when it completes.
27091diff -urNp linux-3.0.3/drivers/md/md.h linux-3.0.3/drivers/md/md.h
27092--- linux-3.0.3/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27093+++ linux-3.0.3/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27094@@ -97,13 +97,13 @@ struct mdk_rdev_s
27095 * only maintained for arrays that
27096 * support hot removal
27097 */
27098- atomic_t read_errors; /* number of consecutive read errors that
27099+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27100 * we have tried to ignore.
27101 */
27102 struct timespec last_read_error; /* monotonic time since our
27103 * last read error
27104 */
27105- atomic_t corrected_errors; /* number of corrected read errors,
27106+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27107 * for reporting to userspace and storing
27108 * in superblock.
27109 */
27110@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27111
27112 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27113 {
27114- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27115+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27116 }
27117
27118 struct mdk_personality
27119diff -urNp linux-3.0.3/drivers/md/raid10.c linux-3.0.3/drivers/md/raid10.c
27120--- linux-3.0.3/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27121+++ linux-3.0.3/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27122@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27123 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27124 set_bit(R10BIO_Uptodate, &r10_bio->state);
27125 else {
27126- atomic_add(r10_bio->sectors,
27127+ atomic_add_unchecked(r10_bio->sectors,
27128 &conf->mirrors[d].rdev->corrected_errors);
27129 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27130 md_error(r10_bio->mddev,
27131@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27132 {
27133 struct timespec cur_time_mon;
27134 unsigned long hours_since_last;
27135- unsigned int read_errors = atomic_read(&rdev->read_errors);
27136+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27137
27138 ktime_get_ts(&cur_time_mon);
27139
27140@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27141 * overflowing the shift of read_errors by hours_since_last.
27142 */
27143 if (hours_since_last >= 8 * sizeof(read_errors))
27144- atomic_set(&rdev->read_errors, 0);
27145+ atomic_set_unchecked(&rdev->read_errors, 0);
27146 else
27147- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27148+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27149 }
27150
27151 /*
27152@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27153 return;
27154
27155 check_decay_read_errors(mddev, rdev);
27156- atomic_inc(&rdev->read_errors);
27157- if (atomic_read(&rdev->read_errors) > max_read_errors) {
27158+ atomic_inc_unchecked(&rdev->read_errors);
27159+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27160 char b[BDEVNAME_SIZE];
27161 bdevname(rdev->bdev, b);
27162
27163@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27164 "md/raid10:%s: %s: Raid device exceeded "
27165 "read_error threshold [cur %d:max %d]\n",
27166 mdname(mddev), b,
27167- atomic_read(&rdev->read_errors), max_read_errors);
27168+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27169 printk(KERN_NOTICE
27170 "md/raid10:%s: %s: Failing raid device\n",
27171 mdname(mddev), b);
27172@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27173 test_bit(In_sync, &rdev->flags)) {
27174 atomic_inc(&rdev->nr_pending);
27175 rcu_read_unlock();
27176- atomic_add(s, &rdev->corrected_errors);
27177+ atomic_add_unchecked(s, &rdev->corrected_errors);
27178 if (sync_page_io(rdev,
27179 r10_bio->devs[sl].addr +
27180 sect,
27181diff -urNp linux-3.0.3/drivers/md/raid1.c linux-3.0.3/drivers/md/raid1.c
27182--- linux-3.0.3/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27183+++ linux-3.0.3/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27184@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27185 rdev_dec_pending(rdev, mddev);
27186 md_error(mddev, rdev);
27187 } else
27188- atomic_add(s, &rdev->corrected_errors);
27189+ atomic_add_unchecked(s, &rdev->corrected_errors);
27190 }
27191 d = start;
27192 while (d != r1_bio->read_disk) {
27193@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27194 /* Well, this device is dead */
27195 md_error(mddev, rdev);
27196 else {
27197- atomic_add(s, &rdev->corrected_errors);
27198+ atomic_add_unchecked(s, &rdev->corrected_errors);
27199 printk(KERN_INFO
27200 "md/raid1:%s: read error corrected "
27201 "(%d sectors at %llu on %s)\n",
27202diff -urNp linux-3.0.3/drivers/md/raid5.c linux-3.0.3/drivers/md/raid5.c
27203--- linux-3.0.3/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27204+++ linux-3.0.3/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27205@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27206 bi->bi_next = NULL;
27207 if ((rw & WRITE) &&
27208 test_bit(R5_ReWrite, &sh->dev[i].flags))
27209- atomic_add(STRIPE_SECTORS,
27210+ atomic_add_unchecked(STRIPE_SECTORS,
27211 &rdev->corrected_errors);
27212 generic_make_request(bi);
27213 } else {
27214@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27215 clear_bit(R5_ReadError, &sh->dev[i].flags);
27216 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27217 }
27218- if (atomic_read(&conf->disks[i].rdev->read_errors))
27219- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27220+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27221+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27222 } else {
27223 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27224 int retry = 0;
27225 rdev = conf->disks[i].rdev;
27226
27227 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27228- atomic_inc(&rdev->read_errors);
27229+ atomic_inc_unchecked(&rdev->read_errors);
27230 if (conf->mddev->degraded >= conf->max_degraded)
27231 printk_rl(KERN_WARNING
27232 "md/raid:%s: read error not correctable "
27233@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27234 (unsigned long long)(sh->sector
27235 + rdev->data_offset),
27236 bdn);
27237- else if (atomic_read(&rdev->read_errors)
27238+ else if (atomic_read_unchecked(&rdev->read_errors)
27239 > conf->max_nr_stripes)
27240 printk(KERN_WARNING
27241 "md/raid:%s: Too many read errors, failing device %s.\n",
27242@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27243 sector_t r_sector;
27244 struct stripe_head sh2;
27245
27246+ pax_track_stack();
27247
27248 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27249 stripe = new_sector;
27250diff -urNp linux-3.0.3/drivers/media/common/saa7146_hlp.c linux-3.0.3/drivers/media/common/saa7146_hlp.c
27251--- linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27252+++ linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27253@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27254
27255 int x[32], y[32], w[32], h[32];
27256
27257+ pax_track_stack();
27258+
27259 /* clear out memory */
27260 memset(&line_list[0], 0x00, sizeof(u32)*32);
27261 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27262diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27263--- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27264+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27265@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27266 u8 buf[HOST_LINK_BUF_SIZE];
27267 int i;
27268
27269+ pax_track_stack();
27270+
27271 dprintk("%s\n", __func__);
27272
27273 /* check if we have space for a link buf in the rx_buffer */
27274@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27275 unsigned long timeout;
27276 int written;
27277
27278+ pax_track_stack();
27279+
27280 dprintk("%s\n", __func__);
27281
27282 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27283diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h
27284--- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27285+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27286@@ -68,12 +68,12 @@ struct dvb_demux_feed {
27287 union {
27288 struct dmx_ts_feed ts;
27289 struct dmx_section_feed sec;
27290- } feed;
27291+ } __no_const feed;
27292
27293 union {
27294 dmx_ts_cb ts;
27295 dmx_section_cb sec;
27296- } cb;
27297+ } __no_const cb;
27298
27299 struct dvb_demux *demux;
27300 void *priv;
27301diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c
27302--- linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27303+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27304@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27305 const struct dvb_device *template, void *priv, int type)
27306 {
27307 struct dvb_device *dvbdev;
27308- struct file_operations *dvbdevfops;
27309+ file_operations_no_const *dvbdevfops;
27310 struct device *clsdev;
27311 int minor;
27312 int id;
27313diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c
27314--- linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27315+++ linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27316@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27317 struct dib0700_adapter_state {
27318 int (*set_param_save) (struct dvb_frontend *,
27319 struct dvb_frontend_parameters *);
27320-};
27321+} __no_const;
27322
27323 static int dib7070_set_param_override(struct dvb_frontend *fe,
27324 struct dvb_frontend_parameters *fep)
27325diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c
27326--- linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27327+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27328@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27329 if (!buf)
27330 return -ENOMEM;
27331
27332+ pax_track_stack();
27333+
27334 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27335 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27336 hx.addr, hx.len, hx.chk);
27337diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h
27338--- linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27339+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27340@@ -97,7 +97,7 @@
27341 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27342
27343 struct dibusb_state {
27344- struct dib_fe_xfer_ops ops;
27345+ dib_fe_xfer_ops_no_const ops;
27346 int mt2060_present;
27347 u8 tuner_addr;
27348 };
27349diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c
27350--- linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27351+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27352@@ -95,7 +95,7 @@ struct su3000_state {
27353
27354 struct s6x0_state {
27355 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27356-};
27357+} __no_const;
27358
27359 /* debug */
27360 static int dvb_usb_dw2102_debug;
27361diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c
27362--- linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27363+++ linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27364@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27365 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27366 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27367
27368+ pax_track_stack();
27369
27370 data[0] = 0x8a;
27371 len_in = 1;
27372@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27373 int ret = 0, len_in;
27374 u8 data[512] = {0};
27375
27376+ pax_track_stack();
27377+
27378 data[0] = 0x0a;
27379 len_in = 1;
27380 info("FRM Firmware Cold Reset");
27381diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000.h linux-3.0.3/drivers/media/dvb/frontends/dib3000.h
27382--- linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27383+++ linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27384@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27385 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27386 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27387 };
27388+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27389
27390 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27391 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27392- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27393+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27394 #else
27395 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27396 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27397diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c
27398--- linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27399+++ linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27400@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27401 static struct dvb_frontend_ops dib3000mb_ops;
27402
27403 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27404- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27405+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27406 {
27407 struct dib3000_state* state = NULL;
27408
27409diff -urNp linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c
27410--- linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27411+++ linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27412@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27413 int ret = -1;
27414 int sync;
27415
27416+ pax_track_stack();
27417+
27418 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27419
27420 fcp = 3000;
27421diff -urNp linux-3.0.3/drivers/media/dvb/frontends/or51211.c linux-3.0.3/drivers/media/dvb/frontends/or51211.c
27422--- linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27423+++ linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27424@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27425 u8 tudata[585];
27426 int i;
27427
27428+ pax_track_stack();
27429+
27430 dprintk("Firmware is %zd bytes\n",fw->size);
27431
27432 /* Get eprom data */
27433diff -urNp linux-3.0.3/drivers/media/video/cx18/cx18-driver.c linux-3.0.3/drivers/media/video/cx18/cx18-driver.c
27434--- linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27435+++ linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27436@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27437 struct i2c_client c;
27438 u8 eedata[256];
27439
27440+ pax_track_stack();
27441+
27442 memset(&c, 0, sizeof(c));
27443 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27444 c.adapter = &cx->i2c_adap[0];
27445diff -urNp linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c
27446--- linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27447+++ linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27448@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27449 bool handle = false;
27450 struct ir_raw_event ir_core_event[64];
27451
27452+ pax_track_stack();
27453+
27454 do {
27455 num = 0;
27456 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27457diff -urNp linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27458--- linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27459+++ linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27460@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27461 u8 *eeprom;
27462 struct tveeprom tvdata;
27463
27464+ pax_track_stack();
27465+
27466 memset(&tvdata,0,sizeof(tvdata));
27467
27468 eeprom = pvr2_eeprom_fetch(hdw);
27469diff -urNp linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c
27470--- linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27471+++ linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27472@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27473 unsigned char localPAT[256];
27474 unsigned char localPMT[256];
27475
27476+ pax_track_stack();
27477+
27478 /* Set video format - must be done first as it resets other settings */
27479 set_reg8(client, 0x41, h->video_format);
27480
27481diff -urNp linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c
27482--- linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27483+++ linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27484@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27485 u8 tmp[512];
27486 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27487
27488+ pax_track_stack();
27489+
27490 /* While any outstand message on the bus exists... */
27491 do {
27492
27493@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27494 u8 tmp[512];
27495 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27496
27497+ pax_track_stack();
27498+
27499 while (loop) {
27500
27501 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27502diff -urNp linux-3.0.3/drivers/media/video/timblogiw.c linux-3.0.3/drivers/media/video/timblogiw.c
27503--- linux-3.0.3/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27504+++ linux-3.0.3/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27505@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27506
27507 /* Platform device functions */
27508
27509-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27510+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27511 .vidioc_querycap = timblogiw_querycap,
27512 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27513 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27514diff -urNp linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c
27515--- linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27516+++ linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27517@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27518 unsigned char rv, gv, bv;
27519 static unsigned char *Y, *U, *V;
27520
27521+ pax_track_stack();
27522+
27523 frame = usbvision->cur_frame;
27524 image_size = frame->frmwidth * frame->frmheight;
27525 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27526diff -urNp linux-3.0.3/drivers/media/video/videobuf-dma-sg.c linux-3.0.3/drivers/media/video/videobuf-dma-sg.c
27527--- linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27528+++ linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27529@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27530 {
27531 struct videobuf_queue q;
27532
27533+ pax_track_stack();
27534+
27535 /* Required to make generic handler to call __videobuf_alloc */
27536 q.int_ops = &sg_ops;
27537
27538diff -urNp linux-3.0.3/drivers/message/fusion/mptbase.c linux-3.0.3/drivers/message/fusion/mptbase.c
27539--- linux-3.0.3/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27540+++ linux-3.0.3/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27541@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27542 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27543 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27544
27545+#ifdef CONFIG_GRKERNSEC_HIDESYM
27546+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27547+#else
27548 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27549 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27550+#endif
27551+
27552 /*
27553 * Rounding UP to nearest 4-kB boundary here...
27554 */
27555diff -urNp linux-3.0.3/drivers/message/fusion/mptsas.c linux-3.0.3/drivers/message/fusion/mptsas.c
27556--- linux-3.0.3/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27557+++ linux-3.0.3/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27558@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27559 return 0;
27560 }
27561
27562+static inline void
27563+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27564+{
27565+ if (phy_info->port_details) {
27566+ phy_info->port_details->rphy = rphy;
27567+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27568+ ioc->name, rphy));
27569+ }
27570+
27571+ if (rphy) {
27572+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27573+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27574+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27575+ ioc->name, rphy, rphy->dev.release));
27576+ }
27577+}
27578+
27579 /* no mutex */
27580 static void
27581 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27582@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27583 return NULL;
27584 }
27585
27586-static inline void
27587-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27588-{
27589- if (phy_info->port_details) {
27590- phy_info->port_details->rphy = rphy;
27591- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27592- ioc->name, rphy));
27593- }
27594-
27595- if (rphy) {
27596- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27597- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27598- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27599- ioc->name, rphy, rphy->dev.release));
27600- }
27601-}
27602-
27603 static inline struct sas_port *
27604 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27605 {
27606diff -urNp linux-3.0.3/drivers/message/fusion/mptscsih.c linux-3.0.3/drivers/message/fusion/mptscsih.c
27607--- linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27608+++ linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27609@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27610
27611 h = shost_priv(SChost);
27612
27613- if (h) {
27614- if (h->info_kbuf == NULL)
27615- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27616- return h->info_kbuf;
27617- h->info_kbuf[0] = '\0';
27618+ if (!h)
27619+ return NULL;
27620
27621- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27622- h->info_kbuf[size-1] = '\0';
27623- }
27624+ if (h->info_kbuf == NULL)
27625+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27626+ return h->info_kbuf;
27627+ h->info_kbuf[0] = '\0';
27628+
27629+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27630+ h->info_kbuf[size-1] = '\0';
27631
27632 return h->info_kbuf;
27633 }
27634diff -urNp linux-3.0.3/drivers/message/i2o/i2o_config.c linux-3.0.3/drivers/message/i2o/i2o_config.c
27635--- linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27636+++ linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27637@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27638 struct i2o_message *msg;
27639 unsigned int iop;
27640
27641+ pax_track_stack();
27642+
27643 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27644 return -EFAULT;
27645
27646diff -urNp linux-3.0.3/drivers/message/i2o/i2o_proc.c linux-3.0.3/drivers/message/i2o/i2o_proc.c
27647--- linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27648+++ linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27649@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27650 "Array Controller Device"
27651 };
27652
27653-static char *chtostr(u8 * chars, int n)
27654-{
27655- char tmp[256];
27656- tmp[0] = 0;
27657- return strncat(tmp, (char *)chars, n);
27658-}
27659-
27660 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27661 char *group)
27662 {
27663@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27664
27665 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27666 seq_printf(seq, "%-#8x", ddm_table.module_id);
27667- seq_printf(seq, "%-29s",
27668- chtostr(ddm_table.module_name_version, 28));
27669+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27670 seq_printf(seq, "%9d ", ddm_table.data_size);
27671 seq_printf(seq, "%8d", ddm_table.code_size);
27672
27673@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27674
27675 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27676 seq_printf(seq, "%-#8x", dst->module_id);
27677- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27678- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27679+ seq_printf(seq, "%-.28s", dst->module_name_version);
27680+ seq_printf(seq, "%-.8s", dst->date);
27681 seq_printf(seq, "%8d ", dst->module_size);
27682 seq_printf(seq, "%8d ", dst->mpb_size);
27683 seq_printf(seq, "0x%04x", dst->module_flags);
27684@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27685 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27686 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27687 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27688- seq_printf(seq, "Vendor info : %s\n",
27689- chtostr((u8 *) (work32 + 2), 16));
27690- seq_printf(seq, "Product info : %s\n",
27691- chtostr((u8 *) (work32 + 6), 16));
27692- seq_printf(seq, "Description : %s\n",
27693- chtostr((u8 *) (work32 + 10), 16));
27694- seq_printf(seq, "Product rev. : %s\n",
27695- chtostr((u8 *) (work32 + 14), 8));
27696+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27697+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27698+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27699+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27700
27701 seq_printf(seq, "Serial number : ");
27702 print_serial_number(seq, (u8 *) (work32 + 16),
27703@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27704 }
27705
27706 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27707- seq_printf(seq, "Module name : %s\n",
27708- chtostr(result.module_name, 24));
27709- seq_printf(seq, "Module revision : %s\n",
27710- chtostr(result.module_rev, 8));
27711+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27712+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27713
27714 seq_printf(seq, "Serial number : ");
27715 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27716@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27717 return 0;
27718 }
27719
27720- seq_printf(seq, "Device name : %s\n",
27721- chtostr(result.device_name, 64));
27722- seq_printf(seq, "Service name : %s\n",
27723- chtostr(result.service_name, 64));
27724- seq_printf(seq, "Physical name : %s\n",
27725- chtostr(result.physical_location, 64));
27726- seq_printf(seq, "Instance number : %s\n",
27727- chtostr(result.instance_number, 4));
27728+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27729+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27730+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27731+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27732
27733 return 0;
27734 }
27735diff -urNp linux-3.0.3/drivers/message/i2o/iop.c linux-3.0.3/drivers/message/i2o/iop.c
27736--- linux-3.0.3/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27737+++ linux-3.0.3/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27738@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27739
27740 spin_lock_irqsave(&c->context_list_lock, flags);
27741
27742- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27743- atomic_inc(&c->context_list_counter);
27744+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27745+ atomic_inc_unchecked(&c->context_list_counter);
27746
27747- entry->context = atomic_read(&c->context_list_counter);
27748+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27749
27750 list_add(&entry->list, &c->context_list);
27751
27752@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27753
27754 #if BITS_PER_LONG == 64
27755 spin_lock_init(&c->context_list_lock);
27756- atomic_set(&c->context_list_counter, 0);
27757+ atomic_set_unchecked(&c->context_list_counter, 0);
27758 INIT_LIST_HEAD(&c->context_list);
27759 #endif
27760
27761diff -urNp linux-3.0.3/drivers/mfd/abx500-core.c linux-3.0.3/drivers/mfd/abx500-core.c
27762--- linux-3.0.3/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27763+++ linux-3.0.3/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27764@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27765
27766 struct abx500_device_entry {
27767 struct list_head list;
27768- struct abx500_ops ops;
27769+ abx500_ops_no_const ops;
27770 struct device *dev;
27771 };
27772
27773diff -urNp linux-3.0.3/drivers/mfd/janz-cmodio.c linux-3.0.3/drivers/mfd/janz-cmodio.c
27774--- linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27775+++ linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27776@@ -13,6 +13,7 @@
27777
27778 #include <linux/kernel.h>
27779 #include <linux/module.h>
27780+#include <linux/slab.h>
27781 #include <linux/init.h>
27782 #include <linux/pci.h>
27783 #include <linux/interrupt.h>
27784diff -urNp linux-3.0.3/drivers/mfd/wm8350-i2c.c linux-3.0.3/drivers/mfd/wm8350-i2c.c
27785--- linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27786+++ linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27787@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27788 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27789 int ret;
27790
27791+ pax_track_stack();
27792+
27793 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27794 return -EINVAL;
27795
27796diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c
27797--- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27798+++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27799@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27800 * the lid is closed. This leads to interrupts as soon as a little move
27801 * is done.
27802 */
27803- atomic_inc(&lis3_dev.count);
27804+ atomic_inc_unchecked(&lis3_dev.count);
27805
27806 wake_up_interruptible(&lis3_dev.misc_wait);
27807 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27808@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27809 if (lis3_dev.pm_dev)
27810 pm_runtime_get_sync(lis3_dev.pm_dev);
27811
27812- atomic_set(&lis3_dev.count, 0);
27813+ atomic_set_unchecked(&lis3_dev.count, 0);
27814 return 0;
27815 }
27816
27817@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27818 add_wait_queue(&lis3_dev.misc_wait, &wait);
27819 while (true) {
27820 set_current_state(TASK_INTERRUPTIBLE);
27821- data = atomic_xchg(&lis3_dev.count, 0);
27822+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27823 if (data)
27824 break;
27825
27826@@ -583,7 +583,7 @@ out:
27827 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27828 {
27829 poll_wait(file, &lis3_dev.misc_wait, wait);
27830- if (atomic_read(&lis3_dev.count))
27831+ if (atomic_read_unchecked(&lis3_dev.count))
27832 return POLLIN | POLLRDNORM;
27833 return 0;
27834 }
27835diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h
27836--- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27837+++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27838@@ -265,7 +265,7 @@ struct lis3lv02d {
27839 struct input_polled_dev *idev; /* input device */
27840 struct platform_device *pdev; /* platform device */
27841 struct regulator_bulk_data regulators[2];
27842- atomic_t count; /* interrupt count after last read */
27843+ atomic_unchecked_t count; /* interrupt count after last read */
27844 union axis_conversion ac; /* hw -> logical axis */
27845 int mapped_btns[3];
27846
27847diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c
27848--- linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27849+++ linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27850@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27851 unsigned long nsec;
27852
27853 nsec = CLKS2NSEC(clks);
27854- atomic_long_inc(&mcs_op_statistics[op].count);
27855- atomic_long_add(nsec, &mcs_op_statistics[op].total);
27856+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27857+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27858 if (mcs_op_statistics[op].max < nsec)
27859 mcs_op_statistics[op].max = nsec;
27860 }
27861diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c
27862--- linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27863+++ linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27864@@ -32,9 +32,9 @@
27865
27866 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27867
27868-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27869+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27870 {
27871- unsigned long val = atomic_long_read(v);
27872+ unsigned long val = atomic_long_read_unchecked(v);
27873
27874 seq_printf(s, "%16lu %s\n", val, id);
27875 }
27876@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27877
27878 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27879 for (op = 0; op < mcsop_last; op++) {
27880- count = atomic_long_read(&mcs_op_statistics[op].count);
27881- total = atomic_long_read(&mcs_op_statistics[op].total);
27882+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27883+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27884 max = mcs_op_statistics[op].max;
27885 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27886 count ? total / count : 0, max);
27887diff -urNp linux-3.0.3/drivers/misc/sgi-gru/grutables.h linux-3.0.3/drivers/misc/sgi-gru/grutables.h
27888--- linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27889+++ linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27890@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27891 * GRU statistics.
27892 */
27893 struct gru_stats_s {
27894- atomic_long_t vdata_alloc;
27895- atomic_long_t vdata_free;
27896- atomic_long_t gts_alloc;
27897- atomic_long_t gts_free;
27898- atomic_long_t gms_alloc;
27899- atomic_long_t gms_free;
27900- atomic_long_t gts_double_allocate;
27901- atomic_long_t assign_context;
27902- atomic_long_t assign_context_failed;
27903- atomic_long_t free_context;
27904- atomic_long_t load_user_context;
27905- atomic_long_t load_kernel_context;
27906- atomic_long_t lock_kernel_context;
27907- atomic_long_t unlock_kernel_context;
27908- atomic_long_t steal_user_context;
27909- atomic_long_t steal_kernel_context;
27910- atomic_long_t steal_context_failed;
27911- atomic_long_t nopfn;
27912- atomic_long_t asid_new;
27913- atomic_long_t asid_next;
27914- atomic_long_t asid_wrap;
27915- atomic_long_t asid_reuse;
27916- atomic_long_t intr;
27917- atomic_long_t intr_cbr;
27918- atomic_long_t intr_tfh;
27919- atomic_long_t intr_spurious;
27920- atomic_long_t intr_mm_lock_failed;
27921- atomic_long_t call_os;
27922- atomic_long_t call_os_wait_queue;
27923- atomic_long_t user_flush_tlb;
27924- atomic_long_t user_unload_context;
27925- atomic_long_t user_exception;
27926- atomic_long_t set_context_option;
27927- atomic_long_t check_context_retarget_intr;
27928- atomic_long_t check_context_unload;
27929- atomic_long_t tlb_dropin;
27930- atomic_long_t tlb_preload_page;
27931- atomic_long_t tlb_dropin_fail_no_asid;
27932- atomic_long_t tlb_dropin_fail_upm;
27933- atomic_long_t tlb_dropin_fail_invalid;
27934- atomic_long_t tlb_dropin_fail_range_active;
27935- atomic_long_t tlb_dropin_fail_idle;
27936- atomic_long_t tlb_dropin_fail_fmm;
27937- atomic_long_t tlb_dropin_fail_no_exception;
27938- atomic_long_t tfh_stale_on_fault;
27939- atomic_long_t mmu_invalidate_range;
27940- atomic_long_t mmu_invalidate_page;
27941- atomic_long_t flush_tlb;
27942- atomic_long_t flush_tlb_gru;
27943- atomic_long_t flush_tlb_gru_tgh;
27944- atomic_long_t flush_tlb_gru_zero_asid;
27945-
27946- atomic_long_t copy_gpa;
27947- atomic_long_t read_gpa;
27948-
27949- atomic_long_t mesq_receive;
27950- atomic_long_t mesq_receive_none;
27951- atomic_long_t mesq_send;
27952- atomic_long_t mesq_send_failed;
27953- atomic_long_t mesq_noop;
27954- atomic_long_t mesq_send_unexpected_error;
27955- atomic_long_t mesq_send_lb_overflow;
27956- atomic_long_t mesq_send_qlimit_reached;
27957- atomic_long_t mesq_send_amo_nacked;
27958- atomic_long_t mesq_send_put_nacked;
27959- atomic_long_t mesq_page_overflow;
27960- atomic_long_t mesq_qf_locked;
27961- atomic_long_t mesq_qf_noop_not_full;
27962- atomic_long_t mesq_qf_switch_head_failed;
27963- atomic_long_t mesq_qf_unexpected_error;
27964- atomic_long_t mesq_noop_unexpected_error;
27965- atomic_long_t mesq_noop_lb_overflow;
27966- atomic_long_t mesq_noop_qlimit_reached;
27967- atomic_long_t mesq_noop_amo_nacked;
27968- atomic_long_t mesq_noop_put_nacked;
27969- atomic_long_t mesq_noop_page_overflow;
27970+ atomic_long_unchecked_t vdata_alloc;
27971+ atomic_long_unchecked_t vdata_free;
27972+ atomic_long_unchecked_t gts_alloc;
27973+ atomic_long_unchecked_t gts_free;
27974+ atomic_long_unchecked_t gms_alloc;
27975+ atomic_long_unchecked_t gms_free;
27976+ atomic_long_unchecked_t gts_double_allocate;
27977+ atomic_long_unchecked_t assign_context;
27978+ atomic_long_unchecked_t assign_context_failed;
27979+ atomic_long_unchecked_t free_context;
27980+ atomic_long_unchecked_t load_user_context;
27981+ atomic_long_unchecked_t load_kernel_context;
27982+ atomic_long_unchecked_t lock_kernel_context;
27983+ atomic_long_unchecked_t unlock_kernel_context;
27984+ atomic_long_unchecked_t steal_user_context;
27985+ atomic_long_unchecked_t steal_kernel_context;
27986+ atomic_long_unchecked_t steal_context_failed;
27987+ atomic_long_unchecked_t nopfn;
27988+ atomic_long_unchecked_t asid_new;
27989+ atomic_long_unchecked_t asid_next;
27990+ atomic_long_unchecked_t asid_wrap;
27991+ atomic_long_unchecked_t asid_reuse;
27992+ atomic_long_unchecked_t intr;
27993+ atomic_long_unchecked_t intr_cbr;
27994+ atomic_long_unchecked_t intr_tfh;
27995+ atomic_long_unchecked_t intr_spurious;
27996+ atomic_long_unchecked_t intr_mm_lock_failed;
27997+ atomic_long_unchecked_t call_os;
27998+ atomic_long_unchecked_t call_os_wait_queue;
27999+ atomic_long_unchecked_t user_flush_tlb;
28000+ atomic_long_unchecked_t user_unload_context;
28001+ atomic_long_unchecked_t user_exception;
28002+ atomic_long_unchecked_t set_context_option;
28003+ atomic_long_unchecked_t check_context_retarget_intr;
28004+ atomic_long_unchecked_t check_context_unload;
28005+ atomic_long_unchecked_t tlb_dropin;
28006+ atomic_long_unchecked_t tlb_preload_page;
28007+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28008+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28009+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28010+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28011+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28012+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28013+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28014+ atomic_long_unchecked_t tfh_stale_on_fault;
28015+ atomic_long_unchecked_t mmu_invalidate_range;
28016+ atomic_long_unchecked_t mmu_invalidate_page;
28017+ atomic_long_unchecked_t flush_tlb;
28018+ atomic_long_unchecked_t flush_tlb_gru;
28019+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28020+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28021+
28022+ atomic_long_unchecked_t copy_gpa;
28023+ atomic_long_unchecked_t read_gpa;
28024+
28025+ atomic_long_unchecked_t mesq_receive;
28026+ atomic_long_unchecked_t mesq_receive_none;
28027+ atomic_long_unchecked_t mesq_send;
28028+ atomic_long_unchecked_t mesq_send_failed;
28029+ atomic_long_unchecked_t mesq_noop;
28030+ atomic_long_unchecked_t mesq_send_unexpected_error;
28031+ atomic_long_unchecked_t mesq_send_lb_overflow;
28032+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28033+ atomic_long_unchecked_t mesq_send_amo_nacked;
28034+ atomic_long_unchecked_t mesq_send_put_nacked;
28035+ atomic_long_unchecked_t mesq_page_overflow;
28036+ atomic_long_unchecked_t mesq_qf_locked;
28037+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28038+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28039+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28040+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28041+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28042+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28043+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28044+ atomic_long_unchecked_t mesq_noop_put_nacked;
28045+ atomic_long_unchecked_t mesq_noop_page_overflow;
28046
28047 };
28048
28049@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28050 tghop_invalidate, mcsop_last};
28051
28052 struct mcs_op_statistic {
28053- atomic_long_t count;
28054- atomic_long_t total;
28055+ atomic_long_unchecked_t count;
28056+ atomic_long_unchecked_t total;
28057 unsigned long max;
28058 };
28059
28060@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28061
28062 #define STAT(id) do { \
28063 if (gru_options & OPT_STATS) \
28064- atomic_long_inc(&gru_stats.id); \
28065+ atomic_long_inc_unchecked(&gru_stats.id); \
28066 } while (0)
28067
28068 #ifdef CONFIG_SGI_GRU_DEBUG
28069diff -urNp linux-3.0.3/drivers/misc/sgi-xp/xp.h linux-3.0.3/drivers/misc/sgi-xp/xp.h
28070--- linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28071+++ linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28072@@ -289,7 +289,7 @@ struct xpc_interface {
28073 xpc_notify_func, void *);
28074 void (*received) (short, int, void *);
28075 enum xp_retval (*partid_to_nasids) (short, void *);
28076-};
28077+} __no_const;
28078
28079 extern struct xpc_interface xpc_interface;
28080
28081diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c
28082--- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28083+++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28084@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28085 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28086 unsigned long timeo = jiffies + HZ;
28087
28088+ pax_track_stack();
28089+
28090 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28091 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28092 goto sleep;
28093@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28094 unsigned long initial_adr;
28095 int initial_len = len;
28096
28097+ pax_track_stack();
28098+
28099 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28100 adr += chip->start;
28101 initial_adr = adr;
28102@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28103 int retries = 3;
28104 int ret;
28105
28106+ pax_track_stack();
28107+
28108 adr += chip->start;
28109
28110 retry:
28111diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c
28112--- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28113+++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28114@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28115 unsigned long cmd_addr;
28116 struct cfi_private *cfi = map->fldrv_priv;
28117
28118+ pax_track_stack();
28119+
28120 adr += chip->start;
28121
28122 /* Ensure cmd read/writes are aligned. */
28123@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28124 DECLARE_WAITQUEUE(wait, current);
28125 int wbufsize, z;
28126
28127+ pax_track_stack();
28128+
28129 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28130 if (adr & (map_bankwidth(map)-1))
28131 return -EINVAL;
28132@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28133 DECLARE_WAITQUEUE(wait, current);
28134 int ret = 0;
28135
28136+ pax_track_stack();
28137+
28138 adr += chip->start;
28139
28140 /* Let's determine this according to the interleave only once */
28141@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28142 unsigned long timeo = jiffies + HZ;
28143 DECLARE_WAITQUEUE(wait, current);
28144
28145+ pax_track_stack();
28146+
28147 adr += chip->start;
28148
28149 /* Let's determine this according to the interleave only once */
28150@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28151 unsigned long timeo = jiffies + HZ;
28152 DECLARE_WAITQUEUE(wait, current);
28153
28154+ pax_track_stack();
28155+
28156 adr += chip->start;
28157
28158 /* Let's determine this according to the interleave only once */
28159diff -urNp linux-3.0.3/drivers/mtd/devices/doc2000.c linux-3.0.3/drivers/mtd/devices/doc2000.c
28160--- linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28161+++ linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28162@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28163
28164 /* The ECC will not be calculated correctly if less than 512 is written */
28165 /* DBB-
28166- if (len != 0x200 && eccbuf)
28167+ if (len != 0x200)
28168 printk(KERN_WARNING
28169 "ECC needs a full sector write (adr: %lx size %lx)\n",
28170 (long) to, (long) len);
28171diff -urNp linux-3.0.3/drivers/mtd/devices/doc2001.c linux-3.0.3/drivers/mtd/devices/doc2001.c
28172--- linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28173+++ linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28174@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28175 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28176
28177 /* Don't allow read past end of device */
28178- if (from >= this->totlen)
28179+ if (from >= this->totlen || !len)
28180 return -EINVAL;
28181
28182 /* Don't allow a single read to cross a 512-byte block boundary */
28183diff -urNp linux-3.0.3/drivers/mtd/ftl.c linux-3.0.3/drivers/mtd/ftl.c
28184--- linux-3.0.3/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28185+++ linux-3.0.3/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28186@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28187 loff_t offset;
28188 uint16_t srcunitswap = cpu_to_le16(srcunit);
28189
28190+ pax_track_stack();
28191+
28192 eun = &part->EUNInfo[srcunit];
28193 xfer = &part->XferInfo[xferunit];
28194 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28195diff -urNp linux-3.0.3/drivers/mtd/inftlcore.c linux-3.0.3/drivers/mtd/inftlcore.c
28196--- linux-3.0.3/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28197+++ linux-3.0.3/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28198@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28199 struct inftl_oob oob;
28200 size_t retlen;
28201
28202+ pax_track_stack();
28203+
28204 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28205 "pending=%d)\n", inftl, thisVUC, pendingblock);
28206
28207diff -urNp linux-3.0.3/drivers/mtd/inftlmount.c linux-3.0.3/drivers/mtd/inftlmount.c
28208--- linux-3.0.3/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28209+++ linux-3.0.3/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28210@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28211 struct INFTLPartition *ip;
28212 size_t retlen;
28213
28214+ pax_track_stack();
28215+
28216 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28217
28218 /*
28219diff -urNp linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c
28220--- linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28221+++ linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28222@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28223 {
28224 map_word pfow_val[4];
28225
28226+ pax_track_stack();
28227+
28228 /* Check identification string */
28229 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28230 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28231diff -urNp linux-3.0.3/drivers/mtd/mtdchar.c linux-3.0.3/drivers/mtd/mtdchar.c
28232--- linux-3.0.3/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28233+++ linux-3.0.3/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28234@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28235 u_long size;
28236 struct mtd_info_user info;
28237
28238+ pax_track_stack();
28239+
28240 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28241
28242 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28243diff -urNp linux-3.0.3/drivers/mtd/nand/denali.c linux-3.0.3/drivers/mtd/nand/denali.c
28244--- linux-3.0.3/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28245+++ linux-3.0.3/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28246@@ -26,6 +26,7 @@
28247 #include <linux/pci.h>
28248 #include <linux/mtd/mtd.h>
28249 #include <linux/module.h>
28250+#include <linux/slab.h>
28251
28252 #include "denali.h"
28253
28254diff -urNp linux-3.0.3/drivers/mtd/nftlcore.c linux-3.0.3/drivers/mtd/nftlcore.c
28255--- linux-3.0.3/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28256+++ linux-3.0.3/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28257@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28258 int inplace = 1;
28259 size_t retlen;
28260
28261+ pax_track_stack();
28262+
28263 memset(BlockMap, 0xff, sizeof(BlockMap));
28264 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28265
28266diff -urNp linux-3.0.3/drivers/mtd/nftlmount.c linux-3.0.3/drivers/mtd/nftlmount.c
28267--- linux-3.0.3/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28268+++ linux-3.0.3/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28269@@ -24,6 +24,7 @@
28270 #include <asm/errno.h>
28271 #include <linux/delay.h>
28272 #include <linux/slab.h>
28273+#include <linux/sched.h>
28274 #include <linux/mtd/mtd.h>
28275 #include <linux/mtd/nand.h>
28276 #include <linux/mtd/nftl.h>
28277@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28278 struct mtd_info *mtd = nftl->mbd.mtd;
28279 unsigned int i;
28280
28281+ pax_track_stack();
28282+
28283 /* Assume logical EraseSize == physical erasesize for starting the scan.
28284 We'll sort it out later if we find a MediaHeader which says otherwise */
28285 /* Actually, we won't. The new DiskOnChip driver has already scanned
28286diff -urNp linux-3.0.3/drivers/mtd/ubi/build.c linux-3.0.3/drivers/mtd/ubi/build.c
28287--- linux-3.0.3/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28288+++ linux-3.0.3/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28289@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28290 static int __init bytes_str_to_int(const char *str)
28291 {
28292 char *endp;
28293- unsigned long result;
28294+ unsigned long result, scale = 1;
28295
28296 result = simple_strtoul(str, &endp, 0);
28297 if (str == endp || result >= INT_MAX) {
28298@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28299
28300 switch (*endp) {
28301 case 'G':
28302- result *= 1024;
28303+ scale *= 1024;
28304 case 'M':
28305- result *= 1024;
28306+ scale *= 1024;
28307 case 'K':
28308- result *= 1024;
28309+ scale *= 1024;
28310 if (endp[1] == 'i' && endp[2] == 'B')
28311 endp += 2;
28312 case '\0':
28313@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28314 return -EINVAL;
28315 }
28316
28317- return result;
28318+ if ((intoverflow_t)result*scale >= INT_MAX) {
28319+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28320+ str);
28321+ return -EINVAL;
28322+ }
28323+
28324+ return result*scale;
28325 }
28326
28327 /**
28328diff -urNp linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c
28329--- linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28330+++ linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28331@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28332 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28333 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28334
28335-static struct bfa_ioc_hwif nw_hwif_ct;
28336+static struct bfa_ioc_hwif nw_hwif_ct = {
28337+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28338+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28339+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28340+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28341+ .ioc_map_port = bfa_ioc_ct_map_port,
28342+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28343+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28344+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28345+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28346+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28347+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28348+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28349+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28350+};
28351
28352 /**
28353 * Called from bfa_ioc_attach() to map asic specific calls.
28354@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28355 void
28356 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28357 {
28358- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28359- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28360- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28361- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28362- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28363- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28364- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28365- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28366- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28367- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28368- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28369- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28370- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28371-
28372 ioc->ioc_hwif = &nw_hwif_ct;
28373 }
28374
28375diff -urNp linux-3.0.3/drivers/net/bna/bnad.c linux-3.0.3/drivers/net/bna/bnad.c
28376--- linux-3.0.3/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28377+++ linux-3.0.3/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28378@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28379 struct bna_intr_info *intr_info =
28380 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28381 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28382- struct bna_tx_event_cbfn tx_cbfn;
28383+ static struct bna_tx_event_cbfn tx_cbfn = {
28384+ /* Initialize the tx event handlers */
28385+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28386+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28387+ .tx_stall_cbfn = bnad_cb_tx_stall,
28388+ .tx_resume_cbfn = bnad_cb_tx_resume,
28389+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28390+ };
28391 struct bna_tx *tx;
28392 unsigned long flags;
28393
28394@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28395 tx_config->txq_depth = bnad->txq_depth;
28396 tx_config->tx_type = BNA_TX_T_REGULAR;
28397
28398- /* Initialize the tx event handlers */
28399- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28400- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28401- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28402- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28403- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28404-
28405 /* Get BNA's resource requirement for one tx object */
28406 spin_lock_irqsave(&bnad->bna_lock, flags);
28407 bna_tx_res_req(bnad->num_txq_per_tx,
28408@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28409 struct bna_intr_info *intr_info =
28410 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28411 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28412- struct bna_rx_event_cbfn rx_cbfn;
28413+ static struct bna_rx_event_cbfn rx_cbfn = {
28414+ /* Initialize the Rx event handlers */
28415+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28416+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28417+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28418+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28419+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28420+ .rx_post_cbfn = bnad_cb_rx_post
28421+ };
28422 struct bna_rx *rx;
28423 unsigned long flags;
28424
28425 /* Initialize the Rx object configuration */
28426 bnad_init_rx_config(bnad, rx_config);
28427
28428- /* Initialize the Rx event handlers */
28429- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28430- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28431- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28432- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28433- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28434- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28435-
28436 /* Get BNA's resource requirement for one Rx object */
28437 spin_lock_irqsave(&bnad->bna_lock, flags);
28438 bna_rx_res_req(rx_config, res_info);
28439diff -urNp linux-3.0.3/drivers/net/bnx2.c linux-3.0.3/drivers/net/bnx2.c
28440--- linux-3.0.3/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28441+++ linux-3.0.3/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28442@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28443 int rc = 0;
28444 u32 magic, csum;
28445
28446+ pax_track_stack();
28447+
28448 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28449 goto test_nvram_done;
28450
28451diff -urNp linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c
28452--- linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28453+++ linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28454@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28455 int i, rc;
28456 u32 magic, crc;
28457
28458+ pax_track_stack();
28459+
28460 if (BP_NOMCP(bp))
28461 return 0;
28462
28463diff -urNp linux-3.0.3/drivers/net/cxgb3/l2t.h linux-3.0.3/drivers/net/cxgb3/l2t.h
28464--- linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28465+++ linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28466@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28467 */
28468 struct l2t_skb_cb {
28469 arp_failure_handler_func arp_failure_handler;
28470-};
28471+} __no_const;
28472
28473 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28474
28475diff -urNp linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c
28476--- linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28477+++ linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28478@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28479 unsigned int nchan = adap->params.nports;
28480 struct msix_entry entries[MAX_INGQ + 1];
28481
28482+ pax_track_stack();
28483+
28484 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28485 entries[i].entry = i;
28486
28487diff -urNp linux-3.0.3/drivers/net/cxgb4/t4_hw.c linux-3.0.3/drivers/net/cxgb4/t4_hw.c
28488--- linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28489+++ linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28490@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28491 u8 vpd[VPD_LEN], csum;
28492 unsigned int vpdr_len, kw_offset, id_len;
28493
28494+ pax_track_stack();
28495+
28496 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28497 if (ret < 0)
28498 return ret;
28499diff -urNp linux-3.0.3/drivers/net/e1000e/82571.c linux-3.0.3/drivers/net/e1000e/82571.c
28500--- linux-3.0.3/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28501+++ linux-3.0.3/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28502@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28503 {
28504 struct e1000_hw *hw = &adapter->hw;
28505 struct e1000_mac_info *mac = &hw->mac;
28506- struct e1000_mac_operations *func = &mac->ops;
28507+ e1000_mac_operations_no_const *func = &mac->ops;
28508 u32 swsm = 0;
28509 u32 swsm2 = 0;
28510 bool force_clear_smbi = false;
28511diff -urNp linux-3.0.3/drivers/net/e1000e/es2lan.c linux-3.0.3/drivers/net/e1000e/es2lan.c
28512--- linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28513+++ linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28514@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28515 {
28516 struct e1000_hw *hw = &adapter->hw;
28517 struct e1000_mac_info *mac = &hw->mac;
28518- struct e1000_mac_operations *func = &mac->ops;
28519+ e1000_mac_operations_no_const *func = &mac->ops;
28520
28521 /* Set media type */
28522 switch (adapter->pdev->device) {
28523diff -urNp linux-3.0.3/drivers/net/e1000e/hw.h linux-3.0.3/drivers/net/e1000e/hw.h
28524--- linux-3.0.3/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28525+++ linux-3.0.3/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28526@@ -776,6 +776,7 @@ struct e1000_mac_operations {
28527 void (*write_vfta)(struct e1000_hw *, u32, u32);
28528 s32 (*read_mac_addr)(struct e1000_hw *);
28529 };
28530+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28531
28532 /* Function pointers for the PHY. */
28533 struct e1000_phy_operations {
28534@@ -799,6 +800,7 @@ struct e1000_phy_operations {
28535 void (*power_up)(struct e1000_hw *);
28536 void (*power_down)(struct e1000_hw *);
28537 };
28538+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28539
28540 /* Function pointers for the NVM. */
28541 struct e1000_nvm_operations {
28542@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28543 s32 (*validate)(struct e1000_hw *);
28544 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28545 };
28546+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28547
28548 struct e1000_mac_info {
28549- struct e1000_mac_operations ops;
28550+ e1000_mac_operations_no_const ops;
28551 u8 addr[ETH_ALEN];
28552 u8 perm_addr[ETH_ALEN];
28553
28554@@ -853,7 +856,7 @@ struct e1000_mac_info {
28555 };
28556
28557 struct e1000_phy_info {
28558- struct e1000_phy_operations ops;
28559+ e1000_phy_operations_no_const ops;
28560
28561 enum e1000_phy_type type;
28562
28563@@ -887,7 +890,7 @@ struct e1000_phy_info {
28564 };
28565
28566 struct e1000_nvm_info {
28567- struct e1000_nvm_operations ops;
28568+ e1000_nvm_operations_no_const ops;
28569
28570 enum e1000_nvm_type type;
28571 enum e1000_nvm_override override;
28572diff -urNp linux-3.0.3/drivers/net/hamradio/6pack.c linux-3.0.3/drivers/net/hamradio/6pack.c
28573--- linux-3.0.3/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28574+++ linux-3.0.3/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28575@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28576 unsigned char buf[512];
28577 int count1;
28578
28579+ pax_track_stack();
28580+
28581 if (!count)
28582 return;
28583
28584diff -urNp linux-3.0.3/drivers/net/igb/e1000_hw.h linux-3.0.3/drivers/net/igb/e1000_hw.h
28585--- linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28586+++ linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28587@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28588 s32 (*read_mac_addr)(struct e1000_hw *);
28589 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28590 };
28591+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28592
28593 struct e1000_phy_operations {
28594 s32 (*acquire)(struct e1000_hw *);
28595@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28596 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28597 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28598 };
28599+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28600
28601 struct e1000_nvm_operations {
28602 s32 (*acquire)(struct e1000_hw *);
28603@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28604 s32 (*update)(struct e1000_hw *);
28605 s32 (*validate)(struct e1000_hw *);
28606 };
28607+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28608
28609 struct e1000_info {
28610 s32 (*get_invariants)(struct e1000_hw *);
28611@@ -350,7 +353,7 @@ struct e1000_info {
28612 extern const struct e1000_info e1000_82575_info;
28613
28614 struct e1000_mac_info {
28615- struct e1000_mac_operations ops;
28616+ e1000_mac_operations_no_const ops;
28617
28618 u8 addr[6];
28619 u8 perm_addr[6];
28620@@ -388,7 +391,7 @@ struct e1000_mac_info {
28621 };
28622
28623 struct e1000_phy_info {
28624- struct e1000_phy_operations ops;
28625+ e1000_phy_operations_no_const ops;
28626
28627 enum e1000_phy_type type;
28628
28629@@ -423,7 +426,7 @@ struct e1000_phy_info {
28630 };
28631
28632 struct e1000_nvm_info {
28633- struct e1000_nvm_operations ops;
28634+ e1000_nvm_operations_no_const ops;
28635 enum e1000_nvm_type type;
28636 enum e1000_nvm_override override;
28637
28638@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28639 s32 (*check_for_ack)(struct e1000_hw *, u16);
28640 s32 (*check_for_rst)(struct e1000_hw *, u16);
28641 };
28642+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28643
28644 struct e1000_mbx_stats {
28645 u32 msgs_tx;
28646@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28647 };
28648
28649 struct e1000_mbx_info {
28650- struct e1000_mbx_operations ops;
28651+ e1000_mbx_operations_no_const ops;
28652 struct e1000_mbx_stats stats;
28653 u32 timeout;
28654 u32 usec_delay;
28655diff -urNp linux-3.0.3/drivers/net/igbvf/vf.h linux-3.0.3/drivers/net/igbvf/vf.h
28656--- linux-3.0.3/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28657+++ linux-3.0.3/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28658@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28659 s32 (*read_mac_addr)(struct e1000_hw *);
28660 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28661 };
28662+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28663
28664 struct e1000_mac_info {
28665- struct e1000_mac_operations ops;
28666+ e1000_mac_operations_no_const ops;
28667 u8 addr[6];
28668 u8 perm_addr[6];
28669
28670@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28671 s32 (*check_for_ack)(struct e1000_hw *);
28672 s32 (*check_for_rst)(struct e1000_hw *);
28673 };
28674+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28675
28676 struct e1000_mbx_stats {
28677 u32 msgs_tx;
28678@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28679 };
28680
28681 struct e1000_mbx_info {
28682- struct e1000_mbx_operations ops;
28683+ e1000_mbx_operations_no_const ops;
28684 struct e1000_mbx_stats stats;
28685 u32 timeout;
28686 u32 usec_delay;
28687diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_main.c linux-3.0.3/drivers/net/ixgb/ixgb_main.c
28688--- linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28689+++ linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28690@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28691 u32 rctl;
28692 int i;
28693
28694+ pax_track_stack();
28695+
28696 /* Check for Promiscuous and All Multicast modes */
28697
28698 rctl = IXGB_READ_REG(hw, RCTL);
28699diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_param.c linux-3.0.3/drivers/net/ixgb/ixgb_param.c
28700--- linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28701+++ linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28702@@ -261,6 +261,9 @@ void __devinit
28703 ixgb_check_options(struct ixgb_adapter *adapter)
28704 {
28705 int bd = adapter->bd_number;
28706+
28707+ pax_track_stack();
28708+
28709 if (bd >= IXGB_MAX_NIC) {
28710 pr_notice("Warning: no configuration for board #%i\n", bd);
28711 pr_notice("Using defaults for all values\n");
28712diff -urNp linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h
28713--- linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28714+++ linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28715@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28716 s32 (*update_checksum)(struct ixgbe_hw *);
28717 u16 (*calc_checksum)(struct ixgbe_hw *);
28718 };
28719+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28720
28721 struct ixgbe_mac_operations {
28722 s32 (*init_hw)(struct ixgbe_hw *);
28723@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28724 /* Flow Control */
28725 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28726 };
28727+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28728
28729 struct ixgbe_phy_operations {
28730 s32 (*identify)(struct ixgbe_hw *);
28731@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28732 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28733 s32 (*check_overtemp)(struct ixgbe_hw *);
28734 };
28735+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28736
28737 struct ixgbe_eeprom_info {
28738- struct ixgbe_eeprom_operations ops;
28739+ ixgbe_eeprom_operations_no_const ops;
28740 enum ixgbe_eeprom_type type;
28741 u32 semaphore_delay;
28742 u16 word_size;
28743@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28744
28745 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28746 struct ixgbe_mac_info {
28747- struct ixgbe_mac_operations ops;
28748+ ixgbe_mac_operations_no_const ops;
28749 enum ixgbe_mac_type type;
28750 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28751 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28752@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28753 };
28754
28755 struct ixgbe_phy_info {
28756- struct ixgbe_phy_operations ops;
28757+ ixgbe_phy_operations_no_const ops;
28758 struct mdio_if_info mdio;
28759 enum ixgbe_phy_type type;
28760 u32 id;
28761@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28762 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28763 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28764 };
28765+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28766
28767 struct ixgbe_mbx_stats {
28768 u32 msgs_tx;
28769@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28770 };
28771
28772 struct ixgbe_mbx_info {
28773- struct ixgbe_mbx_operations ops;
28774+ ixgbe_mbx_operations_no_const ops;
28775 struct ixgbe_mbx_stats stats;
28776 u32 timeout;
28777 u32 usec_delay;
28778diff -urNp linux-3.0.3/drivers/net/ixgbevf/vf.h linux-3.0.3/drivers/net/ixgbevf/vf.h
28779--- linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28780+++ linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28781@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28782 s32 (*clear_vfta)(struct ixgbe_hw *);
28783 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28784 };
28785+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28786
28787 enum ixgbe_mac_type {
28788 ixgbe_mac_unknown = 0,
28789@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28790 };
28791
28792 struct ixgbe_mac_info {
28793- struct ixgbe_mac_operations ops;
28794+ ixgbe_mac_operations_no_const ops;
28795 u8 addr[6];
28796 u8 perm_addr[6];
28797
28798@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28799 s32 (*check_for_ack)(struct ixgbe_hw *);
28800 s32 (*check_for_rst)(struct ixgbe_hw *);
28801 };
28802+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28803
28804 struct ixgbe_mbx_stats {
28805 u32 msgs_tx;
28806@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28807 };
28808
28809 struct ixgbe_mbx_info {
28810- struct ixgbe_mbx_operations ops;
28811+ ixgbe_mbx_operations_no_const ops;
28812 struct ixgbe_mbx_stats stats;
28813 u32 timeout;
28814 u32 udelay;
28815diff -urNp linux-3.0.3/drivers/net/ksz884x.c linux-3.0.3/drivers/net/ksz884x.c
28816--- linux-3.0.3/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28817+++ linux-3.0.3/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28818@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28819 int rc;
28820 u64 counter[TOTAL_PORT_COUNTER_NUM];
28821
28822+ pax_track_stack();
28823+
28824 mutex_lock(&hw_priv->lock);
28825 n = SWITCH_PORT_NUM;
28826 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28827diff -urNp linux-3.0.3/drivers/net/mlx4/main.c linux-3.0.3/drivers/net/mlx4/main.c
28828--- linux-3.0.3/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28829+++ linux-3.0.3/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28830@@ -40,6 +40,7 @@
28831 #include <linux/dma-mapping.h>
28832 #include <linux/slab.h>
28833 #include <linux/io-mapping.h>
28834+#include <linux/sched.h>
28835
28836 #include <linux/mlx4/device.h>
28837 #include <linux/mlx4/doorbell.h>
28838@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28839 u64 icm_size;
28840 int err;
28841
28842+ pax_track_stack();
28843+
28844 err = mlx4_QUERY_FW(dev);
28845 if (err) {
28846 if (err == -EACCES)
28847diff -urNp linux-3.0.3/drivers/net/niu.c linux-3.0.3/drivers/net/niu.c
28848--- linux-3.0.3/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28849+++ linux-3.0.3/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28850@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28851 int i, num_irqs, err;
28852 u8 first_ldg;
28853
28854+ pax_track_stack();
28855+
28856 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28857 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28858 ldg_num_map[i] = first_ldg + i;
28859diff -urNp linux-3.0.3/drivers/net/pcnet32.c linux-3.0.3/drivers/net/pcnet32.c
28860--- linux-3.0.3/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28861+++ linux-3.0.3/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28862@@ -82,7 +82,7 @@ static int cards_found;
28863 /*
28864 * VLB I/O addresses
28865 */
28866-static unsigned int pcnet32_portlist[] __initdata =
28867+static unsigned int pcnet32_portlist[] __devinitdata =
28868 { 0x300, 0x320, 0x340, 0x360, 0 };
28869
28870 static int pcnet32_debug;
28871@@ -270,7 +270,7 @@ struct pcnet32_private {
28872 struct sk_buff **rx_skbuff;
28873 dma_addr_t *tx_dma_addr;
28874 dma_addr_t *rx_dma_addr;
28875- struct pcnet32_access a;
28876+ struct pcnet32_access *a;
28877 spinlock_t lock; /* Guard lock */
28878 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28879 unsigned int rx_ring_size; /* current rx ring size */
28880@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28881 u16 val;
28882
28883 netif_wake_queue(dev);
28884- val = lp->a.read_csr(ioaddr, CSR3);
28885+ val = lp->a->read_csr(ioaddr, CSR3);
28886 val &= 0x00ff;
28887- lp->a.write_csr(ioaddr, CSR3, val);
28888+ lp->a->write_csr(ioaddr, CSR3, val);
28889 napi_enable(&lp->napi);
28890 }
28891
28892@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28893 r = mii_link_ok(&lp->mii_if);
28894 } else if (lp->chip_version >= PCNET32_79C970A) {
28895 ulong ioaddr = dev->base_addr; /* card base I/O address */
28896- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28897+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28898 } else { /* can not detect link on really old chips */
28899 r = 1;
28900 }
28901@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28902 pcnet32_netif_stop(dev);
28903
28904 spin_lock_irqsave(&lp->lock, flags);
28905- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28906+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28907
28908 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28909
28910@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28911 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28912 {
28913 struct pcnet32_private *lp = netdev_priv(dev);
28914- struct pcnet32_access *a = &lp->a; /* access to registers */
28915+ struct pcnet32_access *a = lp->a; /* access to registers */
28916 ulong ioaddr = dev->base_addr; /* card base I/O address */
28917 struct sk_buff *skb; /* sk buff */
28918 int x, i; /* counters */
28919@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28920 pcnet32_netif_stop(dev);
28921
28922 spin_lock_irqsave(&lp->lock, flags);
28923- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28924+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28925
28926 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28927
28928 /* Reset the PCNET32 */
28929- lp->a.reset(ioaddr);
28930- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28931+ lp->a->reset(ioaddr);
28932+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28933
28934 /* switch pcnet32 to 32bit mode */
28935- lp->a.write_bcr(ioaddr, 20, 2);
28936+ lp->a->write_bcr(ioaddr, 20, 2);
28937
28938 /* purge & init rings but don't actually restart */
28939 pcnet32_restart(dev, 0x0000);
28940
28941- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28942+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28943
28944 /* Initialize Transmit buffers. */
28945 size = data_len + 15;
28946@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28947
28948 /* set int loopback in CSR15 */
28949 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28950- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28951+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28952
28953 teststatus = cpu_to_le16(0x8000);
28954- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28955+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28956
28957 /* Check status of descriptors */
28958 for (x = 0; x < numbuffs; x++) {
28959@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28960 }
28961 }
28962
28963- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28964+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28965 wmb();
28966 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28967 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28968@@ -1015,7 +1015,7 @@ clean_up:
28969 pcnet32_restart(dev, CSR0_NORMAL);
28970 } else {
28971 pcnet32_purge_rx_ring(dev);
28972- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28973+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28974 }
28975 spin_unlock_irqrestore(&lp->lock, flags);
28976
28977@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28978 enum ethtool_phys_id_state state)
28979 {
28980 struct pcnet32_private *lp = netdev_priv(dev);
28981- struct pcnet32_access *a = &lp->a;
28982+ struct pcnet32_access *a = lp->a;
28983 ulong ioaddr = dev->base_addr;
28984 unsigned long flags;
28985 int i;
28986@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28987 {
28988 int csr5;
28989 struct pcnet32_private *lp = netdev_priv(dev);
28990- struct pcnet32_access *a = &lp->a;
28991+ struct pcnet32_access *a = lp->a;
28992 ulong ioaddr = dev->base_addr;
28993 int ticks;
28994
28995@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
28996 spin_lock_irqsave(&lp->lock, flags);
28997 if (pcnet32_tx(dev)) {
28998 /* reset the chip to clear the error condition, then restart */
28999- lp->a.reset(ioaddr);
29000- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29001+ lp->a->reset(ioaddr);
29002+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29003 pcnet32_restart(dev, CSR0_START);
29004 netif_wake_queue(dev);
29005 }
29006@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29007 __napi_complete(napi);
29008
29009 /* clear interrupt masks */
29010- val = lp->a.read_csr(ioaddr, CSR3);
29011+ val = lp->a->read_csr(ioaddr, CSR3);
29012 val &= 0x00ff;
29013- lp->a.write_csr(ioaddr, CSR3, val);
29014+ lp->a->write_csr(ioaddr, CSR3, val);
29015
29016 /* Set interrupt enable. */
29017- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29018+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29019
29020 spin_unlock_irqrestore(&lp->lock, flags);
29021 }
29022@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29023 int i, csr0;
29024 u16 *buff = ptr;
29025 struct pcnet32_private *lp = netdev_priv(dev);
29026- struct pcnet32_access *a = &lp->a;
29027+ struct pcnet32_access *a = lp->a;
29028 ulong ioaddr = dev->base_addr;
29029 unsigned long flags;
29030
29031@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29032 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29033 if (lp->phymask & (1 << j)) {
29034 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29035- lp->a.write_bcr(ioaddr, 33,
29036+ lp->a->write_bcr(ioaddr, 33,
29037 (j << 5) | i);
29038- *buff++ = lp->a.read_bcr(ioaddr, 34);
29039+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29040 }
29041 }
29042 }
29043@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29044 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29045 lp->options |= PCNET32_PORT_FD;
29046
29047- lp->a = *a;
29048+ lp->a = a;
29049
29050 /* prior to register_netdev, dev->name is not yet correct */
29051 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29052@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29053 if (lp->mii) {
29054 /* lp->phycount and lp->phymask are set to 0 by memset above */
29055
29056- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29057+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29058 /* scan for PHYs */
29059 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29060 unsigned short id1, id2;
29061@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29062 pr_info("Found PHY %04x:%04x at address %d\n",
29063 id1, id2, i);
29064 }
29065- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29066+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29067 if (lp->phycount > 1)
29068 lp->options |= PCNET32_PORT_MII;
29069 }
29070@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29071 }
29072
29073 /* Reset the PCNET32 */
29074- lp->a.reset(ioaddr);
29075+ lp->a->reset(ioaddr);
29076
29077 /* switch pcnet32 to 32bit mode */
29078- lp->a.write_bcr(ioaddr, 20, 2);
29079+ lp->a->write_bcr(ioaddr, 20, 2);
29080
29081 netif_printk(lp, ifup, KERN_DEBUG, dev,
29082 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29083@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29084 (u32) (lp->init_dma_addr));
29085
29086 /* set/reset autoselect bit */
29087- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29088+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29089 if (lp->options & PCNET32_PORT_ASEL)
29090 val |= 2;
29091- lp->a.write_bcr(ioaddr, 2, val);
29092+ lp->a->write_bcr(ioaddr, 2, val);
29093
29094 /* handle full duplex setting */
29095 if (lp->mii_if.full_duplex) {
29096- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29097+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29098 if (lp->options & PCNET32_PORT_FD) {
29099 val |= 1;
29100 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29101@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29102 if (lp->chip_version == 0x2627)
29103 val |= 3;
29104 }
29105- lp->a.write_bcr(ioaddr, 9, val);
29106+ lp->a->write_bcr(ioaddr, 9, val);
29107 }
29108
29109 /* set/reset GPSI bit in test register */
29110- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29111+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29112 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29113 val |= 0x10;
29114- lp->a.write_csr(ioaddr, 124, val);
29115+ lp->a->write_csr(ioaddr, 124, val);
29116
29117 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29118 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29119@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29120 * duplex, and/or enable auto negotiation, and clear DANAS
29121 */
29122 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29123- lp->a.write_bcr(ioaddr, 32,
29124- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29125+ lp->a->write_bcr(ioaddr, 32,
29126+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29127 /* disable Auto Negotiation, set 10Mpbs, HD */
29128- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29129+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29130 if (lp->options & PCNET32_PORT_FD)
29131 val |= 0x10;
29132 if (lp->options & PCNET32_PORT_100)
29133 val |= 0x08;
29134- lp->a.write_bcr(ioaddr, 32, val);
29135+ lp->a->write_bcr(ioaddr, 32, val);
29136 } else {
29137 if (lp->options & PCNET32_PORT_ASEL) {
29138- lp->a.write_bcr(ioaddr, 32,
29139- lp->a.read_bcr(ioaddr,
29140+ lp->a->write_bcr(ioaddr, 32,
29141+ lp->a->read_bcr(ioaddr,
29142 32) | 0x0080);
29143 /* enable auto negotiate, setup, disable fd */
29144- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29145+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29146 val |= 0x20;
29147- lp->a.write_bcr(ioaddr, 32, val);
29148+ lp->a->write_bcr(ioaddr, 32, val);
29149 }
29150 }
29151 } else {
29152@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29153 * There is really no good other way to handle multiple PHYs
29154 * other than turning off all automatics
29155 */
29156- val = lp->a.read_bcr(ioaddr, 2);
29157- lp->a.write_bcr(ioaddr, 2, val & ~2);
29158- val = lp->a.read_bcr(ioaddr, 32);
29159- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29160+ val = lp->a->read_bcr(ioaddr, 2);
29161+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29162+ val = lp->a->read_bcr(ioaddr, 32);
29163+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29164
29165 if (!(lp->options & PCNET32_PORT_ASEL)) {
29166 /* setup ecmd */
29167@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29168 ethtool_cmd_speed_set(&ecmd,
29169 (lp->options & PCNET32_PORT_100) ?
29170 SPEED_100 : SPEED_10);
29171- bcr9 = lp->a.read_bcr(ioaddr, 9);
29172+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29173
29174 if (lp->options & PCNET32_PORT_FD) {
29175 ecmd.duplex = DUPLEX_FULL;
29176@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29177 ecmd.duplex = DUPLEX_HALF;
29178 bcr9 |= ~(1 << 0);
29179 }
29180- lp->a.write_bcr(ioaddr, 9, bcr9);
29181+ lp->a->write_bcr(ioaddr, 9, bcr9);
29182 }
29183
29184 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29185@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29186
29187 #ifdef DO_DXSUFLO
29188 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29189- val = lp->a.read_csr(ioaddr, CSR3);
29190+ val = lp->a->read_csr(ioaddr, CSR3);
29191 val |= 0x40;
29192- lp->a.write_csr(ioaddr, CSR3, val);
29193+ lp->a->write_csr(ioaddr, CSR3, val);
29194 }
29195 #endif
29196
29197@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29198 napi_enable(&lp->napi);
29199
29200 /* Re-initialize the PCNET32, and start it when done. */
29201- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29202- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29203+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29204+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29205
29206- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29207- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29208+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29209+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29210
29211 netif_start_queue(dev);
29212
29213@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29214
29215 i = 0;
29216 while (i++ < 100)
29217- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29218+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29219 break;
29220 /*
29221 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29222 * reports that doing so triggers a bug in the '974.
29223 */
29224- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29225+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29226
29227 netif_printk(lp, ifup, KERN_DEBUG, dev,
29228 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29229 i,
29230 (u32) (lp->init_dma_addr),
29231- lp->a.read_csr(ioaddr, CSR0));
29232+ lp->a->read_csr(ioaddr, CSR0));
29233
29234 spin_unlock_irqrestore(&lp->lock, flags);
29235
29236@@ -2218,7 +2218,7 @@ err_free_ring:
29237 * Switch back to 16bit mode to avoid problems with dumb
29238 * DOS packet driver after a warm reboot
29239 */
29240- lp->a.write_bcr(ioaddr, 20, 4);
29241+ lp->a->write_bcr(ioaddr, 20, 4);
29242
29243 err_free_irq:
29244 spin_unlock_irqrestore(&lp->lock, flags);
29245@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29246
29247 /* wait for stop */
29248 for (i = 0; i < 100; i++)
29249- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29250+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29251 break;
29252
29253 if (i >= 100)
29254@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29255 return;
29256
29257 /* ReInit Ring */
29258- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29259+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29260 i = 0;
29261 while (i++ < 1000)
29262- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29263+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29264 break;
29265
29266- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29267+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29268 }
29269
29270 static void pcnet32_tx_timeout(struct net_device *dev)
29271@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29272 /* Transmitter timeout, serious problems. */
29273 if (pcnet32_debug & NETIF_MSG_DRV)
29274 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29275- dev->name, lp->a.read_csr(ioaddr, CSR0));
29276- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29277+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29278+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29279 dev->stats.tx_errors++;
29280 if (netif_msg_tx_err(lp)) {
29281 int i;
29282@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29283
29284 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29285 "%s() called, csr0 %4.4x\n",
29286- __func__, lp->a.read_csr(ioaddr, CSR0));
29287+ __func__, lp->a->read_csr(ioaddr, CSR0));
29288
29289 /* Default status -- will not enable Successful-TxDone
29290 * interrupt when that option is available to us.
29291@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29292 dev->stats.tx_bytes += skb->len;
29293
29294 /* Trigger an immediate send poll. */
29295- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29296+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29297
29298 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29299 lp->tx_full = 1;
29300@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29301
29302 spin_lock(&lp->lock);
29303
29304- csr0 = lp->a.read_csr(ioaddr, CSR0);
29305+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29306 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29307 if (csr0 == 0xffff)
29308 break; /* PCMCIA remove happened */
29309 /* Acknowledge all of the current interrupt sources ASAP. */
29310- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29311+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29312
29313 netif_printk(lp, intr, KERN_DEBUG, dev,
29314 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29315- csr0, lp->a.read_csr(ioaddr, CSR0));
29316+ csr0, lp->a->read_csr(ioaddr, CSR0));
29317
29318 /* Log misc errors. */
29319 if (csr0 & 0x4000)
29320@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29321 if (napi_schedule_prep(&lp->napi)) {
29322 u16 val;
29323 /* set interrupt masks */
29324- val = lp->a.read_csr(ioaddr, CSR3);
29325+ val = lp->a->read_csr(ioaddr, CSR3);
29326 val |= 0x5f00;
29327- lp->a.write_csr(ioaddr, CSR3, val);
29328+ lp->a->write_csr(ioaddr, CSR3, val);
29329
29330 __napi_schedule(&lp->napi);
29331 break;
29332 }
29333- csr0 = lp->a.read_csr(ioaddr, CSR0);
29334+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29335 }
29336
29337 netif_printk(lp, intr, KERN_DEBUG, dev,
29338 "exiting interrupt, csr0=%#4.4x\n",
29339- lp->a.read_csr(ioaddr, CSR0));
29340+ lp->a->read_csr(ioaddr, CSR0));
29341
29342 spin_unlock(&lp->lock);
29343
29344@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29345
29346 spin_lock_irqsave(&lp->lock, flags);
29347
29348- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29349+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29350
29351 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29352 "Shutting down ethercard, status was %2.2x\n",
29353- lp->a.read_csr(ioaddr, CSR0));
29354+ lp->a->read_csr(ioaddr, CSR0));
29355
29356 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29357- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29358+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29359
29360 /*
29361 * Switch back to 16bit mode to avoid problems with dumb
29362 * DOS packet driver after a warm reboot
29363 */
29364- lp->a.write_bcr(ioaddr, 20, 4);
29365+ lp->a->write_bcr(ioaddr, 20, 4);
29366
29367 spin_unlock_irqrestore(&lp->lock, flags);
29368
29369@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29370 unsigned long flags;
29371
29372 spin_lock_irqsave(&lp->lock, flags);
29373- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29374+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29375 spin_unlock_irqrestore(&lp->lock, flags);
29376
29377 return &dev->stats;
29378@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29379 if (dev->flags & IFF_ALLMULTI) {
29380 ib->filter[0] = cpu_to_le32(~0U);
29381 ib->filter[1] = cpu_to_le32(~0U);
29382- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29383- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29384- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29385- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29386+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29387+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29388+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29389+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29390 return;
29391 }
29392 /* clear the multicast filter */
29393@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29394 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29395 }
29396 for (i = 0; i < 4; i++)
29397- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29398+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29399 le16_to_cpu(mcast_table[i]));
29400 }
29401
29402@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29403
29404 spin_lock_irqsave(&lp->lock, flags);
29405 suspended = pcnet32_suspend(dev, &flags, 0);
29406- csr15 = lp->a.read_csr(ioaddr, CSR15);
29407+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29408 if (dev->flags & IFF_PROMISC) {
29409 /* Log any net taps. */
29410 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29411 lp->init_block->mode =
29412 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29413 7);
29414- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29415+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29416 } else {
29417 lp->init_block->mode =
29418 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29419- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29420+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29421 pcnet32_load_multicast(dev);
29422 }
29423
29424 if (suspended) {
29425 int csr5;
29426 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29427- csr5 = lp->a.read_csr(ioaddr, CSR5);
29428- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29429+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29430+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29431 } else {
29432- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29433+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29434 pcnet32_restart(dev, CSR0_NORMAL);
29435 netif_wake_queue(dev);
29436 }
29437@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29438 if (!lp->mii)
29439 return 0;
29440
29441- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29442- val_out = lp->a.read_bcr(ioaddr, 34);
29443+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29444+ val_out = lp->a->read_bcr(ioaddr, 34);
29445
29446 return val_out;
29447 }
29448@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29449 if (!lp->mii)
29450 return;
29451
29452- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29453- lp->a.write_bcr(ioaddr, 34, val);
29454+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29455+ lp->a->write_bcr(ioaddr, 34, val);
29456 }
29457
29458 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29459@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29460 curr_link = mii_link_ok(&lp->mii_if);
29461 } else {
29462 ulong ioaddr = dev->base_addr; /* card base I/O address */
29463- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29464+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29465 }
29466 if (!curr_link) {
29467 if (prev_link || verbose) {
29468@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29469 (ecmd.duplex == DUPLEX_FULL)
29470 ? "full" : "half");
29471 }
29472- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29473+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29474 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29475 if (lp->mii_if.full_duplex)
29476 bcr9 |= (1 << 0);
29477 else
29478 bcr9 &= ~(1 << 0);
29479- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29480+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29481 }
29482 } else {
29483 netif_info(lp, link, dev, "link up\n");
29484diff -urNp linux-3.0.3/drivers/net/ppp_generic.c linux-3.0.3/drivers/net/ppp_generic.c
29485--- linux-3.0.3/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29486+++ linux-3.0.3/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29487@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29488 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29489 struct ppp_stats stats;
29490 struct ppp_comp_stats cstats;
29491- char *vers;
29492
29493 switch (cmd) {
29494 case SIOCGPPPSTATS:
29495@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29496 break;
29497
29498 case SIOCGPPPVER:
29499- vers = PPP_VERSION;
29500- if (copy_to_user(addr, vers, strlen(vers) + 1))
29501+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29502 break;
29503 err = 0;
29504 break;
29505diff -urNp linux-3.0.3/drivers/net/r8169.c linux-3.0.3/drivers/net/r8169.c
29506--- linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29507+++ linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29508@@ -645,12 +645,12 @@ struct rtl8169_private {
29509 struct mdio_ops {
29510 void (*write)(void __iomem *, int, int);
29511 int (*read)(void __iomem *, int);
29512- } mdio_ops;
29513+ } __no_const mdio_ops;
29514
29515 struct pll_power_ops {
29516 void (*down)(struct rtl8169_private *);
29517 void (*up)(struct rtl8169_private *);
29518- } pll_power_ops;
29519+ } __no_const pll_power_ops;
29520
29521 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29522 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29523diff -urNp linux-3.0.3/drivers/net/tg3.h linux-3.0.3/drivers/net/tg3.h
29524--- linux-3.0.3/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29525+++ linux-3.0.3/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29526@@ -134,6 +134,7 @@
29527 #define CHIPREV_ID_5750_A0 0x4000
29528 #define CHIPREV_ID_5750_A1 0x4001
29529 #define CHIPREV_ID_5750_A3 0x4003
29530+#define CHIPREV_ID_5750_C1 0x4201
29531 #define CHIPREV_ID_5750_C2 0x4202
29532 #define CHIPREV_ID_5752_A0_HW 0x5000
29533 #define CHIPREV_ID_5752_A0 0x6000
29534diff -urNp linux-3.0.3/drivers/net/tokenring/abyss.c linux-3.0.3/drivers/net/tokenring/abyss.c
29535--- linux-3.0.3/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29536+++ linux-3.0.3/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29537@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29538
29539 static int __init abyss_init (void)
29540 {
29541- abyss_netdev_ops = tms380tr_netdev_ops;
29542+ pax_open_kernel();
29543+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29544
29545- abyss_netdev_ops.ndo_open = abyss_open;
29546- abyss_netdev_ops.ndo_stop = abyss_close;
29547+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29548+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29549+ pax_close_kernel();
29550
29551 return pci_register_driver(&abyss_driver);
29552 }
29553diff -urNp linux-3.0.3/drivers/net/tokenring/madgemc.c linux-3.0.3/drivers/net/tokenring/madgemc.c
29554--- linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29555+++ linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29556@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29557
29558 static int __init madgemc_init (void)
29559 {
29560- madgemc_netdev_ops = tms380tr_netdev_ops;
29561- madgemc_netdev_ops.ndo_open = madgemc_open;
29562- madgemc_netdev_ops.ndo_stop = madgemc_close;
29563+ pax_open_kernel();
29564+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29565+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29566+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29567+ pax_close_kernel();
29568
29569 return mca_register_driver (&madgemc_driver);
29570 }
29571diff -urNp linux-3.0.3/drivers/net/tokenring/proteon.c linux-3.0.3/drivers/net/tokenring/proteon.c
29572--- linux-3.0.3/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29573+++ linux-3.0.3/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29574@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29575 struct platform_device *pdev;
29576 int i, num = 0, err = 0;
29577
29578- proteon_netdev_ops = tms380tr_netdev_ops;
29579- proteon_netdev_ops.ndo_open = proteon_open;
29580- proteon_netdev_ops.ndo_stop = tms380tr_close;
29581+ pax_open_kernel();
29582+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29583+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29584+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29585+ pax_close_kernel();
29586
29587 err = platform_driver_register(&proteon_driver);
29588 if (err)
29589diff -urNp linux-3.0.3/drivers/net/tokenring/skisa.c linux-3.0.3/drivers/net/tokenring/skisa.c
29590--- linux-3.0.3/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29591+++ linux-3.0.3/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29592@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29593 struct platform_device *pdev;
29594 int i, num = 0, err = 0;
29595
29596- sk_isa_netdev_ops = tms380tr_netdev_ops;
29597- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29598- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29599+ pax_open_kernel();
29600+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29601+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29602+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29603+ pax_close_kernel();
29604
29605 err = platform_driver_register(&sk_isa_driver);
29606 if (err)
29607diff -urNp linux-3.0.3/drivers/net/tulip/de2104x.c linux-3.0.3/drivers/net/tulip/de2104x.c
29608--- linux-3.0.3/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29609+++ linux-3.0.3/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29610@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29611 struct de_srom_info_leaf *il;
29612 void *bufp;
29613
29614+ pax_track_stack();
29615+
29616 /* download entire eeprom */
29617 for (i = 0; i < DE_EEPROM_WORDS; i++)
29618 ((__le16 *)ee_data)[i] =
29619diff -urNp linux-3.0.3/drivers/net/tulip/de4x5.c linux-3.0.3/drivers/net/tulip/de4x5.c
29620--- linux-3.0.3/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29621+++ linux-3.0.3/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29622@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29623 for (i=0; i<ETH_ALEN; i++) {
29624 tmp.addr[i] = dev->dev_addr[i];
29625 }
29626- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29627+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29628 break;
29629
29630 case DE4X5_SET_HWADDR: /* Set the hardware address */
29631@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29632 spin_lock_irqsave(&lp->lock, flags);
29633 memcpy(&statbuf, &lp->pktStats, ioc->len);
29634 spin_unlock_irqrestore(&lp->lock, flags);
29635- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29636+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29637 return -EFAULT;
29638 break;
29639 }
29640diff -urNp linux-3.0.3/drivers/net/usb/hso.c linux-3.0.3/drivers/net/usb/hso.c
29641--- linux-3.0.3/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29642+++ linux-3.0.3/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29643@@ -71,7 +71,7 @@
29644 #include <asm/byteorder.h>
29645 #include <linux/serial_core.h>
29646 #include <linux/serial.h>
29647-
29648+#include <asm/local.h>
29649
29650 #define MOD_AUTHOR "Option Wireless"
29651 #define MOD_DESCRIPTION "USB High Speed Option driver"
29652@@ -257,7 +257,7 @@ struct hso_serial {
29653
29654 /* from usb_serial_port */
29655 struct tty_struct *tty;
29656- int open_count;
29657+ local_t open_count;
29658 spinlock_t serial_lock;
29659
29660 int (*write_data) (struct hso_serial *serial);
29661@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29662 struct urb *urb;
29663
29664 urb = serial->rx_urb[0];
29665- if (serial->open_count > 0) {
29666+ if (local_read(&serial->open_count) > 0) {
29667 count = put_rxbuf_data(urb, serial);
29668 if (count == -1)
29669 return;
29670@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29671 DUMP1(urb->transfer_buffer, urb->actual_length);
29672
29673 /* Anyone listening? */
29674- if (serial->open_count == 0)
29675+ if (local_read(&serial->open_count) == 0)
29676 return;
29677
29678 if (status == 0) {
29679@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29680 spin_unlock_irq(&serial->serial_lock);
29681
29682 /* check for port already opened, if not set the termios */
29683- serial->open_count++;
29684- if (serial->open_count == 1) {
29685+ if (local_inc_return(&serial->open_count) == 1) {
29686 serial->rx_state = RX_IDLE;
29687 /* Force default termio settings */
29688 _hso_serial_set_termios(tty, NULL);
29689@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29690 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29691 if (result) {
29692 hso_stop_serial_device(serial->parent);
29693- serial->open_count--;
29694+ local_dec(&serial->open_count);
29695 kref_put(&serial->parent->ref, hso_serial_ref_free);
29696 }
29697 } else {
29698@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29699
29700 /* reset the rts and dtr */
29701 /* do the actual close */
29702- serial->open_count--;
29703+ local_dec(&serial->open_count);
29704
29705- if (serial->open_count <= 0) {
29706- serial->open_count = 0;
29707+ if (local_read(&serial->open_count) <= 0) {
29708+ local_set(&serial->open_count, 0);
29709 spin_lock_irq(&serial->serial_lock);
29710 if (serial->tty == tty) {
29711 serial->tty->driver_data = NULL;
29712@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29713
29714 /* the actual setup */
29715 spin_lock_irqsave(&serial->serial_lock, flags);
29716- if (serial->open_count)
29717+ if (local_read(&serial->open_count))
29718 _hso_serial_set_termios(tty, old);
29719 else
29720 tty->termios = old;
29721@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29722 D1("Pending read interrupt on port %d\n", i);
29723 spin_lock(&serial->serial_lock);
29724 if (serial->rx_state == RX_IDLE &&
29725- serial->open_count > 0) {
29726+ local_read(&serial->open_count) > 0) {
29727 /* Setup and send a ctrl req read on
29728 * port i */
29729 if (!serial->rx_urb_filled[0]) {
29730@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29731 /* Start all serial ports */
29732 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29733 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29734- if (dev2ser(serial_table[i])->open_count) {
29735+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29736 result =
29737 hso_start_serial_device(serial_table[i], GFP_NOIO);
29738 hso_kick_transmit(dev2ser(serial_table[i]));
29739diff -urNp linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
29740--- linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29741+++ linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29742@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29743 * Return with error code if any of the queue indices
29744 * is out of range
29745 */
29746- if (p->ring_index[i] < 0 ||
29747- p->ring_index[i] >= adapter->num_rx_queues)
29748+ if (p->ring_index[i] >= adapter->num_rx_queues)
29749 return -EINVAL;
29750 }
29751
29752diff -urNp linux-3.0.3/drivers/net/vxge/vxge-config.h linux-3.0.3/drivers/net/vxge/vxge-config.h
29753--- linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29754+++ linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29755@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29756 void (*link_down)(struct __vxge_hw_device *devh);
29757 void (*crit_err)(struct __vxge_hw_device *devh,
29758 enum vxge_hw_event type, u64 ext_data);
29759-};
29760+} __no_const;
29761
29762 /*
29763 * struct __vxge_hw_blockpool_entry - Block private data structure
29764diff -urNp linux-3.0.3/drivers/net/vxge/vxge-main.c linux-3.0.3/drivers/net/vxge/vxge-main.c
29765--- linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29766+++ linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29767@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29768 struct sk_buff *completed[NR_SKB_COMPLETED];
29769 int more;
29770
29771+ pax_track_stack();
29772+
29773 do {
29774 more = 0;
29775 skb_ptr = completed;
29776@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29777 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29778 int index;
29779
29780+ pax_track_stack();
29781+
29782 /*
29783 * Filling
29784 * - itable with bucket numbers
29785diff -urNp linux-3.0.3/drivers/net/vxge/vxge-traffic.h linux-3.0.3/drivers/net/vxge/vxge-traffic.h
29786--- linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29787+++ linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29788@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29789 struct vxge_hw_mempool_dma *dma_object,
29790 u32 index,
29791 u32 is_last);
29792-};
29793+} __no_const;
29794
29795 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29796 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29797diff -urNp linux-3.0.3/drivers/net/wan/cycx_x25.c linux-3.0.3/drivers/net/wan/cycx_x25.c
29798--- linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29799+++ linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29800@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29801 unsigned char hex[1024],
29802 * phex = hex;
29803
29804+ pax_track_stack();
29805+
29806 if (len >= (sizeof(hex) / 2))
29807 len = (sizeof(hex) / 2) - 1;
29808
29809diff -urNp linux-3.0.3/drivers/net/wan/hdlc_x25.c linux-3.0.3/drivers/net/wan/hdlc_x25.c
29810--- linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29811+++ linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29812@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29813
29814 static int x25_open(struct net_device *dev)
29815 {
29816- struct lapb_register_struct cb;
29817+ static struct lapb_register_struct cb = {
29818+ .connect_confirmation = x25_connected,
29819+ .connect_indication = x25_connected,
29820+ .disconnect_confirmation = x25_disconnected,
29821+ .disconnect_indication = x25_disconnected,
29822+ .data_indication = x25_data_indication,
29823+ .data_transmit = x25_data_transmit
29824+ };
29825 int result;
29826
29827- cb.connect_confirmation = x25_connected;
29828- cb.connect_indication = x25_connected;
29829- cb.disconnect_confirmation = x25_disconnected;
29830- cb.disconnect_indication = x25_disconnected;
29831- cb.data_indication = x25_data_indication;
29832- cb.data_transmit = x25_data_transmit;
29833-
29834 result = lapb_register(dev, &cb);
29835 if (result != LAPB_OK)
29836 return result;
29837diff -urNp linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c
29838--- linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29839+++ linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29840@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29841 int do_autopm = 1;
29842 DECLARE_COMPLETION_ONSTACK(notif_completion);
29843
29844+ pax_track_stack();
29845+
29846 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29847 i2400m, ack, ack_size);
29848 BUG_ON(_ack == i2400m->bm_ack_buf);
29849diff -urNp linux-3.0.3/drivers/net/wireless/airo.c linux-3.0.3/drivers/net/wireless/airo.c
29850--- linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29851+++ linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29852@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29853 BSSListElement * loop_net;
29854 BSSListElement * tmp_net;
29855
29856+ pax_track_stack();
29857+
29858 /* Blow away current list of scan results */
29859 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29860 list_move_tail (&loop_net->list, &ai->network_free_list);
29861@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29862 WepKeyRid wkr;
29863 int rc;
29864
29865+ pax_track_stack();
29866+
29867 memset( &mySsid, 0, sizeof( mySsid ) );
29868 kfree (ai->flash);
29869 ai->flash = NULL;
29870@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29871 __le32 *vals = stats.vals;
29872 int len;
29873
29874+ pax_track_stack();
29875+
29876 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29877 return -ENOMEM;
29878 data = file->private_data;
29879@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29880 /* If doLoseSync is not 1, we won't do a Lose Sync */
29881 int doLoseSync = -1;
29882
29883+ pax_track_stack();
29884+
29885 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29886 return -ENOMEM;
29887 data = file->private_data;
29888@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29889 int i;
29890 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29891
29892+ pax_track_stack();
29893+
29894 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29895 if (!qual)
29896 return -ENOMEM;
29897@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29898 CapabilityRid cap_rid;
29899 __le32 *vals = stats_rid.vals;
29900
29901+ pax_track_stack();
29902+
29903 /* Get stats out of the card */
29904 clear_bit(JOB_WSTATS, &local->jobs);
29905 if (local->power.event) {
29906diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c
29907--- linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29908+++ linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29909@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29910 unsigned int v;
29911 u64 tsf;
29912
29913+ pax_track_stack();
29914+
29915 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29916 len += snprintf(buf+len, sizeof(buf)-len,
29917 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29918@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29919 unsigned int len = 0;
29920 unsigned int i;
29921
29922+ pax_track_stack();
29923+
29924 len += snprintf(buf+len, sizeof(buf)-len,
29925 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29926
29927@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29928 unsigned int i;
29929 unsigned int v;
29930
29931+ pax_track_stack();
29932+
29933 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29934 sc->ah->ah_ant_mode);
29935 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29936@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29937 unsigned int len = 0;
29938 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29939
29940+ pax_track_stack();
29941+
29942 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29943 sc->bssidmask);
29944 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29945@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29946 unsigned int len = 0;
29947 int i;
29948
29949+ pax_track_stack();
29950+
29951 len += snprintf(buf+len, sizeof(buf)-len,
29952 "RX\n---------------------\n");
29953 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29954@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29955 char buf[700];
29956 unsigned int len = 0;
29957
29958+ pax_track_stack();
29959+
29960 len += snprintf(buf+len, sizeof(buf)-len,
29961 "HW has PHY error counters:\t%s\n",
29962 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29963@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29964 struct ath5k_buf *bf, *bf0;
29965 int i, n;
29966
29967+ pax_track_stack();
29968+
29969 len += snprintf(buf+len, sizeof(buf)-len,
29970 "available txbuffers: %d\n", sc->txbuf_len);
29971
29972diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29973--- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29974+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29975@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29976 int i, im, j;
29977 int nmeasurement;
29978
29979+ pax_track_stack();
29980+
29981 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29982 if (ah->txchainmask & (1 << i))
29983 num_chains++;
29984diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29985--- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29986+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29987@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29988 int theta_low_bin = 0;
29989 int i;
29990
29991+ pax_track_stack();
29992+
29993 /* disregard any bin that contains <= 16 samples */
29994 thresh_accum_cnt = 16;
29995 scale_factor = 5;
29996diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c
29997--- linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
29998+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
29999@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30000 char buf[512];
30001 unsigned int len = 0;
30002
30003+ pax_track_stack();
30004+
30005 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30006 len += snprintf(buf + len, sizeof(buf) - len,
30007 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30008@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30009 u8 addr[ETH_ALEN];
30010 u32 tmp;
30011
30012+ pax_track_stack();
30013+
30014 len += snprintf(buf + len, sizeof(buf) - len,
30015 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30016 wiphy_name(sc->hw->wiphy),
30017diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30018--- linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30019+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30020@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30021 unsigned int len = 0;
30022 int ret = 0;
30023
30024+ pax_track_stack();
30025+
30026 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30027
30028 ath9k_htc_ps_wakeup(priv);
30029@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30030 unsigned int len = 0;
30031 int ret = 0;
30032
30033+ pax_track_stack();
30034+
30035 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30036
30037 ath9k_htc_ps_wakeup(priv);
30038@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30039 unsigned int len = 0;
30040 int ret = 0;
30041
30042+ pax_track_stack();
30043+
30044 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30045
30046 ath9k_htc_ps_wakeup(priv);
30047@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30048 char buf[512];
30049 unsigned int len = 0;
30050
30051+ pax_track_stack();
30052+
30053 len += snprintf(buf + len, sizeof(buf) - len,
30054 "%20s : %10u\n", "Buffers queued",
30055 priv->debug.tx_stats.buf_queued);
30056@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30057 char buf[512];
30058 unsigned int len = 0;
30059
30060+ pax_track_stack();
30061+
30062 spin_lock_bh(&priv->tx.tx_lock);
30063
30064 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30065@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30066 char buf[512];
30067 unsigned int len = 0;
30068
30069+ pax_track_stack();
30070+
30071 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30072 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30073
30074diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h
30075--- linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30076+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30077@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30078
30079 /* ANI */
30080 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30081-};
30082+} __no_const;
30083
30084 /**
30085 * struct ath_hw_ops - callbacks used by hardware code and driver code
30086@@ -637,7 +637,7 @@ struct ath_hw_ops {
30087 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30088 struct ath_hw_antcomb_conf *antconf);
30089
30090-};
30091+} __no_const;
30092
30093 struct ath_nf_limits {
30094 s16 max;
30095@@ -650,7 +650,7 @@ struct ath_nf_limits {
30096 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30097
30098 struct ath_hw {
30099- struct ath_ops reg_ops;
30100+ ath_ops_no_const reg_ops;
30101
30102 struct ieee80211_hw *hw;
30103 struct ath_common common;
30104diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath.h linux-3.0.3/drivers/net/wireless/ath/ath.h
30105--- linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30106+++ linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30107@@ -121,6 +121,7 @@ struct ath_ops {
30108 void (*write_flush) (void *);
30109 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30110 };
30111+typedef struct ath_ops __no_const ath_ops_no_const;
30112
30113 struct ath_common;
30114 struct ath_bus_ops;
30115diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c
30116--- linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30117+++ linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30118@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30119 int err;
30120 DECLARE_SSID_BUF(ssid);
30121
30122+ pax_track_stack();
30123+
30124 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30125
30126 if (ssid_len)
30127@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30128 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30129 int err;
30130
30131+ pax_track_stack();
30132+
30133 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30134 idx, keylen, len);
30135
30136diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c
30137--- linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30138+++ linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30139@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30140 unsigned long flags;
30141 DECLARE_SSID_BUF(ssid);
30142
30143+ pax_track_stack();
30144+
30145 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30146 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30147 print_ssid(ssid, info_element->data, info_element->len),
30148diff -urNp linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c
30149--- linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30150+++ linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30151@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30152 */
30153 if (iwl3945_mod_params.disable_hw_scan) {
30154 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30155- iwl3945_hw_ops.hw_scan = NULL;
30156+ pax_open_kernel();
30157+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30158+ pax_close_kernel();
30159 }
30160
30161 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30162diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30163--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30164+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30165@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30166 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30167 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30168
30169+ pax_track_stack();
30170+
30171 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30172
30173 /* Treat uninitialized rate scaling data same as non-existing. */
30174@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30175 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30176 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30177
30178+ pax_track_stack();
30179+
30180 /* Override starting rate (index 0) if needed for debug purposes */
30181 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30182
30183diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30184--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30185+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30186@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30187 int pos = 0;
30188 const size_t bufsz = sizeof(buf);
30189
30190+ pax_track_stack();
30191+
30192 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30193 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30194 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30195@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30196 char buf[256 * NUM_IWL_RXON_CTX];
30197 const size_t bufsz = sizeof(buf);
30198
30199+ pax_track_stack();
30200+
30201 for_each_context(priv, ctx) {
30202 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30203 ctx->ctxid);
30204diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h
30205--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30206+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30207@@ -68,8 +68,8 @@ do {
30208 } while (0)
30209
30210 #else
30211-#define IWL_DEBUG(__priv, level, fmt, args...)
30212-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30213+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30214+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30215 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30216 const void *p, u32 len)
30217 {}
30218diff -urNp linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
30219--- linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30220+++ linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30221@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30222 int buf_len = 512;
30223 size_t len = 0;
30224
30225+ pax_track_stack();
30226+
30227 if (*ppos != 0)
30228 return 0;
30229 if (count < sizeof(buf))
30230diff -urNp linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c
30231--- linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30232+++ linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30233@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30234 return -EINVAL;
30235
30236 if (fake_hw_scan) {
30237- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30238- mac80211_hwsim_ops.sw_scan_start = NULL;
30239- mac80211_hwsim_ops.sw_scan_complete = NULL;
30240+ pax_open_kernel();
30241+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30242+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30243+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30244+ pax_close_kernel();
30245 }
30246
30247 spin_lock_init(&hwsim_radio_lock);
30248diff -urNp linux-3.0.3/drivers/net/wireless/rndis_wlan.c linux-3.0.3/drivers/net/wireless/rndis_wlan.c
30249--- linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30250+++ linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30251@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30252
30253 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30254
30255- if (rts_threshold < 0 || rts_threshold > 2347)
30256+ if (rts_threshold > 2347)
30257 rts_threshold = 2347;
30258
30259 tmp = cpu_to_le32(rts_threshold);
30260diff -urNp linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30261--- linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30262+++ linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30263@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30264 u8 rfpath;
30265 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30266
30267+ pax_track_stack();
30268+
30269 precommoncmdcnt = 0;
30270 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30271 MAX_PRECMD_CNT,
30272diff -urNp linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h
30273--- linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30274+++ linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30275@@ -266,7 +266,7 @@ struct wl1251_if_operations {
30276 void (*reset)(struct wl1251 *wl);
30277 void (*enable_irq)(struct wl1251 *wl);
30278 void (*disable_irq)(struct wl1251 *wl);
30279-};
30280+} __no_const;
30281
30282 struct wl1251 {
30283 struct ieee80211_hw *hw;
30284diff -urNp linux-3.0.3/drivers/net/wireless/wl12xx/spi.c linux-3.0.3/drivers/net/wireless/wl12xx/spi.c
30285--- linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30286+++ linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30287@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30288 u32 chunk_len;
30289 int i;
30290
30291+ pax_track_stack();
30292+
30293 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30294
30295 spi_message_init(&m);
30296diff -urNp linux-3.0.3/drivers/oprofile/buffer_sync.c linux-3.0.3/drivers/oprofile/buffer_sync.c
30297--- linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30298+++ linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30299@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30300 if (cookie == NO_COOKIE)
30301 offset = pc;
30302 if (cookie == INVALID_COOKIE) {
30303- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30304+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30305 offset = pc;
30306 }
30307 if (cookie != last_cookie) {
30308@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30309 /* add userspace sample */
30310
30311 if (!mm) {
30312- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30313+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30314 return 0;
30315 }
30316
30317 cookie = lookup_dcookie(mm, s->eip, &offset);
30318
30319 if (cookie == INVALID_COOKIE) {
30320- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30321+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30322 return 0;
30323 }
30324
30325@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30326 /* ignore backtraces if failed to add a sample */
30327 if (state == sb_bt_start) {
30328 state = sb_bt_ignore;
30329- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30330+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30331 }
30332 }
30333 release_mm(mm);
30334diff -urNp linux-3.0.3/drivers/oprofile/event_buffer.c linux-3.0.3/drivers/oprofile/event_buffer.c
30335--- linux-3.0.3/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30336+++ linux-3.0.3/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30337@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30338 }
30339
30340 if (buffer_pos == buffer_size) {
30341- atomic_inc(&oprofile_stats.event_lost_overflow);
30342+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30343 return;
30344 }
30345
30346diff -urNp linux-3.0.3/drivers/oprofile/oprof.c linux-3.0.3/drivers/oprofile/oprof.c
30347--- linux-3.0.3/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30348+++ linux-3.0.3/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30349@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30350 if (oprofile_ops.switch_events())
30351 return;
30352
30353- atomic_inc(&oprofile_stats.multiplex_counter);
30354+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30355 start_switch_worker();
30356 }
30357
30358diff -urNp linux-3.0.3/drivers/oprofile/oprofilefs.c linux-3.0.3/drivers/oprofile/oprofilefs.c
30359--- linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30360+++ linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30361@@ -186,7 +186,7 @@ static const struct file_operations atom
30362
30363
30364 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30365- char const *name, atomic_t *val)
30366+ char const *name, atomic_unchecked_t *val)
30367 {
30368 return __oprofilefs_create_file(sb, root, name,
30369 &atomic_ro_fops, 0444, val);
30370diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.c linux-3.0.3/drivers/oprofile/oprofile_stats.c
30371--- linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30372+++ linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30373@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30374 cpu_buf->sample_invalid_eip = 0;
30375 }
30376
30377- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30378- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30379- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30380- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30381- atomic_set(&oprofile_stats.multiplex_counter, 0);
30382+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30383+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30384+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30385+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30386+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30387 }
30388
30389
30390diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.h linux-3.0.3/drivers/oprofile/oprofile_stats.h
30391--- linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30392+++ linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30393@@ -13,11 +13,11 @@
30394 #include <asm/atomic.h>
30395
30396 struct oprofile_stat_struct {
30397- atomic_t sample_lost_no_mm;
30398- atomic_t sample_lost_no_mapping;
30399- atomic_t bt_lost_no_mapping;
30400- atomic_t event_lost_overflow;
30401- atomic_t multiplex_counter;
30402+ atomic_unchecked_t sample_lost_no_mm;
30403+ atomic_unchecked_t sample_lost_no_mapping;
30404+ atomic_unchecked_t bt_lost_no_mapping;
30405+ atomic_unchecked_t event_lost_overflow;
30406+ atomic_unchecked_t multiplex_counter;
30407 };
30408
30409 extern struct oprofile_stat_struct oprofile_stats;
30410diff -urNp linux-3.0.3/drivers/parport/procfs.c linux-3.0.3/drivers/parport/procfs.c
30411--- linux-3.0.3/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30412+++ linux-3.0.3/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30413@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30414
30415 *ppos += len;
30416
30417- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30418+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30419 }
30420
30421 #ifdef CONFIG_PARPORT_1284
30422@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30423
30424 *ppos += len;
30425
30426- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30427+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30428 }
30429 #endif /* IEEE1284.3 support. */
30430
30431diff -urNp linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h
30432--- linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30433+++ linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30434@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30435 int (*hardware_test) (struct slot* slot, u32 value);
30436 u8 (*get_power) (struct slot* slot);
30437 int (*set_power) (struct slot* slot, int value);
30438-};
30439+} __no_const;
30440
30441 struct cpci_hp_controller {
30442 unsigned int irq;
30443diff -urNp linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c
30444--- linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30445+++ linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30446@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30447
30448 void compaq_nvram_init (void __iomem *rom_start)
30449 {
30450+
30451+#ifndef CONFIG_PAX_KERNEXEC
30452 if (rom_start) {
30453 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30454 }
30455+#endif
30456+
30457 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30458
30459 /* initialize our int15 lock */
30460diff -urNp linux-3.0.3/drivers/pci/pcie/aspm.c linux-3.0.3/drivers/pci/pcie/aspm.c
30461--- linux-3.0.3/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30462+++ linux-3.0.3/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30463@@ -27,9 +27,9 @@
30464 #define MODULE_PARAM_PREFIX "pcie_aspm."
30465
30466 /* Note: those are not register definitions */
30467-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30468-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30469-#define ASPM_STATE_L1 (4) /* L1 state */
30470+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30471+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30472+#define ASPM_STATE_L1 (4U) /* L1 state */
30473 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30474 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30475
30476diff -urNp linux-3.0.3/drivers/pci/probe.c linux-3.0.3/drivers/pci/probe.c
30477--- linux-3.0.3/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30478+++ linux-3.0.3/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30479@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30480 u32 l, sz, mask;
30481 u16 orig_cmd;
30482
30483- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30484+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30485
30486 if (!dev->mmio_always_on) {
30487 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30488diff -urNp linux-3.0.3/drivers/pci/proc.c linux-3.0.3/drivers/pci/proc.c
30489--- linux-3.0.3/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30490+++ linux-3.0.3/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30491@@ -476,7 +476,16 @@ static const struct file_operations proc
30492 static int __init pci_proc_init(void)
30493 {
30494 struct pci_dev *dev = NULL;
30495+
30496+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30497+#ifdef CONFIG_GRKERNSEC_PROC_USER
30498+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30499+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30500+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30501+#endif
30502+#else
30503 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30504+#endif
30505 proc_create("devices", 0, proc_bus_pci_dir,
30506 &proc_bus_pci_dev_operations);
30507 proc_initialized = 1;
30508diff -urNp linux-3.0.3/drivers/pci/xen-pcifront.c linux-3.0.3/drivers/pci/xen-pcifront.c
30509--- linux-3.0.3/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30510+++ linux-3.0.3/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30511@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30512 struct pcifront_sd *sd = bus->sysdata;
30513 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30514
30515+ pax_track_stack();
30516+
30517 if (verbose_request)
30518 dev_info(&pdev->xdev->dev,
30519 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30520@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30521 struct pcifront_sd *sd = bus->sysdata;
30522 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30523
30524+ pax_track_stack();
30525+
30526 if (verbose_request)
30527 dev_info(&pdev->xdev->dev,
30528 "write dev=%04x:%02x:%02x.%01x - "
30529@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30530 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30531 struct msi_desc *entry;
30532
30533+ pax_track_stack();
30534+
30535 if (nvec > SH_INFO_MAX_VEC) {
30536 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30537 " Increase SH_INFO_MAX_VEC.\n", nvec);
30538@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30539 struct pcifront_sd *sd = dev->bus->sysdata;
30540 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30541
30542+ pax_track_stack();
30543+
30544 err = do_pci_op(pdev, &op);
30545
30546 /* What should do for error ? */
30547@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30548 struct pcifront_sd *sd = dev->bus->sysdata;
30549 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30550
30551+ pax_track_stack();
30552+
30553 err = do_pci_op(pdev, &op);
30554 if (likely(!err)) {
30555 vector[0] = op.value;
30556diff -urNp linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c
30557--- linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30558+++ linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30559@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30560 return 0;
30561 }
30562
30563-void static hotkey_mask_warn_incomplete_mask(void)
30564+static void hotkey_mask_warn_incomplete_mask(void)
30565 {
30566 /* log only what the user can fix... */
30567 const u32 wantedmask = hotkey_driver_mask &
30568diff -urNp linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c
30569--- linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30570+++ linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30571@@ -59,7 +59,7 @@ do { \
30572 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30573 } while(0)
30574
30575-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30576+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30577 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30578
30579 /*
30580@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30581
30582 cpu = get_cpu();
30583 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30584+
30585+ pax_open_kernel();
30586 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30587+ pax_close_kernel();
30588
30589 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30590 spin_lock_irqsave(&pnp_bios_lock, flags);
30591@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30592 :"memory");
30593 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30594
30595+ pax_open_kernel();
30596 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30597+ pax_close_kernel();
30598+
30599 put_cpu();
30600
30601 /* If we get here and this is set then the PnP BIOS faulted on us. */
30602@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30603 return status;
30604 }
30605
30606-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30607+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30608 {
30609 int i;
30610
30611@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30612 pnp_bios_callpoint.offset = header->fields.pm16offset;
30613 pnp_bios_callpoint.segment = PNP_CS16;
30614
30615+ pax_open_kernel();
30616+
30617 for_each_possible_cpu(i) {
30618 struct desc_struct *gdt = get_cpu_gdt_table(i);
30619 if (!gdt)
30620@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30621 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30622 (unsigned long)__va(header->fields.pm16dseg));
30623 }
30624+
30625+ pax_close_kernel();
30626 }
30627diff -urNp linux-3.0.3/drivers/pnp/resource.c linux-3.0.3/drivers/pnp/resource.c
30628--- linux-3.0.3/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30629+++ linux-3.0.3/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30630@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30631 return 1;
30632
30633 /* check if the resource is valid */
30634- if (*irq < 0 || *irq > 15)
30635+ if (*irq > 15)
30636 return 0;
30637
30638 /* check if the resource is reserved */
30639@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30640 return 1;
30641
30642 /* check if the resource is valid */
30643- if (*dma < 0 || *dma == 4 || *dma > 7)
30644+ if (*dma == 4 || *dma > 7)
30645 return 0;
30646
30647 /* check if the resource is reserved */
30648diff -urNp linux-3.0.3/drivers/power/bq27x00_battery.c linux-3.0.3/drivers/power/bq27x00_battery.c
30649--- linux-3.0.3/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30650+++ linux-3.0.3/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30651@@ -67,7 +67,7 @@
30652 struct bq27x00_device_info;
30653 struct bq27x00_access_methods {
30654 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30655-};
30656+} __no_const;
30657
30658 enum bq27x00_chip { BQ27000, BQ27500 };
30659
30660diff -urNp linux-3.0.3/drivers/regulator/max8660.c linux-3.0.3/drivers/regulator/max8660.c
30661--- linux-3.0.3/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30662+++ linux-3.0.3/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30663@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30664 max8660->shadow_regs[MAX8660_OVER1] = 5;
30665 } else {
30666 /* Otherwise devices can be toggled via software */
30667- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30668- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30669+ pax_open_kernel();
30670+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30671+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30672+ pax_close_kernel();
30673 }
30674
30675 /*
30676diff -urNp linux-3.0.3/drivers/regulator/mc13892-regulator.c linux-3.0.3/drivers/regulator/mc13892-regulator.c
30677--- linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30678+++ linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30679@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30680 }
30681 mc13xxx_unlock(mc13892);
30682
30683- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30684+ pax_open_kernel();
30685+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30686 = mc13892_vcam_set_mode;
30687- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30688+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30689 = mc13892_vcam_get_mode;
30690+ pax_close_kernel();
30691 for (i = 0; i < pdata->num_regulators; i++) {
30692 init_data = &pdata->regulators[i];
30693 priv->regulators[i] = regulator_register(
30694diff -urNp linux-3.0.3/drivers/rtc/rtc-dev.c linux-3.0.3/drivers/rtc/rtc-dev.c
30695--- linux-3.0.3/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30696+++ linux-3.0.3/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30697@@ -14,6 +14,7 @@
30698 #include <linux/module.h>
30699 #include <linux/rtc.h>
30700 #include <linux/sched.h>
30701+#include <linux/grsecurity.h>
30702 #include "rtc-core.h"
30703
30704 static dev_t rtc_devt;
30705@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30706 if (copy_from_user(&tm, uarg, sizeof(tm)))
30707 return -EFAULT;
30708
30709+ gr_log_timechange();
30710+
30711 return rtc_set_time(rtc, &tm);
30712
30713 case RTC_PIE_ON:
30714diff -urNp linux-3.0.3/drivers/scsi/aacraid/aacraid.h linux-3.0.3/drivers/scsi/aacraid/aacraid.h
30715--- linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30716+++ linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30717@@ -492,7 +492,7 @@ struct adapter_ops
30718 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30719 /* Administrative operations */
30720 int (*adapter_comm)(struct aac_dev * dev, int comm);
30721-};
30722+} __no_const;
30723
30724 /*
30725 * Define which interrupt handler needs to be installed
30726diff -urNp linux-3.0.3/drivers/scsi/aacraid/commctrl.c linux-3.0.3/drivers/scsi/aacraid/commctrl.c
30727--- linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30728+++ linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30729@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30730 u32 actual_fibsize64, actual_fibsize = 0;
30731 int i;
30732
30733+ pax_track_stack();
30734
30735 if (dev->in_reset) {
30736 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30737diff -urNp linux-3.0.3/drivers/scsi/bfa/bfad.c linux-3.0.3/drivers/scsi/bfa/bfad.c
30738--- linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30739+++ linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30740@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30741 struct bfad_vport_s *vport, *vport_new;
30742 struct bfa_fcs_driver_info_s driver_info;
30743
30744+ pax_track_stack();
30745+
30746 /* Fill the driver_info info to fcs*/
30747 memset(&driver_info, 0, sizeof(driver_info));
30748 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30749diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c
30750--- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30751+++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30752@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30753 u16 len, count;
30754 u16 templen;
30755
30756+ pax_track_stack();
30757+
30758 /*
30759 * get hba attributes
30760 */
30761@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30762 u8 count = 0;
30763 u16 templen;
30764
30765+ pax_track_stack();
30766+
30767 /*
30768 * get port attributes
30769 */
30770diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c
30771--- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30772+++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30773@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30774 struct fc_rpsc_speed_info_s speeds;
30775 struct bfa_port_attr_s pport_attr;
30776
30777+ pax_track_stack();
30778+
30779 bfa_trc(port->fcs, rx_fchs->s_id);
30780 bfa_trc(port->fcs, rx_fchs->d_id);
30781
30782diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa.h linux-3.0.3/drivers/scsi/bfa/bfa.h
30783--- linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30784+++ linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30785@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30786 u32 *nvecs, u32 *maxvec);
30787 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30788 u32 *end);
30789-};
30790+} __no_const;
30791 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30792
30793 struct bfa_iocfc_s {
30794diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h
30795--- linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30796+++ linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30797@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30798 bfa_ioc_disable_cbfn_t disable_cbfn;
30799 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30800 bfa_ioc_reset_cbfn_t reset_cbfn;
30801-};
30802+} __no_const;
30803
30804 /*
30805 * Heartbeat failure notification queue element.
30806@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30807 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30808 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30809 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30810-};
30811+} __no_const;
30812
30813 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30814 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30815diff -urNp linux-3.0.3/drivers/scsi/BusLogic.c linux-3.0.3/drivers/scsi/BusLogic.c
30816--- linux-3.0.3/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30817+++ linux-3.0.3/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30818@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30819 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30820 *PrototypeHostAdapter)
30821 {
30822+ pax_track_stack();
30823+
30824 /*
30825 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30826 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30827diff -urNp linux-3.0.3/drivers/scsi/dpt_i2o.c linux-3.0.3/drivers/scsi/dpt_i2o.c
30828--- linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30829+++ linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30830@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30831 dma_addr_t addr;
30832 ulong flags = 0;
30833
30834+ pax_track_stack();
30835+
30836 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30837 // get user msg size in u32s
30838 if(get_user(size, &user_msg[0])){
30839@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30840 s32 rcode;
30841 dma_addr_t addr;
30842
30843+ pax_track_stack();
30844+
30845 memset(msg, 0 , sizeof(msg));
30846 len = scsi_bufflen(cmd);
30847 direction = 0x00000000;
30848diff -urNp linux-3.0.3/drivers/scsi/eata.c linux-3.0.3/drivers/scsi/eata.c
30849--- linux-3.0.3/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30850+++ linux-3.0.3/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30851@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30852 struct hostdata *ha;
30853 char name[16];
30854
30855+ pax_track_stack();
30856+
30857 sprintf(name, "%s%d", driver_name, j);
30858
30859 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30860diff -urNp linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c
30861--- linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30862+++ linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30863@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30864 } buf;
30865 int rc;
30866
30867+ pax_track_stack();
30868+
30869 fiph = (struct fip_header *)skb->data;
30870 sub = fiph->fip_subcode;
30871
30872diff -urNp linux-3.0.3/drivers/scsi/gdth.c linux-3.0.3/drivers/scsi/gdth.c
30873--- linux-3.0.3/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30874+++ linux-3.0.3/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30875@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30876 unsigned long flags;
30877 gdth_ha_str *ha;
30878
30879+ pax_track_stack();
30880+
30881 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30882 return -EFAULT;
30883 ha = gdth_find_ha(ldrv.ionode);
30884@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30885 gdth_ha_str *ha;
30886 int rval;
30887
30888+ pax_track_stack();
30889+
30890 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30891 res.number >= MAX_HDRIVES)
30892 return -EFAULT;
30893@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30894 gdth_ha_str *ha;
30895 int rval;
30896
30897+ pax_track_stack();
30898+
30899 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30900 return -EFAULT;
30901 ha = gdth_find_ha(gen.ionode);
30902@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30903 int i;
30904 gdth_cmd_str gdtcmd;
30905 char cmnd[MAX_COMMAND_SIZE];
30906+
30907+ pax_track_stack();
30908+
30909 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30910
30911 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30912diff -urNp linux-3.0.3/drivers/scsi/gdth_proc.c linux-3.0.3/drivers/scsi/gdth_proc.c
30913--- linux-3.0.3/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30914+++ linux-3.0.3/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30915@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30916 u64 paddr;
30917
30918 char cmnd[MAX_COMMAND_SIZE];
30919+
30920+ pax_track_stack();
30921+
30922 memset(cmnd, 0xff, 12);
30923 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30924
30925@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30926 gdth_hget_str *phg;
30927 char cmnd[MAX_COMMAND_SIZE];
30928
30929+ pax_track_stack();
30930+
30931 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30932 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30933 if (!gdtcmd || !estr)
30934diff -urNp linux-3.0.3/drivers/scsi/hosts.c linux-3.0.3/drivers/scsi/hosts.c
30935--- linux-3.0.3/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30936+++ linux-3.0.3/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30937@@ -42,7 +42,7 @@
30938 #include "scsi_logging.h"
30939
30940
30941-static atomic_t scsi_host_next_hn; /* host_no for next new host */
30942+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30943
30944
30945 static void scsi_host_cls_release(struct device *dev)
30946@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30947 * subtract one because we increment first then return, but we need to
30948 * know what the next host number was before increment
30949 */
30950- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30951+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30952 shost->dma_channel = 0xff;
30953
30954 /* These three are default values which can be overridden */
30955diff -urNp linux-3.0.3/drivers/scsi/hpsa.c linux-3.0.3/drivers/scsi/hpsa.c
30956--- linux-3.0.3/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30957+++ linux-3.0.3/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30958@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30959 u32 a;
30960
30961 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30962- return h->access.command_completed(h);
30963+ return h->access->command_completed(h);
30964
30965 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30966 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30967@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30968 while (!list_empty(&h->reqQ)) {
30969 c = list_entry(h->reqQ.next, struct CommandList, list);
30970 /* can't do anything if fifo is full */
30971- if ((h->access.fifo_full(h))) {
30972+ if ((h->access->fifo_full(h))) {
30973 dev_warn(&h->pdev->dev, "fifo full\n");
30974 break;
30975 }
30976@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30977 h->Qdepth--;
30978
30979 /* Tell the controller execute command */
30980- h->access.submit_command(h, c);
30981+ h->access->submit_command(h, c);
30982
30983 /* Put job onto the completed Q */
30984 addQ(&h->cmpQ, c);
30985@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30986
30987 static inline unsigned long get_next_completion(struct ctlr_info *h)
30988 {
30989- return h->access.command_completed(h);
30990+ return h->access->command_completed(h);
30991 }
30992
30993 static inline bool interrupt_pending(struct ctlr_info *h)
30994 {
30995- return h->access.intr_pending(h);
30996+ return h->access->intr_pending(h);
30997 }
30998
30999 static inline long interrupt_not_for_us(struct ctlr_info *h)
31000 {
31001- return (h->access.intr_pending(h) == 0) ||
31002+ return (h->access->intr_pending(h) == 0) ||
31003 (h->interrupts_enabled == 0);
31004 }
31005
31006@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31007 if (prod_index < 0)
31008 return -ENODEV;
31009 h->product_name = products[prod_index].product_name;
31010- h->access = *(products[prod_index].access);
31011+ h->access = products[prod_index].access;
31012
31013 if (hpsa_board_disabled(h->pdev)) {
31014 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31015@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31016 }
31017
31018 /* make sure the board interrupts are off */
31019- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31020+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31021
31022 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31023 goto clean2;
31024@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31025 * fake ones to scoop up any residual completions.
31026 */
31027 spin_lock_irqsave(&h->lock, flags);
31028- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31029+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31030 spin_unlock_irqrestore(&h->lock, flags);
31031 free_irq(h->intr[h->intr_mode], h);
31032 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31033@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31034 dev_info(&h->pdev->dev, "Board READY.\n");
31035 dev_info(&h->pdev->dev,
31036 "Waiting for stale completions to drain.\n");
31037- h->access.set_intr_mask(h, HPSA_INTR_ON);
31038+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31039 msleep(10000);
31040- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31041+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31042
31043 rc = controller_reset_failed(h->cfgtable);
31044 if (rc)
31045@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31046 }
31047
31048 /* Turn the interrupts on so we can service requests */
31049- h->access.set_intr_mask(h, HPSA_INTR_ON);
31050+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31051
31052 hpsa_hba_inquiry(h);
31053 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31054@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31055 * To write all data in the battery backed cache to disks
31056 */
31057 hpsa_flush_cache(h);
31058- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31059+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31060 free_irq(h->intr[h->intr_mode], h);
31061 #ifdef CONFIG_PCI_MSI
31062 if (h->msix_vector)
31063@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31064 return;
31065 }
31066 /* Change the access methods to the performant access methods */
31067- h->access = SA5_performant_access;
31068+ h->access = &SA5_performant_access;
31069 h->transMethod = CFGTBL_Trans_Performant;
31070 }
31071
31072diff -urNp linux-3.0.3/drivers/scsi/hpsa.h linux-3.0.3/drivers/scsi/hpsa.h
31073--- linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31074+++ linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31075@@ -73,7 +73,7 @@ struct ctlr_info {
31076 unsigned int msix_vector;
31077 unsigned int msi_vector;
31078 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31079- struct access_method access;
31080+ struct access_method *access;
31081
31082 /* queue and queue Info */
31083 struct list_head reqQ;
31084diff -urNp linux-3.0.3/drivers/scsi/ips.h linux-3.0.3/drivers/scsi/ips.h
31085--- linux-3.0.3/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31086+++ linux-3.0.3/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31087@@ -1027,7 +1027,7 @@ typedef struct {
31088 int (*intr)(struct ips_ha *);
31089 void (*enableint)(struct ips_ha *);
31090 uint32_t (*statupd)(struct ips_ha *);
31091-} ips_hw_func_t;
31092+} __no_const ips_hw_func_t;
31093
31094 typedef struct ips_ha {
31095 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31096diff -urNp linux-3.0.3/drivers/scsi/libfc/fc_exch.c linux-3.0.3/drivers/scsi/libfc/fc_exch.c
31097--- linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31098+++ linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31099@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31100 * all together if not used XXX
31101 */
31102 struct {
31103- atomic_t no_free_exch;
31104- atomic_t no_free_exch_xid;
31105- atomic_t xid_not_found;
31106- atomic_t xid_busy;
31107- atomic_t seq_not_found;
31108- atomic_t non_bls_resp;
31109+ atomic_unchecked_t no_free_exch;
31110+ atomic_unchecked_t no_free_exch_xid;
31111+ atomic_unchecked_t xid_not_found;
31112+ atomic_unchecked_t xid_busy;
31113+ atomic_unchecked_t seq_not_found;
31114+ atomic_unchecked_t non_bls_resp;
31115 } stats;
31116 };
31117
31118@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31119 /* allocate memory for exchange */
31120 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31121 if (!ep) {
31122- atomic_inc(&mp->stats.no_free_exch);
31123+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31124 goto out;
31125 }
31126 memset(ep, 0, sizeof(*ep));
31127@@ -761,7 +761,7 @@ out:
31128 return ep;
31129 err:
31130 spin_unlock_bh(&pool->lock);
31131- atomic_inc(&mp->stats.no_free_exch_xid);
31132+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31133 mempool_free(ep, mp->ep_pool);
31134 return NULL;
31135 }
31136@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31137 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31138 ep = fc_exch_find(mp, xid);
31139 if (!ep) {
31140- atomic_inc(&mp->stats.xid_not_found);
31141+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31142 reject = FC_RJT_OX_ID;
31143 goto out;
31144 }
31145@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31146 ep = fc_exch_find(mp, xid);
31147 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31148 if (ep) {
31149- atomic_inc(&mp->stats.xid_busy);
31150+ atomic_inc_unchecked(&mp->stats.xid_busy);
31151 reject = FC_RJT_RX_ID;
31152 goto rel;
31153 }
31154@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31155 }
31156 xid = ep->xid; /* get our XID */
31157 } else if (!ep) {
31158- atomic_inc(&mp->stats.xid_not_found);
31159+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31160 reject = FC_RJT_RX_ID; /* XID not found */
31161 goto out;
31162 }
31163@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31164 } else {
31165 sp = &ep->seq;
31166 if (sp->id != fh->fh_seq_id) {
31167- atomic_inc(&mp->stats.seq_not_found);
31168+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31169 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31170 goto rel;
31171 }
31172@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31173
31174 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31175 if (!ep) {
31176- atomic_inc(&mp->stats.xid_not_found);
31177+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31178 goto out;
31179 }
31180 if (ep->esb_stat & ESB_ST_COMPLETE) {
31181- atomic_inc(&mp->stats.xid_not_found);
31182+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31183 goto rel;
31184 }
31185 if (ep->rxid == FC_XID_UNKNOWN)
31186 ep->rxid = ntohs(fh->fh_rx_id);
31187 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31188- atomic_inc(&mp->stats.xid_not_found);
31189+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31190 goto rel;
31191 }
31192 if (ep->did != ntoh24(fh->fh_s_id) &&
31193 ep->did != FC_FID_FLOGI) {
31194- atomic_inc(&mp->stats.xid_not_found);
31195+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31196 goto rel;
31197 }
31198 sof = fr_sof(fp);
31199@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31200 sp->ssb_stat |= SSB_ST_RESP;
31201 sp->id = fh->fh_seq_id;
31202 } else if (sp->id != fh->fh_seq_id) {
31203- atomic_inc(&mp->stats.seq_not_found);
31204+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31205 goto rel;
31206 }
31207
31208@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31209 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31210
31211 if (!sp)
31212- atomic_inc(&mp->stats.xid_not_found);
31213+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31214 else
31215- atomic_inc(&mp->stats.non_bls_resp);
31216+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31217
31218 fc_frame_free(fp);
31219 }
31220diff -urNp linux-3.0.3/drivers/scsi/libsas/sas_ata.c linux-3.0.3/drivers/scsi/libsas/sas_ata.c
31221--- linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31222+++ linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31223@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31224 .postreset = ata_std_postreset,
31225 .error_handler = ata_std_error_handler,
31226 .post_internal_cmd = sas_ata_post_internal,
31227- .qc_defer = ata_std_qc_defer,
31228+ .qc_defer = ata_std_qc_defer,
31229 .qc_prep = ata_noop_qc_prep,
31230 .qc_issue = sas_ata_qc_issue,
31231 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31232diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c
31233--- linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31234+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31235@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31236
31237 #include <linux/debugfs.h>
31238
31239-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31240+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31241 static unsigned long lpfc_debugfs_start_time = 0L;
31242
31243 /* iDiag */
31244@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31245 lpfc_debugfs_enable = 0;
31246
31247 len = 0;
31248- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31249+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31250 (lpfc_debugfs_max_disc_trc - 1);
31251 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31252 dtp = vport->disc_trc + i;
31253@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31254 lpfc_debugfs_enable = 0;
31255
31256 len = 0;
31257- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31258+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31259 (lpfc_debugfs_max_slow_ring_trc - 1);
31260 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31261 dtp = phba->slow_ring_trc + i;
31262@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31263 uint32_t *ptr;
31264 char buffer[1024];
31265
31266+ pax_track_stack();
31267+
31268 off = 0;
31269 spin_lock_irq(&phba->hbalock);
31270
31271@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31272 !vport || !vport->disc_trc)
31273 return;
31274
31275- index = atomic_inc_return(&vport->disc_trc_cnt) &
31276+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31277 (lpfc_debugfs_max_disc_trc - 1);
31278 dtp = vport->disc_trc + index;
31279 dtp->fmt = fmt;
31280 dtp->data1 = data1;
31281 dtp->data2 = data2;
31282 dtp->data3 = data3;
31283- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31284+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31285 dtp->jif = jiffies;
31286 #endif
31287 return;
31288@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31289 !phba || !phba->slow_ring_trc)
31290 return;
31291
31292- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31293+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31294 (lpfc_debugfs_max_slow_ring_trc - 1);
31295 dtp = phba->slow_ring_trc + index;
31296 dtp->fmt = fmt;
31297 dtp->data1 = data1;
31298 dtp->data2 = data2;
31299 dtp->data3 = data3;
31300- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31301+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31302 dtp->jif = jiffies;
31303 #endif
31304 return;
31305@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31306 "slow_ring buffer\n");
31307 goto debug_failed;
31308 }
31309- atomic_set(&phba->slow_ring_trc_cnt, 0);
31310+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31311 memset(phba->slow_ring_trc, 0,
31312 (sizeof(struct lpfc_debugfs_trc) *
31313 lpfc_debugfs_max_slow_ring_trc));
31314@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31315 "buffer\n");
31316 goto debug_failed;
31317 }
31318- atomic_set(&vport->disc_trc_cnt, 0);
31319+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31320
31321 snprintf(name, sizeof(name), "discovery_trace");
31322 vport->debug_disc_trc =
31323diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc.h linux-3.0.3/drivers/scsi/lpfc/lpfc.h
31324--- linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31325+++ linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31326@@ -420,7 +420,7 @@ struct lpfc_vport {
31327 struct dentry *debug_nodelist;
31328 struct dentry *vport_debugfs_root;
31329 struct lpfc_debugfs_trc *disc_trc;
31330- atomic_t disc_trc_cnt;
31331+ atomic_unchecked_t disc_trc_cnt;
31332 #endif
31333 uint8_t stat_data_enabled;
31334 uint8_t stat_data_blocked;
31335@@ -826,8 +826,8 @@ struct lpfc_hba {
31336 struct timer_list fabric_block_timer;
31337 unsigned long bit_flags;
31338 #define FABRIC_COMANDS_BLOCKED 0
31339- atomic_t num_rsrc_err;
31340- atomic_t num_cmd_success;
31341+ atomic_unchecked_t num_rsrc_err;
31342+ atomic_unchecked_t num_cmd_success;
31343 unsigned long last_rsrc_error_time;
31344 unsigned long last_ramp_down_time;
31345 unsigned long last_ramp_up_time;
31346@@ -841,7 +841,7 @@ struct lpfc_hba {
31347 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31348 struct dentry *debug_slow_ring_trc;
31349 struct lpfc_debugfs_trc *slow_ring_trc;
31350- atomic_t slow_ring_trc_cnt;
31351+ atomic_unchecked_t slow_ring_trc_cnt;
31352 /* iDiag debugfs sub-directory */
31353 struct dentry *idiag_root;
31354 struct dentry *idiag_pci_cfg;
31355diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c
31356--- linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31357+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31358@@ -9923,8 +9923,10 @@ lpfc_init(void)
31359 printk(LPFC_COPYRIGHT "\n");
31360
31361 if (lpfc_enable_npiv) {
31362- lpfc_transport_functions.vport_create = lpfc_vport_create;
31363- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31364+ pax_open_kernel();
31365+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31366+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31367+ pax_close_kernel();
31368 }
31369 lpfc_transport_template =
31370 fc_attach_transport(&lpfc_transport_functions);
31371diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c
31372--- linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31373+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31374@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31375 uint32_t evt_posted;
31376
31377 spin_lock_irqsave(&phba->hbalock, flags);
31378- atomic_inc(&phba->num_rsrc_err);
31379+ atomic_inc_unchecked(&phba->num_rsrc_err);
31380 phba->last_rsrc_error_time = jiffies;
31381
31382 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31383@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31384 unsigned long flags;
31385 struct lpfc_hba *phba = vport->phba;
31386 uint32_t evt_posted;
31387- atomic_inc(&phba->num_cmd_success);
31388+ atomic_inc_unchecked(&phba->num_cmd_success);
31389
31390 if (vport->cfg_lun_queue_depth <= queue_depth)
31391 return;
31392@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31393 unsigned long num_rsrc_err, num_cmd_success;
31394 int i;
31395
31396- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31397- num_cmd_success = atomic_read(&phba->num_cmd_success);
31398+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31399+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31400
31401 vports = lpfc_create_vport_work_array(phba);
31402 if (vports != NULL)
31403@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31404 }
31405 }
31406 lpfc_destroy_vport_work_array(phba, vports);
31407- atomic_set(&phba->num_rsrc_err, 0);
31408- atomic_set(&phba->num_cmd_success, 0);
31409+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31410+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31411 }
31412
31413 /**
31414@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31415 }
31416 }
31417 lpfc_destroy_vport_work_array(phba, vports);
31418- atomic_set(&phba->num_rsrc_err, 0);
31419- atomic_set(&phba->num_cmd_success, 0);
31420+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31421+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31422 }
31423
31424 /**
31425diff -urNp linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c
31426--- linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31427+++ linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31428@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31429 int rval;
31430 int i;
31431
31432+ pax_track_stack();
31433+
31434 // Allocate memory for the base list of scb for management module.
31435 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31436
31437diff -urNp linux-3.0.3/drivers/scsi/osd/osd_initiator.c linux-3.0.3/drivers/scsi/osd/osd_initiator.c
31438--- linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31439+++ linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31440@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31441 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31442 int ret;
31443
31444+ pax_track_stack();
31445+
31446 or = osd_start_request(od, GFP_KERNEL);
31447 if (!or)
31448 return -ENOMEM;
31449diff -urNp linux-3.0.3/drivers/scsi/pmcraid.c linux-3.0.3/drivers/scsi/pmcraid.c
31450--- linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31451+++ linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31452@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31453 res->scsi_dev = scsi_dev;
31454 scsi_dev->hostdata = res;
31455 res->change_detected = 0;
31456- atomic_set(&res->read_failures, 0);
31457- atomic_set(&res->write_failures, 0);
31458+ atomic_set_unchecked(&res->read_failures, 0);
31459+ atomic_set_unchecked(&res->write_failures, 0);
31460 rc = 0;
31461 }
31462 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31463@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31464
31465 /* If this was a SCSI read/write command keep count of errors */
31466 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31467- atomic_inc(&res->read_failures);
31468+ atomic_inc_unchecked(&res->read_failures);
31469 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31470- atomic_inc(&res->write_failures);
31471+ atomic_inc_unchecked(&res->write_failures);
31472
31473 if (!RES_IS_GSCSI(res->cfg_entry) &&
31474 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31475@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31476 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31477 * hrrq_id assigned here in queuecommand
31478 */
31479- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31480+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31481 pinstance->num_hrrq;
31482 cmd->cmd_done = pmcraid_io_done;
31483
31484@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31485 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31486 * hrrq_id assigned here in queuecommand
31487 */
31488- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31489+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31490 pinstance->num_hrrq;
31491
31492 if (request_size) {
31493@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31494
31495 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31496 /* add resources only after host is added into system */
31497- if (!atomic_read(&pinstance->expose_resources))
31498+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31499 return;
31500
31501 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31502@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31503 init_waitqueue_head(&pinstance->reset_wait_q);
31504
31505 atomic_set(&pinstance->outstanding_cmds, 0);
31506- atomic_set(&pinstance->last_message_id, 0);
31507- atomic_set(&pinstance->expose_resources, 0);
31508+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31509+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31510
31511 INIT_LIST_HEAD(&pinstance->free_res_q);
31512 INIT_LIST_HEAD(&pinstance->used_res_q);
31513@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31514 /* Schedule worker thread to handle CCN and take care of adding and
31515 * removing devices to OS
31516 */
31517- atomic_set(&pinstance->expose_resources, 1);
31518+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31519 schedule_work(&pinstance->worker_q);
31520 return rc;
31521
31522diff -urNp linux-3.0.3/drivers/scsi/pmcraid.h linux-3.0.3/drivers/scsi/pmcraid.h
31523--- linux-3.0.3/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31524+++ linux-3.0.3/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31525@@ -749,7 +749,7 @@ struct pmcraid_instance {
31526 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31527
31528 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31529- atomic_t last_message_id;
31530+ atomic_unchecked_t last_message_id;
31531
31532 /* configuration table */
31533 struct pmcraid_config_table *cfg_table;
31534@@ -778,7 +778,7 @@ struct pmcraid_instance {
31535 atomic_t outstanding_cmds;
31536
31537 /* should add/delete resources to mid-layer now ?*/
31538- atomic_t expose_resources;
31539+ atomic_unchecked_t expose_resources;
31540
31541
31542
31543@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31544 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31545 };
31546 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31547- atomic_t read_failures; /* count of failed READ commands */
31548- atomic_t write_failures; /* count of failed WRITE commands */
31549+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31550+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31551
31552 /* To indicate add/delete/modify during CCN */
31553 u8 change_detected;
31554diff -urNp linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h
31555--- linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31556+++ linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31557@@ -2244,7 +2244,7 @@ struct isp_operations {
31558 int (*get_flash_version) (struct scsi_qla_host *, void *);
31559 int (*start_scsi) (srb_t *);
31560 int (*abort_isp) (struct scsi_qla_host *);
31561-};
31562+} __no_const;
31563
31564 /* MSI-X Support *************************************************************/
31565
31566diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h
31567--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31568+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31569@@ -256,7 +256,7 @@ struct ddb_entry {
31570 atomic_t retry_relogin_timer; /* Min Time between relogins
31571 * (4000 only) */
31572 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31573- atomic_t relogin_retry_count; /* Num of times relogin has been
31574+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31575 * retried */
31576
31577 uint16_t port;
31578diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c
31579--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31580+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31581@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31582 ddb_entry->fw_ddb_index = fw_ddb_index;
31583 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31584 atomic_set(&ddb_entry->relogin_timer, 0);
31585- atomic_set(&ddb_entry->relogin_retry_count, 0);
31586+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31587 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31588 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31589 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31590@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31591 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31592 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31593 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31594- atomic_set(&ddb_entry->relogin_retry_count, 0);
31595+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31596 atomic_set(&ddb_entry->relogin_timer, 0);
31597 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31598 iscsi_unblock_session(ddb_entry->sess);
31599diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c
31600--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31601+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31602@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31603 ddb_entry->fw_ddb_device_state ==
31604 DDB_DS_SESSION_FAILED) {
31605 /* Reset retry relogin timer */
31606- atomic_inc(&ddb_entry->relogin_retry_count);
31607+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31608 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31609 " timed out-retrying"
31610 " relogin (%d)\n",
31611 ha->host_no,
31612 ddb_entry->fw_ddb_index,
31613- atomic_read(&ddb_entry->
31614+ atomic_read_unchecked(&ddb_entry->
31615 relogin_retry_count))
31616 );
31617 start_dpc++;
31618diff -urNp linux-3.0.3/drivers/scsi/scsi.c linux-3.0.3/drivers/scsi/scsi.c
31619--- linux-3.0.3/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31620+++ linux-3.0.3/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31621@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31622 unsigned long timeout;
31623 int rtn = 0;
31624
31625- atomic_inc(&cmd->device->iorequest_cnt);
31626+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31627
31628 /* check if the device is still usable */
31629 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31630diff -urNp linux-3.0.3/drivers/scsi/scsi_debug.c linux-3.0.3/drivers/scsi/scsi_debug.c
31631--- linux-3.0.3/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31632+++ linux-3.0.3/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31633@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31634 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31635 unsigned char *cmd = (unsigned char *)scp->cmnd;
31636
31637+ pax_track_stack();
31638+
31639 if ((errsts = check_readiness(scp, 1, devip)))
31640 return errsts;
31641 memset(arr, 0, sizeof(arr));
31642@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31643 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31644 unsigned char *cmd = (unsigned char *)scp->cmnd;
31645
31646+ pax_track_stack();
31647+
31648 if ((errsts = check_readiness(scp, 1, devip)))
31649 return errsts;
31650 memset(arr, 0, sizeof(arr));
31651diff -urNp linux-3.0.3/drivers/scsi/scsi_lib.c linux-3.0.3/drivers/scsi/scsi_lib.c
31652--- linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31653+++ linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31654@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31655 shost = sdev->host;
31656 scsi_init_cmd_errh(cmd);
31657 cmd->result = DID_NO_CONNECT << 16;
31658- atomic_inc(&cmd->device->iorequest_cnt);
31659+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31660
31661 /*
31662 * SCSI request completion path will do scsi_device_unbusy(),
31663@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31664
31665 INIT_LIST_HEAD(&cmd->eh_entry);
31666
31667- atomic_inc(&cmd->device->iodone_cnt);
31668+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31669 if (cmd->result)
31670- atomic_inc(&cmd->device->ioerr_cnt);
31671+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31672
31673 disposition = scsi_decide_disposition(cmd);
31674 if (disposition != SUCCESS &&
31675diff -urNp linux-3.0.3/drivers/scsi/scsi_sysfs.c linux-3.0.3/drivers/scsi/scsi_sysfs.c
31676--- linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31677+++ linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31678@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31679 char *buf) \
31680 { \
31681 struct scsi_device *sdev = to_scsi_device(dev); \
31682- unsigned long long count = atomic_read(&sdev->field); \
31683+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31684 return snprintf(buf, 20, "0x%llx\n", count); \
31685 } \
31686 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31687diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_fc.c linux-3.0.3/drivers/scsi/scsi_transport_fc.c
31688--- linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31689+++ linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31690@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31691 * Netlink Infrastructure
31692 */
31693
31694-static atomic_t fc_event_seq;
31695+static atomic_unchecked_t fc_event_seq;
31696
31697 /**
31698 * fc_get_event_number - Obtain the next sequential FC event number
31699@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31700 u32
31701 fc_get_event_number(void)
31702 {
31703- return atomic_add_return(1, &fc_event_seq);
31704+ return atomic_add_return_unchecked(1, &fc_event_seq);
31705 }
31706 EXPORT_SYMBOL(fc_get_event_number);
31707
31708@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31709 {
31710 int error;
31711
31712- atomic_set(&fc_event_seq, 0);
31713+ atomic_set_unchecked(&fc_event_seq, 0);
31714
31715 error = transport_class_register(&fc_host_class);
31716 if (error)
31717@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31718 char *cp;
31719
31720 *val = simple_strtoul(buf, &cp, 0);
31721- if ((*cp && (*cp != '\n')) || (*val < 0))
31722+ if (*cp && (*cp != '\n'))
31723 return -EINVAL;
31724 /*
31725 * Check for overflow; dev_loss_tmo is u32
31726diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c
31727--- linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31728+++ linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31729@@ -83,7 +83,7 @@ struct iscsi_internal {
31730 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31731 };
31732
31733-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31734+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31735 static struct workqueue_struct *iscsi_eh_timer_workq;
31736
31737 /*
31738@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31739 int err;
31740
31741 ihost = shost->shost_data;
31742- session->sid = atomic_add_return(1, &iscsi_session_nr);
31743+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31744
31745 if (id == ISCSI_MAX_TARGET) {
31746 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31747@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31748 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31749 ISCSI_TRANSPORT_VERSION);
31750
31751- atomic_set(&iscsi_session_nr, 0);
31752+ atomic_set_unchecked(&iscsi_session_nr, 0);
31753
31754 err = class_register(&iscsi_transport_class);
31755 if (err)
31756diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_srp.c linux-3.0.3/drivers/scsi/scsi_transport_srp.c
31757--- linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31758+++ linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31759@@ -33,7 +33,7 @@
31760 #include "scsi_transport_srp_internal.h"
31761
31762 struct srp_host_attrs {
31763- atomic_t next_port_id;
31764+ atomic_unchecked_t next_port_id;
31765 };
31766 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31767
31768@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31769 struct Scsi_Host *shost = dev_to_shost(dev);
31770 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31771
31772- atomic_set(&srp_host->next_port_id, 0);
31773+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31774 return 0;
31775 }
31776
31777@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31778 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31779 rport->roles = ids->roles;
31780
31781- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31782+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31783 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31784
31785 transport_setup_device(&rport->dev);
31786diff -urNp linux-3.0.3/drivers/scsi/sg.c linux-3.0.3/drivers/scsi/sg.c
31787--- linux-3.0.3/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31788+++ linux-3.0.3/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31789@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31790 const struct file_operations * fops;
31791 };
31792
31793-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31794+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31795 {"allow_dio", &adio_fops},
31796 {"debug", &debug_fops},
31797 {"def_reserved_size", &dressz_fops},
31798@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31799 {
31800 int k, mask;
31801 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31802- struct sg_proc_leaf * leaf;
31803+ const struct sg_proc_leaf * leaf;
31804
31805 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31806 if (!sg_proc_sgp)
31807diff -urNp linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c
31808--- linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31809+++ linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31810@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31811 int do_iounmap = 0;
31812 int do_disable_device = 1;
31813
31814+ pax_track_stack();
31815+
31816 memset(&sym_dev, 0, sizeof(sym_dev));
31817 memset(&nvram, 0, sizeof(nvram));
31818 sym_dev.pdev = pdev;
31819diff -urNp linux-3.0.3/drivers/scsi/vmw_pvscsi.c linux-3.0.3/drivers/scsi/vmw_pvscsi.c
31820--- linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31821+++ linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31822@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31823 dma_addr_t base;
31824 unsigned i;
31825
31826+ pax_track_stack();
31827+
31828 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31829 cmd.reqRingNumPages = adapter->req_pages;
31830 cmd.cmpRingNumPages = adapter->cmp_pages;
31831diff -urNp linux-3.0.3/drivers/spi/spi.c linux-3.0.3/drivers/spi/spi.c
31832--- linux-3.0.3/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31833+++ linux-3.0.3/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31834@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31835 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31836
31837 /* portable code must never pass more than 32 bytes */
31838-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31839+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31840
31841 static u8 *buf;
31842
31843diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31844--- linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31845+++ linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31846@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31847 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31848
31849
31850-static struct net_device_ops ar6000_netdev_ops = {
31851+static net_device_ops_no_const ar6000_netdev_ops = {
31852 .ndo_init = NULL,
31853 .ndo_open = ar6000_open,
31854 .ndo_stop = ar6000_close,
31855diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31856--- linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31857+++ linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31858@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31859 typedef struct ar6k_pal_config_s
31860 {
31861 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31862-}ar6k_pal_config_t;
31863+} __no_const ar6k_pal_config_t;
31864
31865 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31866 #endif /* _AR6K_PAL_H_ */
31867diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31868--- linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31869+++ linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31870@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31871 free_netdev(ifp->net);
31872 }
31873 /* Allocate etherdev, including space for private structure */
31874- ifp->net = alloc_etherdev(sizeof(dhd));
31875+ ifp->net = alloc_etherdev(sizeof(*dhd));
31876 if (!ifp->net) {
31877 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31878 ret = -ENOMEM;
31879 }
31880 if (ret == 0) {
31881 strcpy(ifp->net->name, ifp->name);
31882- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31883+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31884 err = dhd_net_attach(&dhd->pub, ifp->idx);
31885 if (err != 0) {
31886 DHD_ERROR(("%s: dhd_net_attach failed, "
31887@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31888 strcpy(nv_path, nvram_path);
31889
31890 /* Allocate etherdev, including space for private structure */
31891- net = alloc_etherdev(sizeof(dhd));
31892+ net = alloc_etherdev(sizeof(*dhd));
31893 if (!net) {
31894 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31895 goto fail;
31896@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31897 /*
31898 * Save the dhd_info into the priv
31899 */
31900- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31901+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31902
31903 /* Set network interface name if it was provided as module parameter */
31904 if (iface_name[0]) {
31905@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31906 /*
31907 * Save the dhd_info into the priv
31908 */
31909- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31910+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31911
31912 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31913 g_bus = bus;
31914diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31915--- linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31916+++ linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31917@@ -593,7 +593,7 @@ struct phy_func_ptr {
31918 initfn_t carrsuppr;
31919 rxsigpwrfn_t rxsigpwr;
31920 detachfn_t detach;
31921-};
31922+} __no_const;
31923 typedef struct phy_func_ptr phy_func_ptr_t;
31924
31925 struct phy_info {
31926diff -urNp linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h
31927--- linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31928+++ linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31929@@ -185,7 +185,7 @@ typedef struct {
31930 u16 func, uint bustype, void *regsva, void *param);
31931 /* detach from device */
31932 void (*detach) (void *ch);
31933-} bcmsdh_driver_t;
31934+} __no_const bcmsdh_driver_t;
31935
31936 /* platform specific/high level functions */
31937 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31938diff -urNp linux-3.0.3/drivers/staging/et131x/et1310_tx.c linux-3.0.3/drivers/staging/et131x/et1310_tx.c
31939--- linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31940+++ linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31941@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31942 struct net_device_stats *stats = &etdev->net_stats;
31943
31944 if (tcb->flags & fMP_DEST_BROAD)
31945- atomic_inc(&etdev->Stats.brdcstxmt);
31946+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31947 else if (tcb->flags & fMP_DEST_MULTI)
31948- atomic_inc(&etdev->Stats.multixmt);
31949+ atomic_inc_unchecked(&etdev->Stats.multixmt);
31950 else
31951- atomic_inc(&etdev->Stats.unixmt);
31952+ atomic_inc_unchecked(&etdev->Stats.unixmt);
31953
31954 if (tcb->skb) {
31955 stats->tx_bytes += tcb->skb->len;
31956diff -urNp linux-3.0.3/drivers/staging/et131x/et131x_adapter.h linux-3.0.3/drivers/staging/et131x/et131x_adapter.h
31957--- linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31958+++ linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31959@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31960 * operations
31961 */
31962 u32 unircv; /* # multicast packets received */
31963- atomic_t unixmt; /* # multicast packets for Tx */
31964+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31965 u32 multircv; /* # multicast packets received */
31966- atomic_t multixmt; /* # multicast packets for Tx */
31967+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31968 u32 brdcstrcv; /* # broadcast packets received */
31969- atomic_t brdcstxmt; /* # broadcast packets for Tx */
31970+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31971 u32 norcvbuf; /* # Rx packets discarded */
31972 u32 noxmtbuf; /* # Tx packets discarded */
31973
31974diff -urNp linux-3.0.3/drivers/staging/hv/channel.c linux-3.0.3/drivers/staging/hv/channel.c
31975--- linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31976+++ linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31977@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31978 int ret = 0;
31979 int t;
31980
31981- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31982- atomic_inc(&vmbus_connection.next_gpadl_handle);
31983+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31984+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31985
31986 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31987 if (ret)
31988diff -urNp linux-3.0.3/drivers/staging/hv/hv.c linux-3.0.3/drivers/staging/hv/hv.c
31989--- linux-3.0.3/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31990+++ linux-3.0.3/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
31991@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
31992 u64 output_address = (output) ? virt_to_phys(output) : 0;
31993 u32 output_address_hi = output_address >> 32;
31994 u32 output_address_lo = output_address & 0xFFFFFFFF;
31995- volatile void *hypercall_page = hv_context.hypercall_page;
31996+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31997
31998 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31999 "=a"(hv_status_lo) : "d" (control_hi),
32000diff -urNp linux-3.0.3/drivers/staging/hv/hv_mouse.c linux-3.0.3/drivers/staging/hv/hv_mouse.c
32001--- linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32002+++ linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32003@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32004 if (hid_dev) {
32005 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32006
32007- hid_dev->ll_driver->open = mousevsc_hid_open;
32008- hid_dev->ll_driver->close = mousevsc_hid_close;
32009+ pax_open_kernel();
32010+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32011+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32012+ pax_close_kernel();
32013
32014 hid_dev->bus = BUS_VIRTUAL;
32015 hid_dev->vendor = input_device_ctx->device_info.vendor;
32016diff -urNp linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h
32017--- linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32018+++ linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32019@@ -559,7 +559,7 @@ enum vmbus_connect_state {
32020 struct vmbus_connection {
32021 enum vmbus_connect_state conn_state;
32022
32023- atomic_t next_gpadl_handle;
32024+ atomic_unchecked_t next_gpadl_handle;
32025
32026 /*
32027 * Represents channel interrupts. Each bit position represents a
32028diff -urNp linux-3.0.3/drivers/staging/hv/rndis_filter.c linux-3.0.3/drivers/staging/hv/rndis_filter.c
32029--- linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32030+++ linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32031@@ -43,7 +43,7 @@ struct rndis_device {
32032
32033 enum rndis_device_state state;
32034 u32 link_stat;
32035- atomic_t new_req_id;
32036+ atomic_unchecked_t new_req_id;
32037
32038 spinlock_t request_lock;
32039 struct list_head req_list;
32040@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32041 * template
32042 */
32043 set = &rndis_msg->msg.set_req;
32044- set->req_id = atomic_inc_return(&dev->new_req_id);
32045+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32046
32047 /* Add to the request list */
32048 spin_lock_irqsave(&dev->request_lock, flags);
32049@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32050
32051 /* Setup the rndis set */
32052 halt = &request->request_msg.msg.halt_req;
32053- halt->req_id = atomic_inc_return(&dev->new_req_id);
32054+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32055
32056 /* Ignore return since this msg is optional. */
32057 rndis_filter_send_request(dev, request);
32058diff -urNp linux-3.0.3/drivers/staging/hv/vmbus_drv.c linux-3.0.3/drivers/staging/hv/vmbus_drv.c
32059--- linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32060+++ linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32061@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32062 {
32063 int ret = 0;
32064
32065- static atomic_t device_num = ATOMIC_INIT(0);
32066+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32067
32068 /* Set the device name. Otherwise, device_register() will fail. */
32069 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32070- atomic_inc_return(&device_num));
32071+ atomic_inc_return_unchecked(&device_num));
32072
32073 /* The new device belongs to this bus */
32074 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32075diff -urNp linux-3.0.3/drivers/staging/iio/ring_generic.h linux-3.0.3/drivers/staging/iio/ring_generic.h
32076--- linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32077+++ linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32078@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32079
32080 int (*is_enabled)(struct iio_ring_buffer *ring);
32081 int (*enable)(struct iio_ring_buffer *ring);
32082-};
32083+} __no_const;
32084
32085 struct iio_ring_setup_ops {
32086 int (*preenable)(struct iio_dev *);
32087diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet.c linux-3.0.3/drivers/staging/octeon/ethernet.c
32088--- linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32089+++ linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32090@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32091 * since the RX tasklet also increments it.
32092 */
32093 #ifdef CONFIG_64BIT
32094- atomic64_add(rx_status.dropped_packets,
32095- (atomic64_t *)&priv->stats.rx_dropped);
32096+ atomic64_add_unchecked(rx_status.dropped_packets,
32097+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32098 #else
32099- atomic_add(rx_status.dropped_packets,
32100- (atomic_t *)&priv->stats.rx_dropped);
32101+ atomic_add_unchecked(rx_status.dropped_packets,
32102+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32103 #endif
32104 }
32105
32106diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet-rx.c linux-3.0.3/drivers/staging/octeon/ethernet-rx.c
32107--- linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32108+++ linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32109@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32110 /* Increment RX stats for virtual ports */
32111 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32112 #ifdef CONFIG_64BIT
32113- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32114- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32115+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32116+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32117 #else
32118- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32119- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32120+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32121+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32122 #endif
32123 }
32124 netif_receive_skb(skb);
32125@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32126 dev->name);
32127 */
32128 #ifdef CONFIG_64BIT
32129- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32130+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32131 #else
32132- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32133+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32134 #endif
32135 dev_kfree_skb_irq(skb);
32136 }
32137diff -urNp linux-3.0.3/drivers/staging/pohmelfs/inode.c linux-3.0.3/drivers/staging/pohmelfs/inode.c
32138--- linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32139+++ linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32140@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32141 mutex_init(&psb->mcache_lock);
32142 psb->mcache_root = RB_ROOT;
32143 psb->mcache_timeout = msecs_to_jiffies(5000);
32144- atomic_long_set(&psb->mcache_gen, 0);
32145+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32146
32147 psb->trans_max_pages = 100;
32148
32149@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32150 INIT_LIST_HEAD(&psb->crypto_ready_list);
32151 INIT_LIST_HEAD(&psb->crypto_active_list);
32152
32153- atomic_set(&psb->trans_gen, 1);
32154+ atomic_set_unchecked(&psb->trans_gen, 1);
32155 atomic_long_set(&psb->total_inodes, 0);
32156
32157 mutex_init(&psb->state_lock);
32158diff -urNp linux-3.0.3/drivers/staging/pohmelfs/mcache.c linux-3.0.3/drivers/staging/pohmelfs/mcache.c
32159--- linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32160+++ linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32161@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32162 m->data = data;
32163 m->start = start;
32164 m->size = size;
32165- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32166+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32167
32168 mutex_lock(&psb->mcache_lock);
32169 err = pohmelfs_mcache_insert(psb, m);
32170diff -urNp linux-3.0.3/drivers/staging/pohmelfs/netfs.h linux-3.0.3/drivers/staging/pohmelfs/netfs.h
32171--- linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32172+++ linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32173@@ -571,14 +571,14 @@ struct pohmelfs_config;
32174 struct pohmelfs_sb {
32175 struct rb_root mcache_root;
32176 struct mutex mcache_lock;
32177- atomic_long_t mcache_gen;
32178+ atomic_long_unchecked_t mcache_gen;
32179 unsigned long mcache_timeout;
32180
32181 unsigned int idx;
32182
32183 unsigned int trans_retries;
32184
32185- atomic_t trans_gen;
32186+ atomic_unchecked_t trans_gen;
32187
32188 unsigned int crypto_attached_size;
32189 unsigned int crypto_align_size;
32190diff -urNp linux-3.0.3/drivers/staging/pohmelfs/trans.c linux-3.0.3/drivers/staging/pohmelfs/trans.c
32191--- linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32192+++ linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32193@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32194 int err;
32195 struct netfs_cmd *cmd = t->iovec.iov_base;
32196
32197- t->gen = atomic_inc_return(&psb->trans_gen);
32198+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32199
32200 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32201 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32202diff -urNp linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h
32203--- linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32204+++ linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32205@@ -83,7 +83,7 @@ struct _io_ops {
32206 u8 *pmem);
32207 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32208 u8 *pmem);
32209-};
32210+} __no_const;
32211
32212 struct io_req {
32213 struct list_head list;
32214diff -urNp linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c
32215--- linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32216+++ linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32217@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32218 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32219
32220 if (rlen)
32221- if (copy_to_user(data, &resp, rlen))
32222+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32223 return -EFAULT;
32224
32225 return 0;
32226diff -urNp linux-3.0.3/drivers/staging/tty/stallion.c linux-3.0.3/drivers/staging/tty/stallion.c
32227--- linux-3.0.3/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32228+++ linux-3.0.3/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32229@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32230 struct stlport stl_dummyport;
32231 struct stlport *portp;
32232
32233+ pax_track_stack();
32234+
32235 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32236 return -EFAULT;
32237 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32238diff -urNp linux-3.0.3/drivers/staging/usbip/usbip_common.h linux-3.0.3/drivers/staging/usbip/usbip_common.h
32239--- linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32240+++ linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32241@@ -315,7 +315,7 @@ struct usbip_device {
32242 void (*shutdown)(struct usbip_device *);
32243 void (*reset)(struct usbip_device *);
32244 void (*unusable)(struct usbip_device *);
32245- } eh_ops;
32246+ } __no_const eh_ops;
32247 };
32248
32249 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32250diff -urNp linux-3.0.3/drivers/staging/usbip/vhci.h linux-3.0.3/drivers/staging/usbip/vhci.h
32251--- linux-3.0.3/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32252+++ linux-3.0.3/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32253@@ -94,7 +94,7 @@ struct vhci_hcd {
32254 unsigned resuming:1;
32255 unsigned long re_timeout;
32256
32257- atomic_t seqnum;
32258+ atomic_unchecked_t seqnum;
32259
32260 /*
32261 * NOTE:
32262diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_hcd.c linux-3.0.3/drivers/staging/usbip/vhci_hcd.c
32263--- linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32264+++ linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32265@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32266 return;
32267 }
32268
32269- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32270+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32271 if (priv->seqnum == 0xffff)
32272 dev_info(&urb->dev->dev, "seqnum max\n");
32273
32274@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32275 return -ENOMEM;
32276 }
32277
32278- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32279+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32280 if (unlink->seqnum == 0xffff)
32281 pr_info("seqnum max\n");
32282
32283@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32284 vdev->rhport = rhport;
32285 }
32286
32287- atomic_set(&vhci->seqnum, 0);
32288+ atomic_set_unchecked(&vhci->seqnum, 0);
32289 spin_lock_init(&vhci->lock);
32290
32291 hcd->power_budget = 0; /* no limit */
32292diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_rx.c linux-3.0.3/drivers/staging/usbip/vhci_rx.c
32293--- linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32294+++ linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32295@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32296 if (!urb) {
32297 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32298 pr_info("max seqnum %d\n",
32299- atomic_read(&the_controller->seqnum));
32300+ atomic_read_unchecked(&the_controller->seqnum));
32301 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32302 return;
32303 }
32304diff -urNp linux-3.0.3/drivers/staging/vt6655/hostap.c linux-3.0.3/drivers/staging/vt6655/hostap.c
32305--- linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32306+++ linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32307@@ -79,14 +79,13 @@ static int msglevel
32308 *
32309 */
32310
32311+static net_device_ops_no_const apdev_netdev_ops;
32312+
32313 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32314 {
32315 PSDevice apdev_priv;
32316 struct net_device *dev = pDevice->dev;
32317 int ret;
32318- const struct net_device_ops apdev_netdev_ops = {
32319- .ndo_start_xmit = pDevice->tx_80211,
32320- };
32321
32322 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32323
32324@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32325 *apdev_priv = *pDevice;
32326 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32327
32328+ /* only half broken now */
32329+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32330 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32331
32332 pDevice->apdev->type = ARPHRD_IEEE80211;
32333diff -urNp linux-3.0.3/drivers/staging/vt6656/hostap.c linux-3.0.3/drivers/staging/vt6656/hostap.c
32334--- linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32335+++ linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32336@@ -80,14 +80,13 @@ static int msglevel
32337 *
32338 */
32339
32340+static net_device_ops_no_const apdev_netdev_ops;
32341+
32342 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32343 {
32344 PSDevice apdev_priv;
32345 struct net_device *dev = pDevice->dev;
32346 int ret;
32347- const struct net_device_ops apdev_netdev_ops = {
32348- .ndo_start_xmit = pDevice->tx_80211,
32349- };
32350
32351 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32352
32353@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32354 *apdev_priv = *pDevice;
32355 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32356
32357+ /* only half broken now */
32358+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32359 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32360
32361 pDevice->apdev->type = ARPHRD_IEEE80211;
32362diff -urNp linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c
32363--- linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32364+++ linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32365@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32366
32367 struct usbctlx_completor {
32368 int (*complete) (struct usbctlx_completor *);
32369-};
32370+} __no_const;
32371
32372 static int
32373 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32374diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.c linux-3.0.3/drivers/staging/zcache/tmem.c
32375--- linux-3.0.3/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32376+++ linux-3.0.3/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32377@@ -39,7 +39,7 @@
32378 * A tmem host implementation must use this function to register callbacks
32379 * for memory allocation.
32380 */
32381-static struct tmem_hostops tmem_hostops;
32382+static tmem_hostops_no_const tmem_hostops;
32383
32384 static void tmem_objnode_tree_init(void);
32385
32386@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32387 * A tmem host implementation must use this function to register
32388 * callbacks for a page-accessible memory (PAM) implementation
32389 */
32390-static struct tmem_pamops tmem_pamops;
32391+static tmem_pamops_no_const tmem_pamops;
32392
32393 void tmem_register_pamops(struct tmem_pamops *m)
32394 {
32395diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.h linux-3.0.3/drivers/staging/zcache/tmem.h
32396--- linux-3.0.3/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32397+++ linux-3.0.3/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32398@@ -171,6 +171,7 @@ struct tmem_pamops {
32399 int (*get_data)(struct page *, void *, struct tmem_pool *);
32400 void (*free)(void *, struct tmem_pool *);
32401 };
32402+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32403 extern void tmem_register_pamops(struct tmem_pamops *m);
32404
32405 /* memory allocation methods provided by the host implementation */
32406@@ -180,6 +181,7 @@ struct tmem_hostops {
32407 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32408 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32409 };
32410+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32411 extern void tmem_register_hostops(struct tmem_hostops *m);
32412
32413 /* core tmem accessor functions */
32414diff -urNp linux-3.0.3/drivers/target/target_core_alua.c linux-3.0.3/drivers/target/target_core_alua.c
32415--- linux-3.0.3/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32416+++ linux-3.0.3/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32417@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32418 char path[ALUA_METADATA_PATH_LEN];
32419 int len;
32420
32421+ pax_track_stack();
32422+
32423 memset(path, 0, ALUA_METADATA_PATH_LEN);
32424
32425 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32426@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32427 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32428 int len;
32429
32430+ pax_track_stack();
32431+
32432 memset(path, 0, ALUA_METADATA_PATH_LEN);
32433 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32434
32435diff -urNp linux-3.0.3/drivers/target/target_core_cdb.c linux-3.0.3/drivers/target/target_core_cdb.c
32436--- linux-3.0.3/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32437+++ linux-3.0.3/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32438@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32439 int length = 0;
32440 unsigned char buf[SE_MODE_PAGE_BUF];
32441
32442+ pax_track_stack();
32443+
32444 memset(buf, 0, SE_MODE_PAGE_BUF);
32445
32446 switch (cdb[2] & 0x3f) {
32447diff -urNp linux-3.0.3/drivers/target/target_core_configfs.c linux-3.0.3/drivers/target/target_core_configfs.c
32448--- linux-3.0.3/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32449+++ linux-3.0.3/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32450@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32451 ssize_t len = 0;
32452 int reg_count = 0, prf_isid;
32453
32454+ pax_track_stack();
32455+
32456 if (!(su_dev->se_dev_ptr))
32457 return -ENODEV;
32458
32459diff -urNp linux-3.0.3/drivers/target/target_core_pr.c linux-3.0.3/drivers/target/target_core_pr.c
32460--- linux-3.0.3/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32461+++ linux-3.0.3/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32462@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32463 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32464 u16 tpgt;
32465
32466+ pax_track_stack();
32467+
32468 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32469 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32470 /*
32471@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32472 ssize_t len = 0;
32473 int reg_count = 0;
32474
32475+ pax_track_stack();
32476+
32477 memset(buf, 0, pr_aptpl_buf_len);
32478 /*
32479 * Called to clear metadata once APTPL has been deactivated.
32480@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32481 char path[512];
32482 int ret;
32483
32484+ pax_track_stack();
32485+
32486 memset(iov, 0, sizeof(struct iovec));
32487 memset(path, 0, 512);
32488
32489diff -urNp linux-3.0.3/drivers/target/target_core_tmr.c linux-3.0.3/drivers/target/target_core_tmr.c
32490--- linux-3.0.3/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32491+++ linux-3.0.3/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32492@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32493 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32494 T_TASK(cmd)->t_task_cdbs,
32495 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32496- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32497+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32498 atomic_read(&T_TASK(cmd)->t_transport_active),
32499 atomic_read(&T_TASK(cmd)->t_transport_stop),
32500 atomic_read(&T_TASK(cmd)->t_transport_sent));
32501@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32502 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32503 " task: %p, t_fe_count: %d dev: %p\n", task,
32504 fe_count, dev);
32505- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32506+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32507 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32508 flags);
32509 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32510@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32511 }
32512 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32513 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32514- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32515+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32516 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32517 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32518
32519diff -urNp linux-3.0.3/drivers/target/target_core_transport.c linux-3.0.3/drivers/target/target_core_transport.c
32520--- linux-3.0.3/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32521+++ linux-3.0.3/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32522@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32523
32524 dev->queue_depth = dev_limits->queue_depth;
32525 atomic_set(&dev->depth_left, dev->queue_depth);
32526- atomic_set(&dev->dev_ordered_id, 0);
32527+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32528
32529 se_dev_set_default_attribs(dev, dev_limits);
32530
32531@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32532 * Used to determine when ORDERED commands should go from
32533 * Dormant to Active status.
32534 */
32535- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32536+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32537 smp_mb__after_atomic_inc();
32538 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32539 cmd->se_ordered_id, cmd->sam_task_attr,
32540@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32541 " t_transport_active: %d t_transport_stop: %d"
32542 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32543 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32544- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32545+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32546 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32547 atomic_read(&T_TASK(cmd)->t_transport_active),
32548 atomic_read(&T_TASK(cmd)->t_transport_stop),
32549@@ -2673,9 +2673,9 @@ check_depth:
32550 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32551 atomic_set(&task->task_active, 1);
32552 atomic_set(&task->task_sent, 1);
32553- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32554+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32555
32556- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32557+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32558 T_TASK(cmd)->t_task_cdbs)
32559 atomic_set(&cmd->transport_sent, 1);
32560
32561@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32562 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32563 }
32564 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32565- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32566+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32567 goto remove;
32568
32569 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32570@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32571 {
32572 int ret = 0;
32573
32574- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32575+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32576 if (!(send_status) ||
32577 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32578 return 1;
32579@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32580 */
32581 if (cmd->data_direction == DMA_TO_DEVICE) {
32582 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32583- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32584+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32585 smp_mb__after_atomic_inc();
32586 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32587 transport_new_cmd_failure(cmd);
32588@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32589 CMD_TFO(cmd)->get_task_tag(cmd),
32590 T_TASK(cmd)->t_task_cdbs,
32591 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32592- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32593+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32594 atomic_read(&T_TASK(cmd)->t_transport_active),
32595 atomic_read(&T_TASK(cmd)->t_transport_stop),
32596 atomic_read(&T_TASK(cmd)->t_transport_sent));
32597diff -urNp linux-3.0.3/drivers/telephony/ixj.c linux-3.0.3/drivers/telephony/ixj.c
32598--- linux-3.0.3/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32599+++ linux-3.0.3/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32600@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32601 bool mContinue;
32602 char *pIn, *pOut;
32603
32604+ pax_track_stack();
32605+
32606 if (!SCI_Prepare(j))
32607 return 0;
32608
32609diff -urNp linux-3.0.3/drivers/tty/hvc/hvcs.c linux-3.0.3/drivers/tty/hvc/hvcs.c
32610--- linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32611+++ linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32612@@ -83,6 +83,7 @@
32613 #include <asm/hvcserver.h>
32614 #include <asm/uaccess.h>
32615 #include <asm/vio.h>
32616+#include <asm/local.h>
32617
32618 /*
32619 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32620@@ -270,7 +271,7 @@ struct hvcs_struct {
32621 unsigned int index;
32622
32623 struct tty_struct *tty;
32624- int open_count;
32625+ local_t open_count;
32626
32627 /*
32628 * Used to tell the driver kernel_thread what operations need to take
32629@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32630
32631 spin_lock_irqsave(&hvcsd->lock, flags);
32632
32633- if (hvcsd->open_count > 0) {
32634+ if (local_read(&hvcsd->open_count) > 0) {
32635 spin_unlock_irqrestore(&hvcsd->lock, flags);
32636 printk(KERN_INFO "HVCS: vterm state unchanged. "
32637 "The hvcs device node is still in use.\n");
32638@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32639 if ((retval = hvcs_partner_connect(hvcsd)))
32640 goto error_release;
32641
32642- hvcsd->open_count = 1;
32643+ local_set(&hvcsd->open_count, 1);
32644 hvcsd->tty = tty;
32645 tty->driver_data = hvcsd;
32646
32647@@ -1179,7 +1180,7 @@ fast_open:
32648
32649 spin_lock_irqsave(&hvcsd->lock, flags);
32650 kref_get(&hvcsd->kref);
32651- hvcsd->open_count++;
32652+ local_inc(&hvcsd->open_count);
32653 hvcsd->todo_mask |= HVCS_SCHED_READ;
32654 spin_unlock_irqrestore(&hvcsd->lock, flags);
32655
32656@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32657 hvcsd = tty->driver_data;
32658
32659 spin_lock_irqsave(&hvcsd->lock, flags);
32660- if (--hvcsd->open_count == 0) {
32661+ if (local_dec_and_test(&hvcsd->open_count)) {
32662
32663 vio_disable_interrupts(hvcsd->vdev);
32664
32665@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32666 free_irq(irq, hvcsd);
32667 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32668 return;
32669- } else if (hvcsd->open_count < 0) {
32670+ } else if (local_read(&hvcsd->open_count) < 0) {
32671 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32672 " is missmanaged.\n",
32673- hvcsd->vdev->unit_address, hvcsd->open_count);
32674+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32675 }
32676
32677 spin_unlock_irqrestore(&hvcsd->lock, flags);
32678@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32679
32680 spin_lock_irqsave(&hvcsd->lock, flags);
32681 /* Preserve this so that we know how many kref refs to put */
32682- temp_open_count = hvcsd->open_count;
32683+ temp_open_count = local_read(&hvcsd->open_count);
32684
32685 /*
32686 * Don't kref put inside the spinlock because the destruction
32687@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32688 hvcsd->tty->driver_data = NULL;
32689 hvcsd->tty = NULL;
32690
32691- hvcsd->open_count = 0;
32692+ local_set(&hvcsd->open_count, 0);
32693
32694 /* This will drop any buffered data on the floor which is OK in a hangup
32695 * scenario. */
32696@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32697 * the middle of a write operation? This is a crummy place to do this
32698 * but we want to keep it all in the spinlock.
32699 */
32700- if (hvcsd->open_count <= 0) {
32701+ if (local_read(&hvcsd->open_count) <= 0) {
32702 spin_unlock_irqrestore(&hvcsd->lock, flags);
32703 return -ENODEV;
32704 }
32705@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32706 {
32707 struct hvcs_struct *hvcsd = tty->driver_data;
32708
32709- if (!hvcsd || hvcsd->open_count <= 0)
32710+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32711 return 0;
32712
32713 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32714diff -urNp linux-3.0.3/drivers/tty/ipwireless/tty.c linux-3.0.3/drivers/tty/ipwireless/tty.c
32715--- linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32716+++ linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32717@@ -29,6 +29,7 @@
32718 #include <linux/tty_driver.h>
32719 #include <linux/tty_flip.h>
32720 #include <linux/uaccess.h>
32721+#include <asm/local.h>
32722
32723 #include "tty.h"
32724 #include "network.h"
32725@@ -51,7 +52,7 @@ struct ipw_tty {
32726 int tty_type;
32727 struct ipw_network *network;
32728 struct tty_struct *linux_tty;
32729- int open_count;
32730+ local_t open_count;
32731 unsigned int control_lines;
32732 struct mutex ipw_tty_mutex;
32733 int tx_bytes_queued;
32734@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32735 mutex_unlock(&tty->ipw_tty_mutex);
32736 return -ENODEV;
32737 }
32738- if (tty->open_count == 0)
32739+ if (local_read(&tty->open_count) == 0)
32740 tty->tx_bytes_queued = 0;
32741
32742- tty->open_count++;
32743+ local_inc(&tty->open_count);
32744
32745 tty->linux_tty = linux_tty;
32746 linux_tty->driver_data = tty;
32747@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32748
32749 static void do_ipw_close(struct ipw_tty *tty)
32750 {
32751- tty->open_count--;
32752-
32753- if (tty->open_count == 0) {
32754+ if (local_dec_return(&tty->open_count) == 0) {
32755 struct tty_struct *linux_tty = tty->linux_tty;
32756
32757 if (linux_tty != NULL) {
32758@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32759 return;
32760
32761 mutex_lock(&tty->ipw_tty_mutex);
32762- if (tty->open_count == 0) {
32763+ if (local_read(&tty->open_count) == 0) {
32764 mutex_unlock(&tty->ipw_tty_mutex);
32765 return;
32766 }
32767@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32768 return;
32769 }
32770
32771- if (!tty->open_count) {
32772+ if (!local_read(&tty->open_count)) {
32773 mutex_unlock(&tty->ipw_tty_mutex);
32774 return;
32775 }
32776@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32777 return -ENODEV;
32778
32779 mutex_lock(&tty->ipw_tty_mutex);
32780- if (!tty->open_count) {
32781+ if (!local_read(&tty->open_count)) {
32782 mutex_unlock(&tty->ipw_tty_mutex);
32783 return -EINVAL;
32784 }
32785@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32786 if (!tty)
32787 return -ENODEV;
32788
32789- if (!tty->open_count)
32790+ if (!local_read(&tty->open_count))
32791 return -EINVAL;
32792
32793 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32794@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32795 if (!tty)
32796 return 0;
32797
32798- if (!tty->open_count)
32799+ if (!local_read(&tty->open_count))
32800 return 0;
32801
32802 return tty->tx_bytes_queued;
32803@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32804 if (!tty)
32805 return -ENODEV;
32806
32807- if (!tty->open_count)
32808+ if (!local_read(&tty->open_count))
32809 return -EINVAL;
32810
32811 return get_control_lines(tty);
32812@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32813 if (!tty)
32814 return -ENODEV;
32815
32816- if (!tty->open_count)
32817+ if (!local_read(&tty->open_count))
32818 return -EINVAL;
32819
32820 return set_control_lines(tty, set, clear);
32821@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32822 if (!tty)
32823 return -ENODEV;
32824
32825- if (!tty->open_count)
32826+ if (!local_read(&tty->open_count))
32827 return -EINVAL;
32828
32829 /* FIXME: Exactly how is the tty object locked here .. */
32830@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32831 against a parallel ioctl etc */
32832 mutex_lock(&ttyj->ipw_tty_mutex);
32833 }
32834- while (ttyj->open_count)
32835+ while (local_read(&ttyj->open_count))
32836 do_ipw_close(ttyj);
32837 ipwireless_disassociate_network_ttys(network,
32838 ttyj->channel_idx);
32839diff -urNp linux-3.0.3/drivers/tty/n_gsm.c linux-3.0.3/drivers/tty/n_gsm.c
32840--- linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32841+++ linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32842@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32843 return NULL;
32844 spin_lock_init(&dlci->lock);
32845 dlci->fifo = &dlci->_fifo;
32846- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32847+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32848 kfree(dlci);
32849 return NULL;
32850 }
32851diff -urNp linux-3.0.3/drivers/tty/n_tty.c linux-3.0.3/drivers/tty/n_tty.c
32852--- linux-3.0.3/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32853+++ linux-3.0.3/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32854@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32855 {
32856 *ops = tty_ldisc_N_TTY;
32857 ops->owner = NULL;
32858- ops->refcount = ops->flags = 0;
32859+ atomic_set(&ops->refcount, 0);
32860+ ops->flags = 0;
32861 }
32862 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32863diff -urNp linux-3.0.3/drivers/tty/pty.c linux-3.0.3/drivers/tty/pty.c
32864--- linux-3.0.3/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32865+++ linux-3.0.3/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32866@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32867 register_sysctl_table(pty_root_table);
32868
32869 /* Now create the /dev/ptmx special device */
32870+ pax_open_kernel();
32871 tty_default_fops(&ptmx_fops);
32872- ptmx_fops.open = ptmx_open;
32873+ *(void **)&ptmx_fops.open = ptmx_open;
32874+ pax_close_kernel();
32875
32876 cdev_init(&ptmx_cdev, &ptmx_fops);
32877 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32878diff -urNp linux-3.0.3/drivers/tty/rocket.c linux-3.0.3/drivers/tty/rocket.c
32879--- linux-3.0.3/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32880+++ linux-3.0.3/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32881@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32882 struct rocket_ports tmp;
32883 int board;
32884
32885+ pax_track_stack();
32886+
32887 if (!retports)
32888 return -EFAULT;
32889 memset(&tmp, 0, sizeof (tmp));
32890diff -urNp linux-3.0.3/drivers/tty/serial/kgdboc.c linux-3.0.3/drivers/tty/serial/kgdboc.c
32891--- linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32892+++ linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32893@@ -23,8 +23,9 @@
32894 #define MAX_CONFIG_LEN 40
32895
32896 static struct kgdb_io kgdboc_io_ops;
32897+static struct kgdb_io kgdboc_io_ops_console;
32898
32899-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32900+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32901 static int configured = -1;
32902
32903 static char config[MAX_CONFIG_LEN];
32904@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32905 kgdboc_unregister_kbd();
32906 if (configured == 1)
32907 kgdb_unregister_io_module(&kgdboc_io_ops);
32908+ else if (configured == 2)
32909+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
32910 }
32911
32912 static int configure_kgdboc(void)
32913@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32914 int err;
32915 char *cptr = config;
32916 struct console *cons;
32917+ int is_console = 0;
32918
32919 err = kgdboc_option_setup(config);
32920 if (err || !strlen(config) || isspace(config[0]))
32921 goto noconfig;
32922
32923 err = -ENODEV;
32924- kgdboc_io_ops.is_console = 0;
32925 kgdb_tty_driver = NULL;
32926
32927 kgdboc_use_kms = 0;
32928@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32929 int idx;
32930 if (cons->device && cons->device(cons, &idx) == p &&
32931 idx == tty_line) {
32932- kgdboc_io_ops.is_console = 1;
32933+ is_console = 1;
32934 break;
32935 }
32936 cons = cons->next;
32937@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32938 kgdb_tty_line = tty_line;
32939
32940 do_register:
32941- err = kgdb_register_io_module(&kgdboc_io_ops);
32942+ if (is_console) {
32943+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
32944+ configured = 2;
32945+ } else {
32946+ err = kgdb_register_io_module(&kgdboc_io_ops);
32947+ configured = 1;
32948+ }
32949 if (err)
32950 goto noconfig;
32951
32952- configured = 1;
32953-
32954 return 0;
32955
32956 noconfig:
32957@@ -212,7 +219,7 @@ noconfig:
32958 static int __init init_kgdboc(void)
32959 {
32960 /* Already configured? */
32961- if (configured == 1)
32962+ if (configured >= 1)
32963 return 0;
32964
32965 return configure_kgdboc();
32966@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32967 if (config[len - 1] == '\n')
32968 config[len - 1] = '\0';
32969
32970- if (configured == 1)
32971+ if (configured >= 1)
32972 cleanup_kgdboc();
32973
32974 /* Go and configure with the new params. */
32975@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32976 .post_exception = kgdboc_post_exp_handler,
32977 };
32978
32979+static struct kgdb_io kgdboc_io_ops_console = {
32980+ .name = "kgdboc",
32981+ .read_char = kgdboc_get_char,
32982+ .write_char = kgdboc_put_char,
32983+ .pre_exception = kgdboc_pre_exp_handler,
32984+ .post_exception = kgdboc_post_exp_handler,
32985+ .is_console = 1
32986+};
32987+
32988 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32989 /* This is only available if kgdboc is a built in for early debugging */
32990 static int __init kgdboc_early_init(char *opt)
32991diff -urNp linux-3.0.3/drivers/tty/serial/mrst_max3110.c linux-3.0.3/drivers/tty/serial/mrst_max3110.c
32992--- linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
32993+++ linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
32994@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
32995 int loop = 1, num, total = 0;
32996 u8 recv_buf[512], *pbuf;
32997
32998+ pax_track_stack();
32999+
33000 pbuf = recv_buf;
33001 do {
33002 num = max3110_read_multi(max, pbuf);
33003diff -urNp linux-3.0.3/drivers/tty/tty_io.c linux-3.0.3/drivers/tty/tty_io.c
33004--- linux-3.0.3/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33005+++ linux-3.0.3/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33006@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33007
33008 void tty_default_fops(struct file_operations *fops)
33009 {
33010- *fops = tty_fops;
33011+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33012 }
33013
33014 /*
33015diff -urNp linux-3.0.3/drivers/tty/tty_ldisc.c linux-3.0.3/drivers/tty/tty_ldisc.c
33016--- linux-3.0.3/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33017+++ linux-3.0.3/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33018@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33019 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33020 struct tty_ldisc_ops *ldo = ld->ops;
33021
33022- ldo->refcount--;
33023+ atomic_dec(&ldo->refcount);
33024 module_put(ldo->owner);
33025 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33026
33027@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33028 spin_lock_irqsave(&tty_ldisc_lock, flags);
33029 tty_ldiscs[disc] = new_ldisc;
33030 new_ldisc->num = disc;
33031- new_ldisc->refcount = 0;
33032+ atomic_set(&new_ldisc->refcount, 0);
33033 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33034
33035 return ret;
33036@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33037 return -EINVAL;
33038
33039 spin_lock_irqsave(&tty_ldisc_lock, flags);
33040- if (tty_ldiscs[disc]->refcount)
33041+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33042 ret = -EBUSY;
33043 else
33044 tty_ldiscs[disc] = NULL;
33045@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33046 if (ldops) {
33047 ret = ERR_PTR(-EAGAIN);
33048 if (try_module_get(ldops->owner)) {
33049- ldops->refcount++;
33050+ atomic_inc(&ldops->refcount);
33051 ret = ldops;
33052 }
33053 }
33054@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33055 unsigned long flags;
33056
33057 spin_lock_irqsave(&tty_ldisc_lock, flags);
33058- ldops->refcount--;
33059+ atomic_dec(&ldops->refcount);
33060 module_put(ldops->owner);
33061 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33062 }
33063diff -urNp linux-3.0.3/drivers/tty/vt/keyboard.c linux-3.0.3/drivers/tty/vt/keyboard.c
33064--- linux-3.0.3/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33065+++ linux-3.0.3/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33066@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33067 kbd->kbdmode == VC_OFF) &&
33068 value != KVAL(K_SAK))
33069 return; /* SAK is allowed even in raw mode */
33070+
33071+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33072+ {
33073+ void *func = fn_handler[value];
33074+ if (func == fn_show_state || func == fn_show_ptregs ||
33075+ func == fn_show_mem)
33076+ return;
33077+ }
33078+#endif
33079+
33080 fn_handler[value](vc);
33081 }
33082
33083diff -urNp linux-3.0.3/drivers/tty/vt/vt.c linux-3.0.3/drivers/tty/vt/vt.c
33084--- linux-3.0.3/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33085+++ linux-3.0.3/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33086@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33087
33088 static void notify_write(struct vc_data *vc, unsigned int unicode)
33089 {
33090- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33091+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33092 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33093 }
33094
33095diff -urNp linux-3.0.3/drivers/tty/vt/vt_ioctl.c linux-3.0.3/drivers/tty/vt/vt_ioctl.c
33096--- linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33097+++ linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33098@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33099 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33100 return -EFAULT;
33101
33102- if (!capable(CAP_SYS_TTY_CONFIG))
33103- perm = 0;
33104-
33105 switch (cmd) {
33106 case KDGKBENT:
33107 key_map = key_maps[s];
33108@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33109 val = (i ? K_HOLE : K_NOSUCHMAP);
33110 return put_user(val, &user_kbe->kb_value);
33111 case KDSKBENT:
33112+ if (!capable(CAP_SYS_TTY_CONFIG))
33113+ perm = 0;
33114+
33115 if (!perm)
33116 return -EPERM;
33117 if (!i && v == K_NOSUCHMAP) {
33118@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33119 int i, j, k;
33120 int ret;
33121
33122- if (!capable(CAP_SYS_TTY_CONFIG))
33123- perm = 0;
33124-
33125 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33126 if (!kbs) {
33127 ret = -ENOMEM;
33128@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33129 kfree(kbs);
33130 return ((p && *p) ? -EOVERFLOW : 0);
33131 case KDSKBSENT:
33132+ if (!capable(CAP_SYS_TTY_CONFIG))
33133+ perm = 0;
33134+
33135 if (!perm) {
33136 ret = -EPERM;
33137 goto reterr;
33138diff -urNp linux-3.0.3/drivers/uio/uio.c linux-3.0.3/drivers/uio/uio.c
33139--- linux-3.0.3/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33140+++ linux-3.0.3/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33141@@ -25,6 +25,7 @@
33142 #include <linux/kobject.h>
33143 #include <linux/cdev.h>
33144 #include <linux/uio_driver.h>
33145+#include <asm/local.h>
33146
33147 #define UIO_MAX_DEVICES (1U << MINORBITS)
33148
33149@@ -32,10 +33,10 @@ struct uio_device {
33150 struct module *owner;
33151 struct device *dev;
33152 int minor;
33153- atomic_t event;
33154+ atomic_unchecked_t event;
33155 struct fasync_struct *async_queue;
33156 wait_queue_head_t wait;
33157- int vma_count;
33158+ local_t vma_count;
33159 struct uio_info *info;
33160 struct kobject *map_dir;
33161 struct kobject *portio_dir;
33162@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33163 struct device_attribute *attr, char *buf)
33164 {
33165 struct uio_device *idev = dev_get_drvdata(dev);
33166- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33167+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33168 }
33169
33170 static struct device_attribute uio_class_attributes[] = {
33171@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33172 {
33173 struct uio_device *idev = info->uio_dev;
33174
33175- atomic_inc(&idev->event);
33176+ atomic_inc_unchecked(&idev->event);
33177 wake_up_interruptible(&idev->wait);
33178 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33179 }
33180@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33181 }
33182
33183 listener->dev = idev;
33184- listener->event_count = atomic_read(&idev->event);
33185+ listener->event_count = atomic_read_unchecked(&idev->event);
33186 filep->private_data = listener;
33187
33188 if (idev->info->open) {
33189@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33190 return -EIO;
33191
33192 poll_wait(filep, &idev->wait, wait);
33193- if (listener->event_count != atomic_read(&idev->event))
33194+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33195 return POLLIN | POLLRDNORM;
33196 return 0;
33197 }
33198@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33199 do {
33200 set_current_state(TASK_INTERRUPTIBLE);
33201
33202- event_count = atomic_read(&idev->event);
33203+ event_count = atomic_read_unchecked(&idev->event);
33204 if (event_count != listener->event_count) {
33205 if (copy_to_user(buf, &event_count, count))
33206 retval = -EFAULT;
33207@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33208 static void uio_vma_open(struct vm_area_struct *vma)
33209 {
33210 struct uio_device *idev = vma->vm_private_data;
33211- idev->vma_count++;
33212+ local_inc(&idev->vma_count);
33213 }
33214
33215 static void uio_vma_close(struct vm_area_struct *vma)
33216 {
33217 struct uio_device *idev = vma->vm_private_data;
33218- idev->vma_count--;
33219+ local_dec(&idev->vma_count);
33220 }
33221
33222 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33223@@ -823,7 +824,7 @@ int __uio_register_device(struct module
33224 idev->owner = owner;
33225 idev->info = info;
33226 init_waitqueue_head(&idev->wait);
33227- atomic_set(&idev->event, 0);
33228+ atomic_set_unchecked(&idev->event, 0);
33229
33230 ret = uio_get_minor(idev);
33231 if (ret)
33232diff -urNp linux-3.0.3/drivers/usb/atm/cxacru.c linux-3.0.3/drivers/usb/atm/cxacru.c
33233--- linux-3.0.3/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33234+++ linux-3.0.3/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33235@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33236 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33237 if (ret < 2)
33238 return -EINVAL;
33239- if (index < 0 || index > 0x7f)
33240+ if (index > 0x7f)
33241 return -EINVAL;
33242 pos += tmp;
33243
33244diff -urNp linux-3.0.3/drivers/usb/atm/usbatm.c linux-3.0.3/drivers/usb/atm/usbatm.c
33245--- linux-3.0.3/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33246+++ linux-3.0.3/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33247@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33248 if (printk_ratelimit())
33249 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33250 __func__, vpi, vci);
33251- atomic_inc(&vcc->stats->rx_err);
33252+ atomic_inc_unchecked(&vcc->stats->rx_err);
33253 return;
33254 }
33255
33256@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33257 if (length > ATM_MAX_AAL5_PDU) {
33258 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33259 __func__, length, vcc);
33260- atomic_inc(&vcc->stats->rx_err);
33261+ atomic_inc_unchecked(&vcc->stats->rx_err);
33262 goto out;
33263 }
33264
33265@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33266 if (sarb->len < pdu_length) {
33267 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33268 __func__, pdu_length, sarb->len, vcc);
33269- atomic_inc(&vcc->stats->rx_err);
33270+ atomic_inc_unchecked(&vcc->stats->rx_err);
33271 goto out;
33272 }
33273
33274 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33275 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33276 __func__, vcc);
33277- atomic_inc(&vcc->stats->rx_err);
33278+ atomic_inc_unchecked(&vcc->stats->rx_err);
33279 goto out;
33280 }
33281
33282@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33283 if (printk_ratelimit())
33284 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33285 __func__, length);
33286- atomic_inc(&vcc->stats->rx_drop);
33287+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33288 goto out;
33289 }
33290
33291@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33292
33293 vcc->push(vcc, skb);
33294
33295- atomic_inc(&vcc->stats->rx);
33296+ atomic_inc_unchecked(&vcc->stats->rx);
33297 out:
33298 skb_trim(sarb, 0);
33299 }
33300@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33301 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33302
33303 usbatm_pop(vcc, skb);
33304- atomic_inc(&vcc->stats->tx);
33305+ atomic_inc_unchecked(&vcc->stats->tx);
33306
33307 skb = skb_dequeue(&instance->sndqueue);
33308 }
33309@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33310 if (!left--)
33311 return sprintf(page,
33312 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33313- atomic_read(&atm_dev->stats.aal5.tx),
33314- atomic_read(&atm_dev->stats.aal5.tx_err),
33315- atomic_read(&atm_dev->stats.aal5.rx),
33316- atomic_read(&atm_dev->stats.aal5.rx_err),
33317- atomic_read(&atm_dev->stats.aal5.rx_drop));
33318+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33319+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33320+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33321+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33322+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33323
33324 if (!left--) {
33325 if (instance->disconnected)
33326diff -urNp linux-3.0.3/drivers/usb/core/devices.c linux-3.0.3/drivers/usb/core/devices.c
33327--- linux-3.0.3/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33328+++ linux-3.0.3/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33329@@ -126,7 +126,7 @@ static const char format_endpt[] =
33330 * time it gets called.
33331 */
33332 static struct device_connect_event {
33333- atomic_t count;
33334+ atomic_unchecked_t count;
33335 wait_queue_head_t wait;
33336 } device_event = {
33337 .count = ATOMIC_INIT(1),
33338@@ -164,7 +164,7 @@ static const struct class_info clas_info
33339
33340 void usbfs_conn_disc_event(void)
33341 {
33342- atomic_add(2, &device_event.count);
33343+ atomic_add_unchecked(2, &device_event.count);
33344 wake_up(&device_event.wait);
33345 }
33346
33347@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33348
33349 poll_wait(file, &device_event.wait, wait);
33350
33351- event_count = atomic_read(&device_event.count);
33352+ event_count = atomic_read_unchecked(&device_event.count);
33353 if (file->f_version != event_count) {
33354 file->f_version = event_count;
33355 return POLLIN | POLLRDNORM;
33356diff -urNp linux-3.0.3/drivers/usb/core/message.c linux-3.0.3/drivers/usb/core/message.c
33357--- linux-3.0.3/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33358+++ linux-3.0.3/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33359@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33360 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33361 if (buf) {
33362 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33363- if (len > 0) {
33364- smallbuf = kmalloc(++len, GFP_NOIO);
33365+ if (len++ > 0) {
33366+ smallbuf = kmalloc(len, GFP_NOIO);
33367 if (!smallbuf)
33368 return buf;
33369 memcpy(smallbuf, buf, len);
33370diff -urNp linux-3.0.3/drivers/usb/early/ehci-dbgp.c linux-3.0.3/drivers/usb/early/ehci-dbgp.c
33371--- linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33372+++ linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33373@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33374
33375 #ifdef CONFIG_KGDB
33376 static struct kgdb_io kgdbdbgp_io_ops;
33377-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33378+static struct kgdb_io kgdbdbgp_io_ops_console;
33379+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33380 #else
33381 #define dbgp_kgdb_mode (0)
33382 #endif
33383@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33384 .write_char = kgdbdbgp_write_char,
33385 };
33386
33387+static struct kgdb_io kgdbdbgp_io_ops_console = {
33388+ .name = "kgdbdbgp",
33389+ .read_char = kgdbdbgp_read_char,
33390+ .write_char = kgdbdbgp_write_char,
33391+ .is_console = 1
33392+};
33393+
33394 static int kgdbdbgp_wait_time;
33395
33396 static int __init kgdbdbgp_parse_config(char *str)
33397@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33398 ptr++;
33399 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33400 }
33401- kgdb_register_io_module(&kgdbdbgp_io_ops);
33402- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33403+ if (early_dbgp_console.index != -1)
33404+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33405+ else
33406+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33407
33408 return 0;
33409 }
33410diff -urNp linux-3.0.3/drivers/usb/host/xhci-mem.c linux-3.0.3/drivers/usb/host/xhci-mem.c
33411--- linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33412+++ linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33413@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33414 unsigned int num_tests;
33415 int i, ret;
33416
33417+ pax_track_stack();
33418+
33419 num_tests = ARRAY_SIZE(simple_test_vector);
33420 for (i = 0; i < num_tests; i++) {
33421 ret = xhci_test_trb_in_td(xhci,
33422diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-hc.h linux-3.0.3/drivers/usb/wusbcore/wa-hc.h
33423--- linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33424+++ linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33425@@ -192,7 +192,7 @@ struct wahc {
33426 struct list_head xfer_delayed_list;
33427 spinlock_t xfer_list_lock;
33428 struct work_struct xfer_work;
33429- atomic_t xfer_id_count;
33430+ atomic_unchecked_t xfer_id_count;
33431 };
33432
33433
33434@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33435 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33436 spin_lock_init(&wa->xfer_list_lock);
33437 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33438- atomic_set(&wa->xfer_id_count, 1);
33439+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33440 }
33441
33442 /**
33443diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c
33444--- linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33445+++ linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33446@@ -294,7 +294,7 @@ out:
33447 */
33448 static void wa_xfer_id_init(struct wa_xfer *xfer)
33449 {
33450- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33451+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33452 }
33453
33454 /*
33455diff -urNp linux-3.0.3/drivers/vhost/vhost.c linux-3.0.3/drivers/vhost/vhost.c
33456--- linux-3.0.3/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33457+++ linux-3.0.3/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33458@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33459 return get_user(vq->last_used_idx, &used->idx);
33460 }
33461
33462-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33463+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33464 {
33465 struct file *eventfp, *filep = NULL,
33466 *pollstart = NULL, *pollstop = NULL;
33467diff -urNp linux-3.0.3/drivers/video/fbcmap.c linux-3.0.3/drivers/video/fbcmap.c
33468--- linux-3.0.3/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33469+++ linux-3.0.3/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33470@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33471 rc = -ENODEV;
33472 goto out;
33473 }
33474- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33475- !info->fbops->fb_setcmap)) {
33476+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33477 rc = -EINVAL;
33478 goto out1;
33479 }
33480diff -urNp linux-3.0.3/drivers/video/fbmem.c linux-3.0.3/drivers/video/fbmem.c
33481--- linux-3.0.3/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33482+++ linux-3.0.3/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33483@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33484 image->dx += image->width + 8;
33485 }
33486 } else if (rotate == FB_ROTATE_UD) {
33487- for (x = 0; x < num && image->dx >= 0; x++) {
33488+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33489 info->fbops->fb_imageblit(info, image);
33490 image->dx -= image->width + 8;
33491 }
33492@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33493 image->dy += image->height + 8;
33494 }
33495 } else if (rotate == FB_ROTATE_CCW) {
33496- for (x = 0; x < num && image->dy >= 0; x++) {
33497+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33498 info->fbops->fb_imageblit(info, image);
33499 image->dy -= image->height + 8;
33500 }
33501@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33502 int flags = info->flags;
33503 int ret = 0;
33504
33505+ pax_track_stack();
33506+
33507 if (var->activate & FB_ACTIVATE_INV_MODE) {
33508 struct fb_videomode mode1, mode2;
33509
33510@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33511 void __user *argp = (void __user *)arg;
33512 long ret = 0;
33513
33514+ pax_track_stack();
33515+
33516 switch (cmd) {
33517 case FBIOGET_VSCREENINFO:
33518 if (!lock_fb_info(info))
33519@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33520 return -EFAULT;
33521 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33522 return -EINVAL;
33523- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33524+ if (con2fb.framebuffer >= FB_MAX)
33525 return -EINVAL;
33526 if (!registered_fb[con2fb.framebuffer])
33527 request_module("fb%d", con2fb.framebuffer);
33528diff -urNp linux-3.0.3/drivers/video/i810/i810_accel.c linux-3.0.3/drivers/video/i810/i810_accel.c
33529--- linux-3.0.3/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33530+++ linux-3.0.3/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33531@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33532 }
33533 }
33534 printk("ringbuffer lockup!!!\n");
33535+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33536 i810_report_error(mmio);
33537 par->dev_flags |= LOCKUP;
33538 info->pixmap.scan_align = 1;
33539diff -urNp linux-3.0.3/drivers/video/udlfb.c linux-3.0.3/drivers/video/udlfb.c
33540--- linux-3.0.3/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33541+++ linux-3.0.3/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33542@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33543 dlfb_urb_completion(urb);
33544
33545 error:
33546- atomic_add(bytes_sent, &dev->bytes_sent);
33547- atomic_add(bytes_identical, &dev->bytes_identical);
33548- atomic_add(width*height*2, &dev->bytes_rendered);
33549+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33550+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33551+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33552 end_cycles = get_cycles();
33553- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33554+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33555 >> 10)), /* Kcycles */
33556 &dev->cpu_kcycles_used);
33557
33558@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33559 dlfb_urb_completion(urb);
33560
33561 error:
33562- atomic_add(bytes_sent, &dev->bytes_sent);
33563- atomic_add(bytes_identical, &dev->bytes_identical);
33564- atomic_add(bytes_rendered, &dev->bytes_rendered);
33565+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33566+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33567+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33568 end_cycles = get_cycles();
33569- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33570+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33571 >> 10)), /* Kcycles */
33572 &dev->cpu_kcycles_used);
33573 }
33574@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33575 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33576 struct dlfb_data *dev = fb_info->par;
33577 return snprintf(buf, PAGE_SIZE, "%u\n",
33578- atomic_read(&dev->bytes_rendered));
33579+ atomic_read_unchecked(&dev->bytes_rendered));
33580 }
33581
33582 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33583@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33584 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33585 struct dlfb_data *dev = fb_info->par;
33586 return snprintf(buf, PAGE_SIZE, "%u\n",
33587- atomic_read(&dev->bytes_identical));
33588+ atomic_read_unchecked(&dev->bytes_identical));
33589 }
33590
33591 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33592@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33593 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33594 struct dlfb_data *dev = fb_info->par;
33595 return snprintf(buf, PAGE_SIZE, "%u\n",
33596- atomic_read(&dev->bytes_sent));
33597+ atomic_read_unchecked(&dev->bytes_sent));
33598 }
33599
33600 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33601@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33602 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33603 struct dlfb_data *dev = fb_info->par;
33604 return snprintf(buf, PAGE_SIZE, "%u\n",
33605- atomic_read(&dev->cpu_kcycles_used));
33606+ atomic_read_unchecked(&dev->cpu_kcycles_used));
33607 }
33608
33609 static ssize_t edid_show(
33610@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33611 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33612 struct dlfb_data *dev = fb_info->par;
33613
33614- atomic_set(&dev->bytes_rendered, 0);
33615- atomic_set(&dev->bytes_identical, 0);
33616- atomic_set(&dev->bytes_sent, 0);
33617- atomic_set(&dev->cpu_kcycles_used, 0);
33618+ atomic_set_unchecked(&dev->bytes_rendered, 0);
33619+ atomic_set_unchecked(&dev->bytes_identical, 0);
33620+ atomic_set_unchecked(&dev->bytes_sent, 0);
33621+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33622
33623 return count;
33624 }
33625diff -urNp linux-3.0.3/drivers/video/uvesafb.c linux-3.0.3/drivers/video/uvesafb.c
33626--- linux-3.0.3/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33627+++ linux-3.0.3/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33628@@ -19,6 +19,7 @@
33629 #include <linux/io.h>
33630 #include <linux/mutex.h>
33631 #include <linux/slab.h>
33632+#include <linux/moduleloader.h>
33633 #include <video/edid.h>
33634 #include <video/uvesafb.h>
33635 #ifdef CONFIG_X86
33636@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33637 NULL,
33638 };
33639
33640- return call_usermodehelper(v86d_path, argv, envp, 1);
33641+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33642 }
33643
33644 /*
33645@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33646 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33647 par->pmi_setpal = par->ypan = 0;
33648 } else {
33649+
33650+#ifdef CONFIG_PAX_KERNEXEC
33651+#ifdef CONFIG_MODULES
33652+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33653+#endif
33654+ if (!par->pmi_code) {
33655+ par->pmi_setpal = par->ypan = 0;
33656+ return 0;
33657+ }
33658+#endif
33659+
33660 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33661 + task->t.regs.edi);
33662+
33663+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33664+ pax_open_kernel();
33665+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33666+ pax_close_kernel();
33667+
33668+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33669+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33670+#else
33671 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33672 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33673+#endif
33674+
33675 printk(KERN_INFO "uvesafb: protected mode interface info at "
33676 "%04x:%04x\n",
33677 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33678@@ -1821,6 +1844,11 @@ out:
33679 if (par->vbe_modes)
33680 kfree(par->vbe_modes);
33681
33682+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33683+ if (par->pmi_code)
33684+ module_free_exec(NULL, par->pmi_code);
33685+#endif
33686+
33687 framebuffer_release(info);
33688 return err;
33689 }
33690@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33691 kfree(par->vbe_state_orig);
33692 if (par->vbe_state_saved)
33693 kfree(par->vbe_state_saved);
33694+
33695+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33696+ if (par->pmi_code)
33697+ module_free_exec(NULL, par->pmi_code);
33698+#endif
33699+
33700 }
33701
33702 framebuffer_release(info);
33703diff -urNp linux-3.0.3/drivers/video/vesafb.c linux-3.0.3/drivers/video/vesafb.c
33704--- linux-3.0.3/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33705+++ linux-3.0.3/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33706@@ -9,6 +9,7 @@
33707 */
33708
33709 #include <linux/module.h>
33710+#include <linux/moduleloader.h>
33711 #include <linux/kernel.h>
33712 #include <linux/errno.h>
33713 #include <linux/string.h>
33714@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33715 static int vram_total __initdata; /* Set total amount of memory */
33716 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33717 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33718-static void (*pmi_start)(void) __read_mostly;
33719-static void (*pmi_pal) (void) __read_mostly;
33720+static void (*pmi_start)(void) __read_only;
33721+static void (*pmi_pal) (void) __read_only;
33722 static int depth __read_mostly;
33723 static int vga_compat __read_mostly;
33724 /* --------------------------------------------------------------------- */
33725@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33726 unsigned int size_vmode;
33727 unsigned int size_remap;
33728 unsigned int size_total;
33729+ void *pmi_code = NULL;
33730
33731 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33732 return -ENODEV;
33733@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33734 size_remap = size_total;
33735 vesafb_fix.smem_len = size_remap;
33736
33737-#ifndef __i386__
33738- screen_info.vesapm_seg = 0;
33739-#endif
33740-
33741 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33742 printk(KERN_WARNING
33743 "vesafb: cannot reserve video memory at 0x%lx\n",
33744@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33745 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33746 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33747
33748+#ifdef __i386__
33749+
33750+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33751+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
33752+ if (!pmi_code)
33753+#elif !defined(CONFIG_PAX_KERNEXEC)
33754+ if (0)
33755+#endif
33756+
33757+#endif
33758+ screen_info.vesapm_seg = 0;
33759+
33760 if (screen_info.vesapm_seg) {
33761- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33762- screen_info.vesapm_seg,screen_info.vesapm_off);
33763+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33764+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33765 }
33766
33767 if (screen_info.vesapm_seg < 0xc000)
33768@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33769
33770 if (ypan || pmi_setpal) {
33771 unsigned short *pmi_base;
33772+
33773 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33774- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33775- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33776+
33777+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33778+ pax_open_kernel();
33779+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33780+#else
33781+ pmi_code = pmi_base;
33782+#endif
33783+
33784+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33785+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33786+
33787+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33788+ pmi_start = ktva_ktla(pmi_start);
33789+ pmi_pal = ktva_ktla(pmi_pal);
33790+ pax_close_kernel();
33791+#endif
33792+
33793 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33794 if (pmi_base[3]) {
33795 printk(KERN_INFO "vesafb: pmi: ports = ");
33796@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33797 info->node, info->fix.id);
33798 return 0;
33799 err:
33800+
33801+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33802+ module_free_exec(NULL, pmi_code);
33803+#endif
33804+
33805 if (info->screen_base)
33806 iounmap(info->screen_base);
33807 framebuffer_release(info);
33808diff -urNp linux-3.0.3/drivers/video/via/via_clock.h linux-3.0.3/drivers/video/via/via_clock.h
33809--- linux-3.0.3/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33810+++ linux-3.0.3/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33811@@ -56,7 +56,7 @@ struct via_clock {
33812
33813 void (*set_engine_pll_state)(u8 state);
33814 void (*set_engine_pll)(struct via_pll_config config);
33815-};
33816+} __no_const;
33817
33818
33819 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33820diff -urNp linux-3.0.3/drivers/virtio/virtio_balloon.c linux-3.0.3/drivers/virtio/virtio_balloon.c
33821--- linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33822+++ linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33823@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33824 struct sysinfo i;
33825 int idx = 0;
33826
33827+ pax_track_stack();
33828+
33829 all_vm_events(events);
33830 si_meminfo(&i);
33831
33832diff -urNp linux-3.0.3/fs/9p/vfs_inode.c linux-3.0.3/fs/9p/vfs_inode.c
33833--- linux-3.0.3/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33834+++ linux-3.0.3/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33835@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33836 void
33837 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33838 {
33839- char *s = nd_get_link(nd);
33840+ const char *s = nd_get_link(nd);
33841
33842 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33843 IS_ERR(s) ? "<error>" : s);
33844diff -urNp linux-3.0.3/fs/aio.c linux-3.0.3/fs/aio.c
33845--- linux-3.0.3/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33846+++ linux-3.0.3/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33847@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33848 size += sizeof(struct io_event) * nr_events;
33849 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33850
33851- if (nr_pages < 0)
33852+ if (nr_pages <= 0)
33853 return -EINVAL;
33854
33855 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33856@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33857 struct aio_timeout to;
33858 int retry = 0;
33859
33860+ pax_track_stack();
33861+
33862 /* needed to zero any padding within an entry (there shouldn't be
33863 * any, but C is fun!
33864 */
33865@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33866 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33867 {
33868 ssize_t ret;
33869+ struct iovec iovstack;
33870
33871 #ifdef CONFIG_COMPAT
33872 if (compat)
33873 ret = compat_rw_copy_check_uvector(type,
33874 (struct compat_iovec __user *)kiocb->ki_buf,
33875- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33876+ kiocb->ki_nbytes, 1, &iovstack,
33877 &kiocb->ki_iovec);
33878 else
33879 #endif
33880 ret = rw_copy_check_uvector(type,
33881 (struct iovec __user *)kiocb->ki_buf,
33882- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33883+ kiocb->ki_nbytes, 1, &iovstack,
33884 &kiocb->ki_iovec);
33885 if (ret < 0)
33886 goto out;
33887
33888+ if (kiocb->ki_iovec == &iovstack) {
33889+ kiocb->ki_inline_vec = iovstack;
33890+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
33891+ }
33892 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33893 kiocb->ki_cur_seg = 0;
33894 /* ki_nbytes/left now reflect bytes instead of segs */
33895diff -urNp linux-3.0.3/fs/attr.c linux-3.0.3/fs/attr.c
33896--- linux-3.0.3/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33897+++ linux-3.0.3/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33898@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33899 unsigned long limit;
33900
33901 limit = rlimit(RLIMIT_FSIZE);
33902+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33903 if (limit != RLIM_INFINITY && offset > limit)
33904 goto out_sig;
33905 if (offset > inode->i_sb->s_maxbytes)
33906diff -urNp linux-3.0.3/fs/befs/linuxvfs.c linux-3.0.3/fs/befs/linuxvfs.c
33907--- linux-3.0.3/fs/befs/linuxvfs.c 2011-07-21 22:17:23.000000000 -0400
33908+++ linux-3.0.3/fs/befs/linuxvfs.c 2011-08-23 21:47:56.000000000 -0400
33909@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33910 {
33911 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33912 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33913- char *link = nd_get_link(nd);
33914+ const char *link = nd_get_link(nd);
33915 if (!IS_ERR(link))
33916 kfree(link);
33917 }
33918diff -urNp linux-3.0.3/fs/binfmt_aout.c linux-3.0.3/fs/binfmt_aout.c
33919--- linux-3.0.3/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33920+++ linux-3.0.3/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33921@@ -16,6 +16,7 @@
33922 #include <linux/string.h>
33923 #include <linux/fs.h>
33924 #include <linux/file.h>
33925+#include <linux/security.h>
33926 #include <linux/stat.h>
33927 #include <linux/fcntl.h>
33928 #include <linux/ptrace.h>
33929@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33930 #endif
33931 # define START_STACK(u) ((void __user *)u.start_stack)
33932
33933+ memset(&dump, 0, sizeof(dump));
33934+
33935 fs = get_fs();
33936 set_fs(KERNEL_DS);
33937 has_dumped = 1;
33938@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33939
33940 /* If the size of the dump file exceeds the rlimit, then see what would happen
33941 if we wrote the stack, but not the data area. */
33942+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33943 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33944 dump.u_dsize = 0;
33945
33946 /* Make sure we have enough room to write the stack and data areas. */
33947+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33948 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33949 dump.u_ssize = 0;
33950
33951@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33952 rlim = rlimit(RLIMIT_DATA);
33953 if (rlim >= RLIM_INFINITY)
33954 rlim = ~0;
33955+
33956+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33957 if (ex.a_data + ex.a_bss > rlim)
33958 return -ENOMEM;
33959
33960@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33961 install_exec_creds(bprm);
33962 current->flags &= ~PF_FORKNOEXEC;
33963
33964+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33965+ current->mm->pax_flags = 0UL;
33966+#endif
33967+
33968+#ifdef CONFIG_PAX_PAGEEXEC
33969+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33970+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33971+
33972+#ifdef CONFIG_PAX_EMUTRAMP
33973+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33974+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33975+#endif
33976+
33977+#ifdef CONFIG_PAX_MPROTECT
33978+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33979+ current->mm->pax_flags |= MF_PAX_MPROTECT;
33980+#endif
33981+
33982+ }
33983+#endif
33984+
33985 if (N_MAGIC(ex) == OMAGIC) {
33986 unsigned long text_addr, map_size;
33987 loff_t pos;
33988@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
33989
33990 down_write(&current->mm->mmap_sem);
33991 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
33992- PROT_READ | PROT_WRITE | PROT_EXEC,
33993+ PROT_READ | PROT_WRITE,
33994 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
33995 fd_offset + ex.a_text);
33996 up_write(&current->mm->mmap_sem);
33997diff -urNp linux-3.0.3/fs/binfmt_elf.c linux-3.0.3/fs/binfmt_elf.c
33998--- linux-3.0.3/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
33999+++ linux-3.0.3/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
34000@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34001 #define elf_core_dump NULL
34002 #endif
34003
34004+#ifdef CONFIG_PAX_MPROTECT
34005+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34006+#endif
34007+
34008 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34009 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34010 #else
34011@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34012 .load_binary = load_elf_binary,
34013 .load_shlib = load_elf_library,
34014 .core_dump = elf_core_dump,
34015+
34016+#ifdef CONFIG_PAX_MPROTECT
34017+ .handle_mprotect= elf_handle_mprotect,
34018+#endif
34019+
34020 .min_coredump = ELF_EXEC_PAGESIZE,
34021 };
34022
34023@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34024
34025 static int set_brk(unsigned long start, unsigned long end)
34026 {
34027+ unsigned long e = end;
34028+
34029 start = ELF_PAGEALIGN(start);
34030 end = ELF_PAGEALIGN(end);
34031 if (end > start) {
34032@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34033 if (BAD_ADDR(addr))
34034 return addr;
34035 }
34036- current->mm->start_brk = current->mm->brk = end;
34037+ current->mm->start_brk = current->mm->brk = e;
34038 return 0;
34039 }
34040
34041@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34042 elf_addr_t __user *u_rand_bytes;
34043 const char *k_platform = ELF_PLATFORM;
34044 const char *k_base_platform = ELF_BASE_PLATFORM;
34045- unsigned char k_rand_bytes[16];
34046+ u32 k_rand_bytes[4];
34047 int items;
34048 elf_addr_t *elf_info;
34049 int ei_index = 0;
34050 const struct cred *cred = current_cred();
34051 struct vm_area_struct *vma;
34052+ unsigned long saved_auxv[AT_VECTOR_SIZE];
34053+
34054+ pax_track_stack();
34055
34056 /*
34057 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34058@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34059 * Generate 16 random bytes for userspace PRNG seeding.
34060 */
34061 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34062- u_rand_bytes = (elf_addr_t __user *)
34063- STACK_ALLOC(p, sizeof(k_rand_bytes));
34064+ srandom32(k_rand_bytes[0] ^ random32());
34065+ srandom32(k_rand_bytes[1] ^ random32());
34066+ srandom32(k_rand_bytes[2] ^ random32());
34067+ srandom32(k_rand_bytes[3] ^ random32());
34068+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
34069+ u_rand_bytes = (elf_addr_t __user *) p;
34070 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34071 return -EFAULT;
34072
34073@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34074 return -EFAULT;
34075 current->mm->env_end = p;
34076
34077+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34078+
34079 /* Put the elf_info on the stack in the right place. */
34080 sp = (elf_addr_t __user *)envp + 1;
34081- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34082+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34083 return -EFAULT;
34084 return 0;
34085 }
34086@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34087 {
34088 struct elf_phdr *elf_phdata;
34089 struct elf_phdr *eppnt;
34090- unsigned long load_addr = 0;
34091+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34092 int load_addr_set = 0;
34093 unsigned long last_bss = 0, elf_bss = 0;
34094- unsigned long error = ~0UL;
34095+ unsigned long error = -EINVAL;
34096 unsigned long total_size;
34097 int retval, i, size;
34098
34099@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34100 goto out_close;
34101 }
34102
34103+#ifdef CONFIG_PAX_SEGMEXEC
34104+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34105+ pax_task_size = SEGMEXEC_TASK_SIZE;
34106+#endif
34107+
34108 eppnt = elf_phdata;
34109 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34110 if (eppnt->p_type == PT_LOAD) {
34111@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34112 k = load_addr + eppnt->p_vaddr;
34113 if (BAD_ADDR(k) ||
34114 eppnt->p_filesz > eppnt->p_memsz ||
34115- eppnt->p_memsz > TASK_SIZE ||
34116- TASK_SIZE - eppnt->p_memsz < k) {
34117+ eppnt->p_memsz > pax_task_size ||
34118+ pax_task_size - eppnt->p_memsz < k) {
34119 error = -ENOMEM;
34120 goto out_close;
34121 }
34122@@ -528,6 +553,193 @@ out:
34123 return error;
34124 }
34125
34126+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34127+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34128+{
34129+ unsigned long pax_flags = 0UL;
34130+
34131+#ifdef CONFIG_PAX_PAGEEXEC
34132+ if (elf_phdata->p_flags & PF_PAGEEXEC)
34133+ pax_flags |= MF_PAX_PAGEEXEC;
34134+#endif
34135+
34136+#ifdef CONFIG_PAX_SEGMEXEC
34137+ if (elf_phdata->p_flags & PF_SEGMEXEC)
34138+ pax_flags |= MF_PAX_SEGMEXEC;
34139+#endif
34140+
34141+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34142+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34143+ if ((__supported_pte_mask & _PAGE_NX))
34144+ pax_flags &= ~MF_PAX_SEGMEXEC;
34145+ else
34146+ pax_flags &= ~MF_PAX_PAGEEXEC;
34147+ }
34148+#endif
34149+
34150+#ifdef CONFIG_PAX_EMUTRAMP
34151+ if (elf_phdata->p_flags & PF_EMUTRAMP)
34152+ pax_flags |= MF_PAX_EMUTRAMP;
34153+#endif
34154+
34155+#ifdef CONFIG_PAX_MPROTECT
34156+ if (elf_phdata->p_flags & PF_MPROTECT)
34157+ pax_flags |= MF_PAX_MPROTECT;
34158+#endif
34159+
34160+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34161+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34162+ pax_flags |= MF_PAX_RANDMMAP;
34163+#endif
34164+
34165+ return pax_flags;
34166+}
34167+#endif
34168+
34169+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34170+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34171+{
34172+ unsigned long pax_flags = 0UL;
34173+
34174+#ifdef CONFIG_PAX_PAGEEXEC
34175+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34176+ pax_flags |= MF_PAX_PAGEEXEC;
34177+#endif
34178+
34179+#ifdef CONFIG_PAX_SEGMEXEC
34180+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34181+ pax_flags |= MF_PAX_SEGMEXEC;
34182+#endif
34183+
34184+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34185+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34186+ if ((__supported_pte_mask & _PAGE_NX))
34187+ pax_flags &= ~MF_PAX_SEGMEXEC;
34188+ else
34189+ pax_flags &= ~MF_PAX_PAGEEXEC;
34190+ }
34191+#endif
34192+
34193+#ifdef CONFIG_PAX_EMUTRAMP
34194+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34195+ pax_flags |= MF_PAX_EMUTRAMP;
34196+#endif
34197+
34198+#ifdef CONFIG_PAX_MPROTECT
34199+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34200+ pax_flags |= MF_PAX_MPROTECT;
34201+#endif
34202+
34203+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34204+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34205+ pax_flags |= MF_PAX_RANDMMAP;
34206+#endif
34207+
34208+ return pax_flags;
34209+}
34210+#endif
34211+
34212+#ifdef CONFIG_PAX_EI_PAX
34213+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34214+{
34215+ unsigned long pax_flags = 0UL;
34216+
34217+#ifdef CONFIG_PAX_PAGEEXEC
34218+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34219+ pax_flags |= MF_PAX_PAGEEXEC;
34220+#endif
34221+
34222+#ifdef CONFIG_PAX_SEGMEXEC
34223+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34224+ pax_flags |= MF_PAX_SEGMEXEC;
34225+#endif
34226+
34227+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34228+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34229+ if ((__supported_pte_mask & _PAGE_NX))
34230+ pax_flags &= ~MF_PAX_SEGMEXEC;
34231+ else
34232+ pax_flags &= ~MF_PAX_PAGEEXEC;
34233+ }
34234+#endif
34235+
34236+#ifdef CONFIG_PAX_EMUTRAMP
34237+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34238+ pax_flags |= MF_PAX_EMUTRAMP;
34239+#endif
34240+
34241+#ifdef CONFIG_PAX_MPROTECT
34242+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34243+ pax_flags |= MF_PAX_MPROTECT;
34244+#endif
34245+
34246+#ifdef CONFIG_PAX_ASLR
34247+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34248+ pax_flags |= MF_PAX_RANDMMAP;
34249+#endif
34250+
34251+ return pax_flags;
34252+}
34253+#endif
34254+
34255+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34256+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34257+{
34258+ unsigned long pax_flags = 0UL;
34259+
34260+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34261+ unsigned long i;
34262+ int found_flags = 0;
34263+#endif
34264+
34265+#ifdef CONFIG_PAX_EI_PAX
34266+ pax_flags = pax_parse_ei_pax(elf_ex);
34267+#endif
34268+
34269+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34270+ for (i = 0UL; i < elf_ex->e_phnum; i++)
34271+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34272+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34273+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34274+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34275+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34276+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34277+ return -EINVAL;
34278+
34279+#ifdef CONFIG_PAX_SOFTMODE
34280+ if (pax_softmode)
34281+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
34282+ else
34283+#endif
34284+
34285+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34286+ found_flags = 1;
34287+ break;
34288+ }
34289+#endif
34290+
34291+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34292+ if (found_flags == 0) {
34293+ struct elf_phdr phdr;
34294+ memset(&phdr, 0, sizeof(phdr));
34295+ phdr.p_flags = PF_NOEMUTRAMP;
34296+#ifdef CONFIG_PAX_SOFTMODE
34297+ if (pax_softmode)
34298+ pax_flags = pax_parse_softmode(&phdr);
34299+ else
34300+#endif
34301+ pax_flags = pax_parse_hardmode(&phdr);
34302+ }
34303+#endif
34304+
34305+ if (0 > pax_check_flags(&pax_flags))
34306+ return -EINVAL;
34307+
34308+ current->mm->pax_flags = pax_flags;
34309+ return 0;
34310+}
34311+#endif
34312+
34313 /*
34314 * These are the functions used to load ELF style executables and shared
34315 * libraries. There is no binary dependent code anywhere else.
34316@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34317 {
34318 unsigned int random_variable = 0;
34319
34320+#ifdef CONFIG_PAX_RANDUSTACK
34321+ if (randomize_va_space)
34322+ return stack_top - current->mm->delta_stack;
34323+#endif
34324+
34325 if ((current->flags & PF_RANDOMIZE) &&
34326 !(current->personality & ADDR_NO_RANDOMIZE)) {
34327 random_variable = get_random_int() & STACK_RND_MASK;
34328@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34329 unsigned long load_addr = 0, load_bias = 0;
34330 int load_addr_set = 0;
34331 char * elf_interpreter = NULL;
34332- unsigned long error;
34333+ unsigned long error = 0;
34334 struct elf_phdr *elf_ppnt, *elf_phdata;
34335 unsigned long elf_bss, elf_brk;
34336 int retval, i;
34337@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34338 unsigned long start_code, end_code, start_data, end_data;
34339 unsigned long reloc_func_desc __maybe_unused = 0;
34340 int executable_stack = EXSTACK_DEFAULT;
34341- unsigned long def_flags = 0;
34342 struct {
34343 struct elfhdr elf_ex;
34344 struct elfhdr interp_elf_ex;
34345 } *loc;
34346+ unsigned long pax_task_size = TASK_SIZE;
34347
34348 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34349 if (!loc) {
34350@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34351
34352 /* OK, This is the point of no return */
34353 current->flags &= ~PF_FORKNOEXEC;
34354- current->mm->def_flags = def_flags;
34355+
34356+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34357+ current->mm->pax_flags = 0UL;
34358+#endif
34359+
34360+#ifdef CONFIG_PAX_DLRESOLVE
34361+ current->mm->call_dl_resolve = 0UL;
34362+#endif
34363+
34364+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34365+ current->mm->call_syscall = 0UL;
34366+#endif
34367+
34368+#ifdef CONFIG_PAX_ASLR
34369+ current->mm->delta_mmap = 0UL;
34370+ current->mm->delta_stack = 0UL;
34371+#endif
34372+
34373+ current->mm->def_flags = 0;
34374+
34375+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34376+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34377+ send_sig(SIGKILL, current, 0);
34378+ goto out_free_dentry;
34379+ }
34380+#endif
34381+
34382+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34383+ pax_set_initial_flags(bprm);
34384+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34385+ if (pax_set_initial_flags_func)
34386+ (pax_set_initial_flags_func)(bprm);
34387+#endif
34388+
34389+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34390+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34391+ current->mm->context.user_cs_limit = PAGE_SIZE;
34392+ current->mm->def_flags |= VM_PAGEEXEC;
34393+ }
34394+#endif
34395+
34396+#ifdef CONFIG_PAX_SEGMEXEC
34397+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34398+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34399+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34400+ pax_task_size = SEGMEXEC_TASK_SIZE;
34401+ current->mm->def_flags |= VM_NOHUGEPAGE;
34402+ }
34403+#endif
34404+
34405+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34406+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34407+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34408+ put_cpu();
34409+ }
34410+#endif
34411
34412 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34413 may depend on the personality. */
34414 SET_PERSONALITY(loc->elf_ex);
34415+
34416+#ifdef CONFIG_PAX_ASLR
34417+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34418+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34419+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34420+ }
34421+#endif
34422+
34423+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34424+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34425+ executable_stack = EXSTACK_DISABLE_X;
34426+ current->personality &= ~READ_IMPLIES_EXEC;
34427+ } else
34428+#endif
34429+
34430 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34431 current->personality |= READ_IMPLIES_EXEC;
34432
34433@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34434 #else
34435 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34436 #endif
34437+
34438+#ifdef CONFIG_PAX_RANDMMAP
34439+ /* PaX: randomize base address at the default exe base if requested */
34440+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34441+#ifdef CONFIG_SPARC64
34442+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34443+#else
34444+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34445+#endif
34446+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34447+ elf_flags |= MAP_FIXED;
34448+ }
34449+#endif
34450+
34451 }
34452
34453 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34454@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34455 * allowed task size. Note that p_filesz must always be
34456 * <= p_memsz so it is only necessary to check p_memsz.
34457 */
34458- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34459- elf_ppnt->p_memsz > TASK_SIZE ||
34460- TASK_SIZE - elf_ppnt->p_memsz < k) {
34461+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34462+ elf_ppnt->p_memsz > pax_task_size ||
34463+ pax_task_size - elf_ppnt->p_memsz < k) {
34464 /* set_brk can never work. Avoid overflows. */
34465 send_sig(SIGKILL, current, 0);
34466 retval = -EINVAL;
34467@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34468 start_data += load_bias;
34469 end_data += load_bias;
34470
34471+#ifdef CONFIG_PAX_RANDMMAP
34472+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34473+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34474+#endif
34475+
34476 /* Calling set_brk effectively mmaps the pages that we need
34477 * for the bss and break sections. We must do this before
34478 * mapping in the interpreter, to make sure it doesn't wind
34479@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34480 goto out_free_dentry;
34481 }
34482 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34483- send_sig(SIGSEGV, current, 0);
34484- retval = -EFAULT; /* Nobody gets to see this, but.. */
34485- goto out_free_dentry;
34486+ /*
34487+ * This bss-zeroing can fail if the ELF
34488+ * file specifies odd protections. So
34489+ * we don't check the return value
34490+ */
34491 }
34492
34493 if (elf_interpreter) {
34494@@ -1090,7 +1398,7 @@ out:
34495 * Decide what to dump of a segment, part, all or none.
34496 */
34497 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34498- unsigned long mm_flags)
34499+ unsigned long mm_flags, long signr)
34500 {
34501 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34502
34503@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34504 if (vma->vm_file == NULL)
34505 return 0;
34506
34507- if (FILTER(MAPPED_PRIVATE))
34508+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34509 goto whole;
34510
34511 /*
34512@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34513 {
34514 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34515 int i = 0;
34516- do
34517+ do {
34518 i += 2;
34519- while (auxv[i - 2] != AT_NULL);
34520+ } while (auxv[i - 2] != AT_NULL);
34521 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34522 }
34523
34524@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34525 }
34526
34527 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34528- unsigned long mm_flags)
34529+ struct coredump_params *cprm)
34530 {
34531 struct vm_area_struct *vma;
34532 size_t size = 0;
34533
34534 for (vma = first_vma(current, gate_vma); vma != NULL;
34535 vma = next_vma(vma, gate_vma))
34536- size += vma_dump_size(vma, mm_flags);
34537+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34538 return size;
34539 }
34540
34541@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34542
34543 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34544
34545- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34546+ offset += elf_core_vma_data_size(gate_vma, cprm);
34547 offset += elf_core_extra_data_size();
34548 e_shoff = offset;
34549
34550@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34551 offset = dataoff;
34552
34553 size += sizeof(*elf);
34554+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34555 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34556 goto end_coredump;
34557
34558 size += sizeof(*phdr4note);
34559+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34560 if (size > cprm->limit
34561 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34562 goto end_coredump;
34563@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34564 phdr.p_offset = offset;
34565 phdr.p_vaddr = vma->vm_start;
34566 phdr.p_paddr = 0;
34567- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34568+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34569 phdr.p_memsz = vma->vm_end - vma->vm_start;
34570 offset += phdr.p_filesz;
34571 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34572@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34573 phdr.p_align = ELF_EXEC_PAGESIZE;
34574
34575 size += sizeof(phdr);
34576+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34577 if (size > cprm->limit
34578 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34579 goto end_coredump;
34580@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34581 unsigned long addr;
34582 unsigned long end;
34583
34584- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34585+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34586
34587 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34588 struct page *page;
34589@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34590 page = get_dump_page(addr);
34591 if (page) {
34592 void *kaddr = kmap(page);
34593+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34594 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34595 !dump_write(cprm->file, kaddr,
34596 PAGE_SIZE);
34597@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34598
34599 if (e_phnum == PN_XNUM) {
34600 size += sizeof(*shdr4extnum);
34601+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34602 if (size > cprm->limit
34603 || !dump_write(cprm->file, shdr4extnum,
34604 sizeof(*shdr4extnum)))
34605@@ -2067,6 +2380,97 @@ out:
34606
34607 #endif /* CONFIG_ELF_CORE */
34608
34609+#ifdef CONFIG_PAX_MPROTECT
34610+/* PaX: non-PIC ELF libraries need relocations on their executable segments
34611+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34612+ * we'll remove VM_MAYWRITE for good on RELRO segments.
34613+ *
34614+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34615+ * basis because we want to allow the common case and not the special ones.
34616+ */
34617+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34618+{
34619+ struct elfhdr elf_h;
34620+ struct elf_phdr elf_p;
34621+ unsigned long i;
34622+ unsigned long oldflags;
34623+ bool is_textrel_rw, is_textrel_rx, is_relro;
34624+
34625+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34626+ return;
34627+
34628+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34629+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34630+
34631+#ifdef CONFIG_PAX_ELFRELOCS
34632+ /* possible TEXTREL */
34633+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34634+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34635+#else
34636+ is_textrel_rw = false;
34637+ is_textrel_rx = false;
34638+#endif
34639+
34640+ /* possible RELRO */
34641+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34642+
34643+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34644+ return;
34645+
34646+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34647+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34648+
34649+#ifdef CONFIG_PAX_ETEXECRELOCS
34650+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34651+#else
34652+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34653+#endif
34654+
34655+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34656+ !elf_check_arch(&elf_h) ||
34657+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34658+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34659+ return;
34660+
34661+ for (i = 0UL; i < elf_h.e_phnum; i++) {
34662+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34663+ return;
34664+ switch (elf_p.p_type) {
34665+ case PT_DYNAMIC:
34666+ if (!is_textrel_rw && !is_textrel_rx)
34667+ continue;
34668+ i = 0UL;
34669+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34670+ elf_dyn dyn;
34671+
34672+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34673+ return;
34674+ if (dyn.d_tag == DT_NULL)
34675+ return;
34676+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34677+ gr_log_textrel(vma);
34678+ if (is_textrel_rw)
34679+ vma->vm_flags |= VM_MAYWRITE;
34680+ else
34681+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34682+ vma->vm_flags &= ~VM_MAYWRITE;
34683+ return;
34684+ }
34685+ i++;
34686+ }
34687+ return;
34688+
34689+ case PT_GNU_RELRO:
34690+ if (!is_relro)
34691+ continue;
34692+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34693+ vma->vm_flags &= ~VM_MAYWRITE;
34694+ return;
34695+ }
34696+ }
34697+}
34698+#endif
34699+
34700 static int __init init_elf_binfmt(void)
34701 {
34702 return register_binfmt(&elf_format);
34703diff -urNp linux-3.0.3/fs/binfmt_flat.c linux-3.0.3/fs/binfmt_flat.c
34704--- linux-3.0.3/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34705+++ linux-3.0.3/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34706@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34707 realdatastart = (unsigned long) -ENOMEM;
34708 printk("Unable to allocate RAM for process data, errno %d\n",
34709 (int)-realdatastart);
34710+ down_write(&current->mm->mmap_sem);
34711 do_munmap(current->mm, textpos, text_len);
34712+ up_write(&current->mm->mmap_sem);
34713 ret = realdatastart;
34714 goto err;
34715 }
34716@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34717 }
34718 if (IS_ERR_VALUE(result)) {
34719 printk("Unable to read data+bss, errno %d\n", (int)-result);
34720+ down_write(&current->mm->mmap_sem);
34721 do_munmap(current->mm, textpos, text_len);
34722 do_munmap(current->mm, realdatastart, len);
34723+ up_write(&current->mm->mmap_sem);
34724 ret = result;
34725 goto err;
34726 }
34727@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34728 }
34729 if (IS_ERR_VALUE(result)) {
34730 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34731+ down_write(&current->mm->mmap_sem);
34732 do_munmap(current->mm, textpos, text_len + data_len + extra +
34733 MAX_SHARED_LIBS * sizeof(unsigned long));
34734+ up_write(&current->mm->mmap_sem);
34735 ret = result;
34736 goto err;
34737 }
34738diff -urNp linux-3.0.3/fs/bio.c linux-3.0.3/fs/bio.c
34739--- linux-3.0.3/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34740+++ linux-3.0.3/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34741@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34742 const int read = bio_data_dir(bio) == READ;
34743 struct bio_map_data *bmd = bio->bi_private;
34744 int i;
34745- char *p = bmd->sgvecs[0].iov_base;
34746+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
34747
34748 __bio_for_each_segment(bvec, bio, i, 0) {
34749 char *addr = page_address(bvec->bv_page);
34750diff -urNp linux-3.0.3/fs/block_dev.c linux-3.0.3/fs/block_dev.c
34751--- linux-3.0.3/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34752+++ linux-3.0.3/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34753@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34754 else if (bdev->bd_contains == bdev)
34755 return true; /* is a whole device which isn't held */
34756
34757- else if (whole->bd_holder == bd_may_claim)
34758+ else if (whole->bd_holder == (void *)bd_may_claim)
34759 return true; /* is a partition of a device that is being partitioned */
34760 else if (whole->bd_holder != NULL)
34761 return false; /* is a partition of a held device */
34762diff -urNp linux-3.0.3/fs/btrfs/ctree.c linux-3.0.3/fs/btrfs/ctree.c
34763--- linux-3.0.3/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34764+++ linux-3.0.3/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34765@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34766 free_extent_buffer(buf);
34767 add_root_to_dirty_list(root);
34768 } else {
34769- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34770- parent_start = parent->start;
34771- else
34772+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34773+ if (parent)
34774+ parent_start = parent->start;
34775+ else
34776+ parent_start = 0;
34777+ } else
34778 parent_start = 0;
34779
34780 WARN_ON(trans->transid != btrfs_header_generation(parent));
34781diff -urNp linux-3.0.3/fs/btrfs/inode.c linux-3.0.3/fs/btrfs/inode.c
34782--- linux-3.0.3/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34783+++ linux-3.0.3/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34784@@ -6895,7 +6895,7 @@ fail:
34785 return -ENOMEM;
34786 }
34787
34788-static int btrfs_getattr(struct vfsmount *mnt,
34789+int btrfs_getattr(struct vfsmount *mnt,
34790 struct dentry *dentry, struct kstat *stat)
34791 {
34792 struct inode *inode = dentry->d_inode;
34793@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34794 return 0;
34795 }
34796
34797+EXPORT_SYMBOL(btrfs_getattr);
34798+
34799+dev_t get_btrfs_dev_from_inode(struct inode *inode)
34800+{
34801+ return BTRFS_I(inode)->root->anon_super.s_dev;
34802+}
34803+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34804+
34805 /*
34806 * If a file is moved, it will inherit the cow and compression flags of the new
34807 * directory.
34808diff -urNp linux-3.0.3/fs/btrfs/ioctl.c linux-3.0.3/fs/btrfs/ioctl.c
34809--- linux-3.0.3/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34810+++ linux-3.0.3/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34811@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34812 for (i = 0; i < num_types; i++) {
34813 struct btrfs_space_info *tmp;
34814
34815+ /* Don't copy in more than we allocated */
34816 if (!slot_count)
34817 break;
34818
34819+ slot_count--;
34820+
34821 info = NULL;
34822 rcu_read_lock();
34823 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34824@@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34825 memcpy(dest, &space, sizeof(space));
34826 dest++;
34827 space_args.total_spaces++;
34828- slot_count--;
34829 }
34830- if (!slot_count)
34831- break;
34832 }
34833 up_read(&info->groups_sem);
34834 }
34835diff -urNp linux-3.0.3/fs/btrfs/relocation.c linux-3.0.3/fs/btrfs/relocation.c
34836--- linux-3.0.3/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34837+++ linux-3.0.3/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34838@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34839 }
34840 spin_unlock(&rc->reloc_root_tree.lock);
34841
34842- BUG_ON((struct btrfs_root *)node->data != root);
34843+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
34844
34845 if (!del) {
34846 spin_lock(&rc->reloc_root_tree.lock);
34847diff -urNp linux-3.0.3/fs/cachefiles/bind.c linux-3.0.3/fs/cachefiles/bind.c
34848--- linux-3.0.3/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34849+++ linux-3.0.3/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34850@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34851 args);
34852
34853 /* start by checking things over */
34854- ASSERT(cache->fstop_percent >= 0 &&
34855- cache->fstop_percent < cache->fcull_percent &&
34856+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
34857 cache->fcull_percent < cache->frun_percent &&
34858 cache->frun_percent < 100);
34859
34860- ASSERT(cache->bstop_percent >= 0 &&
34861- cache->bstop_percent < cache->bcull_percent &&
34862+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
34863 cache->bcull_percent < cache->brun_percent &&
34864 cache->brun_percent < 100);
34865
34866diff -urNp linux-3.0.3/fs/cachefiles/daemon.c linux-3.0.3/fs/cachefiles/daemon.c
34867--- linux-3.0.3/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34868+++ linux-3.0.3/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34869@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34870 if (n > buflen)
34871 return -EMSGSIZE;
34872
34873- if (copy_to_user(_buffer, buffer, n) != 0)
34874+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34875 return -EFAULT;
34876
34877 return n;
34878@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34879 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34880 return -EIO;
34881
34882- if (datalen < 0 || datalen > PAGE_SIZE - 1)
34883+ if (datalen > PAGE_SIZE - 1)
34884 return -EOPNOTSUPP;
34885
34886 /* drag the command string into the kernel so we can parse it */
34887@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34888 if (args[0] != '%' || args[1] != '\0')
34889 return -EINVAL;
34890
34891- if (fstop < 0 || fstop >= cache->fcull_percent)
34892+ if (fstop >= cache->fcull_percent)
34893 return cachefiles_daemon_range_error(cache, args);
34894
34895 cache->fstop_percent = fstop;
34896@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34897 if (args[0] != '%' || args[1] != '\0')
34898 return -EINVAL;
34899
34900- if (bstop < 0 || bstop >= cache->bcull_percent)
34901+ if (bstop >= cache->bcull_percent)
34902 return cachefiles_daemon_range_error(cache, args);
34903
34904 cache->bstop_percent = bstop;
34905diff -urNp linux-3.0.3/fs/cachefiles/internal.h linux-3.0.3/fs/cachefiles/internal.h
34906--- linux-3.0.3/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34907+++ linux-3.0.3/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34908@@ -57,7 +57,7 @@ struct cachefiles_cache {
34909 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34910 struct rb_root active_nodes; /* active nodes (can't be culled) */
34911 rwlock_t active_lock; /* lock for active_nodes */
34912- atomic_t gravecounter; /* graveyard uniquifier */
34913+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34914 unsigned frun_percent; /* when to stop culling (% files) */
34915 unsigned fcull_percent; /* when to start culling (% files) */
34916 unsigned fstop_percent; /* when to stop allocating (% files) */
34917@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34918 * proc.c
34919 */
34920 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34921-extern atomic_t cachefiles_lookup_histogram[HZ];
34922-extern atomic_t cachefiles_mkdir_histogram[HZ];
34923-extern atomic_t cachefiles_create_histogram[HZ];
34924+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34925+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34926+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34927
34928 extern int __init cachefiles_proc_init(void);
34929 extern void cachefiles_proc_cleanup(void);
34930 static inline
34931-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34932+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34933 {
34934 unsigned long jif = jiffies - start_jif;
34935 if (jif >= HZ)
34936 jif = HZ - 1;
34937- atomic_inc(&histogram[jif]);
34938+ atomic_inc_unchecked(&histogram[jif]);
34939 }
34940
34941 #else
34942diff -urNp linux-3.0.3/fs/cachefiles/namei.c linux-3.0.3/fs/cachefiles/namei.c
34943--- linux-3.0.3/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34944+++ linux-3.0.3/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34945@@ -318,7 +318,7 @@ try_again:
34946 /* first step is to make up a grave dentry in the graveyard */
34947 sprintf(nbuffer, "%08x%08x",
34948 (uint32_t) get_seconds(),
34949- (uint32_t) atomic_inc_return(&cache->gravecounter));
34950+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34951
34952 /* do the multiway lock magic */
34953 trap = lock_rename(cache->graveyard, dir);
34954diff -urNp linux-3.0.3/fs/cachefiles/proc.c linux-3.0.3/fs/cachefiles/proc.c
34955--- linux-3.0.3/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34956+++ linux-3.0.3/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34957@@ -14,9 +14,9 @@
34958 #include <linux/seq_file.h>
34959 #include "internal.h"
34960
34961-atomic_t cachefiles_lookup_histogram[HZ];
34962-atomic_t cachefiles_mkdir_histogram[HZ];
34963-atomic_t cachefiles_create_histogram[HZ];
34964+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34965+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34966+atomic_unchecked_t cachefiles_create_histogram[HZ];
34967
34968 /*
34969 * display the latency histogram
34970@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34971 return 0;
34972 default:
34973 index = (unsigned long) v - 3;
34974- x = atomic_read(&cachefiles_lookup_histogram[index]);
34975- y = atomic_read(&cachefiles_mkdir_histogram[index]);
34976- z = atomic_read(&cachefiles_create_histogram[index]);
34977+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34978+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34979+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34980 if (x == 0 && y == 0 && z == 0)
34981 return 0;
34982
34983diff -urNp linux-3.0.3/fs/cachefiles/rdwr.c linux-3.0.3/fs/cachefiles/rdwr.c
34984--- linux-3.0.3/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
34985+++ linux-3.0.3/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
34986@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
34987 old_fs = get_fs();
34988 set_fs(KERNEL_DS);
34989 ret = file->f_op->write(
34990- file, (const void __user *) data, len, &pos);
34991+ file, (__force const void __user *) data, len, &pos);
34992 set_fs(old_fs);
34993 kunmap(page);
34994 if (ret != len)
34995diff -urNp linux-3.0.3/fs/ceph/dir.c linux-3.0.3/fs/ceph/dir.c
34996--- linux-3.0.3/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
34997+++ linux-3.0.3/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
34998@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
34999 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35000 struct ceph_mds_client *mdsc = fsc->mdsc;
35001 unsigned frag = fpos_frag(filp->f_pos);
35002- int off = fpos_off(filp->f_pos);
35003+ unsigned int off = fpos_off(filp->f_pos);
35004 int err;
35005 u32 ftype;
35006 struct ceph_mds_reply_info_parsed *rinfo;
35007diff -urNp linux-3.0.3/fs/cifs/cifs_debug.c linux-3.0.3/fs/cifs/cifs_debug.c
35008--- linux-3.0.3/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
35009+++ linux-3.0.3/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
35010@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
35011
35012 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
35013 #ifdef CONFIG_CIFS_STATS2
35014- atomic_set(&totBufAllocCount, 0);
35015- atomic_set(&totSmBufAllocCount, 0);
35016+ atomic_set_unchecked(&totBufAllocCount, 0);
35017+ atomic_set_unchecked(&totSmBufAllocCount, 0);
35018 #endif /* CONFIG_CIFS_STATS2 */
35019 spin_lock(&cifs_tcp_ses_lock);
35020 list_for_each(tmp1, &cifs_tcp_ses_list) {
35021@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35022 tcon = list_entry(tmp3,
35023 struct cifs_tcon,
35024 tcon_list);
35025- atomic_set(&tcon->num_smbs_sent, 0);
35026- atomic_set(&tcon->num_writes, 0);
35027- atomic_set(&tcon->num_reads, 0);
35028- atomic_set(&tcon->num_oplock_brks, 0);
35029- atomic_set(&tcon->num_opens, 0);
35030- atomic_set(&tcon->num_posixopens, 0);
35031- atomic_set(&tcon->num_posixmkdirs, 0);
35032- atomic_set(&tcon->num_closes, 0);
35033- atomic_set(&tcon->num_deletes, 0);
35034- atomic_set(&tcon->num_mkdirs, 0);
35035- atomic_set(&tcon->num_rmdirs, 0);
35036- atomic_set(&tcon->num_renames, 0);
35037- atomic_set(&tcon->num_t2renames, 0);
35038- atomic_set(&tcon->num_ffirst, 0);
35039- atomic_set(&tcon->num_fnext, 0);
35040- atomic_set(&tcon->num_fclose, 0);
35041- atomic_set(&tcon->num_hardlinks, 0);
35042- atomic_set(&tcon->num_symlinks, 0);
35043- atomic_set(&tcon->num_locks, 0);
35044+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35045+ atomic_set_unchecked(&tcon->num_writes, 0);
35046+ atomic_set_unchecked(&tcon->num_reads, 0);
35047+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35048+ atomic_set_unchecked(&tcon->num_opens, 0);
35049+ atomic_set_unchecked(&tcon->num_posixopens, 0);
35050+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35051+ atomic_set_unchecked(&tcon->num_closes, 0);
35052+ atomic_set_unchecked(&tcon->num_deletes, 0);
35053+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
35054+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
35055+ atomic_set_unchecked(&tcon->num_renames, 0);
35056+ atomic_set_unchecked(&tcon->num_t2renames, 0);
35057+ atomic_set_unchecked(&tcon->num_ffirst, 0);
35058+ atomic_set_unchecked(&tcon->num_fnext, 0);
35059+ atomic_set_unchecked(&tcon->num_fclose, 0);
35060+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
35061+ atomic_set_unchecked(&tcon->num_symlinks, 0);
35062+ atomic_set_unchecked(&tcon->num_locks, 0);
35063 }
35064 }
35065 }
35066@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
35067 smBufAllocCount.counter, cifs_min_small);
35068 #ifdef CONFIG_CIFS_STATS2
35069 seq_printf(m, "Total Large %d Small %d Allocations\n",
35070- atomic_read(&totBufAllocCount),
35071- atomic_read(&totSmBufAllocCount));
35072+ atomic_read_unchecked(&totBufAllocCount),
35073+ atomic_read_unchecked(&totSmBufAllocCount));
35074 #endif /* CONFIG_CIFS_STATS2 */
35075
35076 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
35077@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35078 if (tcon->need_reconnect)
35079 seq_puts(m, "\tDISCONNECTED ");
35080 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35081- atomic_read(&tcon->num_smbs_sent),
35082- atomic_read(&tcon->num_oplock_brks));
35083+ atomic_read_unchecked(&tcon->num_smbs_sent),
35084+ atomic_read_unchecked(&tcon->num_oplock_brks));
35085 seq_printf(m, "\nReads: %d Bytes: %lld",
35086- atomic_read(&tcon->num_reads),
35087+ atomic_read_unchecked(&tcon->num_reads),
35088 (long long)(tcon->bytes_read));
35089 seq_printf(m, "\nWrites: %d Bytes: %lld",
35090- atomic_read(&tcon->num_writes),
35091+ atomic_read_unchecked(&tcon->num_writes),
35092 (long long)(tcon->bytes_written));
35093 seq_printf(m, "\nFlushes: %d",
35094- atomic_read(&tcon->num_flushes));
35095+ atomic_read_unchecked(&tcon->num_flushes));
35096 seq_printf(m, "\nLocks: %d HardLinks: %d "
35097 "Symlinks: %d",
35098- atomic_read(&tcon->num_locks),
35099- atomic_read(&tcon->num_hardlinks),
35100- atomic_read(&tcon->num_symlinks));
35101+ atomic_read_unchecked(&tcon->num_locks),
35102+ atomic_read_unchecked(&tcon->num_hardlinks),
35103+ atomic_read_unchecked(&tcon->num_symlinks));
35104 seq_printf(m, "\nOpens: %d Closes: %d "
35105 "Deletes: %d",
35106- atomic_read(&tcon->num_opens),
35107- atomic_read(&tcon->num_closes),
35108- atomic_read(&tcon->num_deletes));
35109+ atomic_read_unchecked(&tcon->num_opens),
35110+ atomic_read_unchecked(&tcon->num_closes),
35111+ atomic_read_unchecked(&tcon->num_deletes));
35112 seq_printf(m, "\nPosix Opens: %d "
35113 "Posix Mkdirs: %d",
35114- atomic_read(&tcon->num_posixopens),
35115- atomic_read(&tcon->num_posixmkdirs));
35116+ atomic_read_unchecked(&tcon->num_posixopens),
35117+ atomic_read_unchecked(&tcon->num_posixmkdirs));
35118 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35119- atomic_read(&tcon->num_mkdirs),
35120- atomic_read(&tcon->num_rmdirs));
35121+ atomic_read_unchecked(&tcon->num_mkdirs),
35122+ atomic_read_unchecked(&tcon->num_rmdirs));
35123 seq_printf(m, "\nRenames: %d T2 Renames %d",
35124- atomic_read(&tcon->num_renames),
35125- atomic_read(&tcon->num_t2renames));
35126+ atomic_read_unchecked(&tcon->num_renames),
35127+ atomic_read_unchecked(&tcon->num_t2renames));
35128 seq_printf(m, "\nFindFirst: %d FNext %d "
35129 "FClose %d",
35130- atomic_read(&tcon->num_ffirst),
35131- atomic_read(&tcon->num_fnext),
35132- atomic_read(&tcon->num_fclose));
35133+ atomic_read_unchecked(&tcon->num_ffirst),
35134+ atomic_read_unchecked(&tcon->num_fnext),
35135+ atomic_read_unchecked(&tcon->num_fclose));
35136 }
35137 }
35138 }
35139diff -urNp linux-3.0.3/fs/cifs/cifsfs.c linux-3.0.3/fs/cifs/cifsfs.c
35140--- linux-3.0.3/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
35141+++ linux-3.0.3/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
35142@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
35143 cifs_req_cachep = kmem_cache_create("cifs_request",
35144 CIFSMaxBufSize +
35145 MAX_CIFS_HDR_SIZE, 0,
35146- SLAB_HWCACHE_ALIGN, NULL);
35147+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
35148 if (cifs_req_cachep == NULL)
35149 return -ENOMEM;
35150
35151@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
35152 efficient to alloc 1 per page off the slab compared to 17K (5page)
35153 alloc of large cifs buffers even when page debugging is on */
35154 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
35155- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
35156+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
35157 NULL);
35158 if (cifs_sm_req_cachep == NULL) {
35159 mempool_destroy(cifs_req_poolp);
35160@@ -1106,8 +1106,8 @@ init_cifs(void)
35161 atomic_set(&bufAllocCount, 0);
35162 atomic_set(&smBufAllocCount, 0);
35163 #ifdef CONFIG_CIFS_STATS2
35164- atomic_set(&totBufAllocCount, 0);
35165- atomic_set(&totSmBufAllocCount, 0);
35166+ atomic_set_unchecked(&totBufAllocCount, 0);
35167+ atomic_set_unchecked(&totSmBufAllocCount, 0);
35168 #endif /* CONFIG_CIFS_STATS2 */
35169
35170 atomic_set(&midCount, 0);
35171diff -urNp linux-3.0.3/fs/cifs/cifsglob.h linux-3.0.3/fs/cifs/cifsglob.h
35172--- linux-3.0.3/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
35173+++ linux-3.0.3/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
35174@@ -381,28 +381,28 @@ struct cifs_tcon {
35175 __u16 Flags; /* optional support bits */
35176 enum statusEnum tidStatus;
35177 #ifdef CONFIG_CIFS_STATS
35178- atomic_t num_smbs_sent;
35179- atomic_t num_writes;
35180- atomic_t num_reads;
35181- atomic_t num_flushes;
35182- atomic_t num_oplock_brks;
35183- atomic_t num_opens;
35184- atomic_t num_closes;
35185- atomic_t num_deletes;
35186- atomic_t num_mkdirs;
35187- atomic_t num_posixopens;
35188- atomic_t num_posixmkdirs;
35189- atomic_t num_rmdirs;
35190- atomic_t num_renames;
35191- atomic_t num_t2renames;
35192- atomic_t num_ffirst;
35193- atomic_t num_fnext;
35194- atomic_t num_fclose;
35195- atomic_t num_hardlinks;
35196- atomic_t num_symlinks;
35197- atomic_t num_locks;
35198- atomic_t num_acl_get;
35199- atomic_t num_acl_set;
35200+ atomic_unchecked_t num_smbs_sent;
35201+ atomic_unchecked_t num_writes;
35202+ atomic_unchecked_t num_reads;
35203+ atomic_unchecked_t num_flushes;
35204+ atomic_unchecked_t num_oplock_brks;
35205+ atomic_unchecked_t num_opens;
35206+ atomic_unchecked_t num_closes;
35207+ atomic_unchecked_t num_deletes;
35208+ atomic_unchecked_t num_mkdirs;
35209+ atomic_unchecked_t num_posixopens;
35210+ atomic_unchecked_t num_posixmkdirs;
35211+ atomic_unchecked_t num_rmdirs;
35212+ atomic_unchecked_t num_renames;
35213+ atomic_unchecked_t num_t2renames;
35214+ atomic_unchecked_t num_ffirst;
35215+ atomic_unchecked_t num_fnext;
35216+ atomic_unchecked_t num_fclose;
35217+ atomic_unchecked_t num_hardlinks;
35218+ atomic_unchecked_t num_symlinks;
35219+ atomic_unchecked_t num_locks;
35220+ atomic_unchecked_t num_acl_get;
35221+ atomic_unchecked_t num_acl_set;
35222 #ifdef CONFIG_CIFS_STATS2
35223 unsigned long long time_writes;
35224 unsigned long long time_reads;
35225@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
35226 }
35227
35228 #ifdef CONFIG_CIFS_STATS
35229-#define cifs_stats_inc atomic_inc
35230+#define cifs_stats_inc atomic_inc_unchecked
35231
35232 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
35233 unsigned int bytes)
35234@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
35235 /* Various Debug counters */
35236 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
35237 #ifdef CONFIG_CIFS_STATS2
35238-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
35239-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
35240+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
35241+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
35242 #endif
35243 GLOBAL_EXTERN atomic_t smBufAllocCount;
35244 GLOBAL_EXTERN atomic_t midCount;
35245diff -urNp linux-3.0.3/fs/cifs/link.c linux-3.0.3/fs/cifs/link.c
35246--- linux-3.0.3/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
35247+++ linux-3.0.3/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
35248@@ -587,7 +587,7 @@ symlink_exit:
35249
35250 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35251 {
35252- char *p = nd_get_link(nd);
35253+ const char *p = nd_get_link(nd);
35254 if (!IS_ERR(p))
35255 kfree(p);
35256 }
35257diff -urNp linux-3.0.3/fs/cifs/misc.c linux-3.0.3/fs/cifs/misc.c
35258--- linux-3.0.3/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
35259+++ linux-3.0.3/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
35260@@ -156,7 +156,7 @@ cifs_buf_get(void)
35261 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
35262 atomic_inc(&bufAllocCount);
35263 #ifdef CONFIG_CIFS_STATS2
35264- atomic_inc(&totBufAllocCount);
35265+ atomic_inc_unchecked(&totBufAllocCount);
35266 #endif /* CONFIG_CIFS_STATS2 */
35267 }
35268
35269@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
35270 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
35271 atomic_inc(&smBufAllocCount);
35272 #ifdef CONFIG_CIFS_STATS2
35273- atomic_inc(&totSmBufAllocCount);
35274+ atomic_inc_unchecked(&totSmBufAllocCount);
35275 #endif /* CONFIG_CIFS_STATS2 */
35276
35277 }
35278diff -urNp linux-3.0.3/fs/coda/cache.c linux-3.0.3/fs/coda/cache.c
35279--- linux-3.0.3/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35280+++ linux-3.0.3/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35281@@ -24,7 +24,7 @@
35282 #include "coda_linux.h"
35283 #include "coda_cache.h"
35284
35285-static atomic_t permission_epoch = ATOMIC_INIT(0);
35286+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35287
35288 /* replace or extend an acl cache hit */
35289 void coda_cache_enter(struct inode *inode, int mask)
35290@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35291 struct coda_inode_info *cii = ITOC(inode);
35292
35293 spin_lock(&cii->c_lock);
35294- cii->c_cached_epoch = atomic_read(&permission_epoch);
35295+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35296 if (cii->c_uid != current_fsuid()) {
35297 cii->c_uid = current_fsuid();
35298 cii->c_cached_perm = mask;
35299@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35300 {
35301 struct coda_inode_info *cii = ITOC(inode);
35302 spin_lock(&cii->c_lock);
35303- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35304+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35305 spin_unlock(&cii->c_lock);
35306 }
35307
35308 /* remove all acl caches */
35309 void coda_cache_clear_all(struct super_block *sb)
35310 {
35311- atomic_inc(&permission_epoch);
35312+ atomic_inc_unchecked(&permission_epoch);
35313 }
35314
35315
35316@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35317 spin_lock(&cii->c_lock);
35318 hit = (mask & cii->c_cached_perm) == mask &&
35319 cii->c_uid == current_fsuid() &&
35320- cii->c_cached_epoch == atomic_read(&permission_epoch);
35321+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35322 spin_unlock(&cii->c_lock);
35323
35324 return hit;
35325diff -urNp linux-3.0.3/fs/compat_binfmt_elf.c linux-3.0.3/fs/compat_binfmt_elf.c
35326--- linux-3.0.3/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35327+++ linux-3.0.3/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35328@@ -30,11 +30,13 @@
35329 #undef elf_phdr
35330 #undef elf_shdr
35331 #undef elf_note
35332+#undef elf_dyn
35333 #undef elf_addr_t
35334 #define elfhdr elf32_hdr
35335 #define elf_phdr elf32_phdr
35336 #define elf_shdr elf32_shdr
35337 #define elf_note elf32_note
35338+#define elf_dyn Elf32_Dyn
35339 #define elf_addr_t Elf32_Addr
35340
35341 /*
35342diff -urNp linux-3.0.3/fs/compat.c linux-3.0.3/fs/compat.c
35343--- linux-3.0.3/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35344+++ linux-3.0.3/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35345@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35346 goto out;
35347
35348 ret = -EINVAL;
35349- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35350+ if (nr_segs > UIO_MAXIOV)
35351 goto out;
35352 if (nr_segs > fast_segs) {
35353 ret = -ENOMEM;
35354@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35355
35356 struct compat_readdir_callback {
35357 struct compat_old_linux_dirent __user *dirent;
35358+ struct file * file;
35359 int result;
35360 };
35361
35362@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35363 buf->result = -EOVERFLOW;
35364 return -EOVERFLOW;
35365 }
35366+
35367+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35368+ return 0;
35369+
35370 buf->result++;
35371 dirent = buf->dirent;
35372 if (!access_ok(VERIFY_WRITE, dirent,
35373@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35374
35375 buf.result = 0;
35376 buf.dirent = dirent;
35377+ buf.file = file;
35378
35379 error = vfs_readdir(file, compat_fillonedir, &buf);
35380 if (buf.result)
35381@@ -917,6 +923,7 @@ struct compat_linux_dirent {
35382 struct compat_getdents_callback {
35383 struct compat_linux_dirent __user *current_dir;
35384 struct compat_linux_dirent __user *previous;
35385+ struct file * file;
35386 int count;
35387 int error;
35388 };
35389@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35390 buf->error = -EOVERFLOW;
35391 return -EOVERFLOW;
35392 }
35393+
35394+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35395+ return 0;
35396+
35397 dirent = buf->previous;
35398 if (dirent) {
35399 if (__put_user(offset, &dirent->d_off))
35400@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35401 buf.previous = NULL;
35402 buf.count = count;
35403 buf.error = 0;
35404+ buf.file = file;
35405
35406 error = vfs_readdir(file, compat_filldir, &buf);
35407 if (error >= 0)
35408@@ -1006,6 +1018,7 @@ out:
35409 struct compat_getdents_callback64 {
35410 struct linux_dirent64 __user *current_dir;
35411 struct linux_dirent64 __user *previous;
35412+ struct file * file;
35413 int count;
35414 int error;
35415 };
35416@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35417 buf->error = -EINVAL; /* only used if we fail.. */
35418 if (reclen > buf->count)
35419 return -EINVAL;
35420+
35421+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35422+ return 0;
35423+
35424 dirent = buf->previous;
35425
35426 if (dirent) {
35427@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35428 buf.previous = NULL;
35429 buf.count = count;
35430 buf.error = 0;
35431+ buf.file = file;
35432
35433 error = vfs_readdir(file, compat_filldir64, &buf);
35434 if (error >= 0)
35435@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35436 struct fdtable *fdt;
35437 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35438
35439+ pax_track_stack();
35440+
35441 if (n < 0)
35442 goto out_nofds;
35443
35444diff -urNp linux-3.0.3/fs/compat_ioctl.c linux-3.0.3/fs/compat_ioctl.c
35445--- linux-3.0.3/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35446+++ linux-3.0.3/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35447@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35448
35449 err = get_user(palp, &up->palette);
35450 err |= get_user(length, &up->length);
35451+ if (err)
35452+ return -EFAULT;
35453
35454 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35455 err = put_user(compat_ptr(palp), &up_native->palette);
35456@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35457 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35458 {
35459 unsigned int a, b;
35460- a = *(unsigned int *)p;
35461- b = *(unsigned int *)q;
35462+ a = *(const unsigned int *)p;
35463+ b = *(const unsigned int *)q;
35464 if (a > b)
35465 return 1;
35466 if (a < b)
35467diff -urNp linux-3.0.3/fs/configfs/dir.c linux-3.0.3/fs/configfs/dir.c
35468--- linux-3.0.3/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35469+++ linux-3.0.3/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35470@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35471 }
35472 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35473 struct configfs_dirent *next;
35474- const char * name;
35475+ const unsigned char * name;
35476+ char d_name[sizeof(next->s_dentry->d_iname)];
35477 int len;
35478 struct inode *inode = NULL;
35479
35480@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35481 continue;
35482
35483 name = configfs_get_name(next);
35484- len = strlen(name);
35485+ if (next->s_dentry && name == next->s_dentry->d_iname) {
35486+ len = next->s_dentry->d_name.len;
35487+ memcpy(d_name, name, len);
35488+ name = d_name;
35489+ } else
35490+ len = strlen(name);
35491
35492 /*
35493 * We'll have a dentry and an inode for
35494diff -urNp linux-3.0.3/fs/dcache.c linux-3.0.3/fs/dcache.c
35495--- linux-3.0.3/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35496+++ linux-3.0.3/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35497@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35498 mempages -= reserve;
35499
35500 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35501- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35502+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35503
35504 dcache_init();
35505 inode_init();
35506diff -urNp linux-3.0.3/fs/ecryptfs/inode.c linux-3.0.3/fs/ecryptfs/inode.c
35507--- linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35508+++ linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35509@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35510 old_fs = get_fs();
35511 set_fs(get_ds());
35512 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35513- (char __user *)lower_buf,
35514+ (__force char __user *)lower_buf,
35515 lower_bufsiz);
35516 set_fs(old_fs);
35517 if (rc < 0)
35518@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35519 }
35520 old_fs = get_fs();
35521 set_fs(get_ds());
35522- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35523+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35524 set_fs(old_fs);
35525 if (rc < 0) {
35526 kfree(buf);
35527@@ -765,7 +765,7 @@ out:
35528 static void
35529 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35530 {
35531- char *buf = nd_get_link(nd);
35532+ const char *buf = nd_get_link(nd);
35533 if (!IS_ERR(buf)) {
35534 /* Free the char* */
35535 kfree(buf);
35536diff -urNp linux-3.0.3/fs/ecryptfs/miscdev.c linux-3.0.3/fs/ecryptfs/miscdev.c
35537--- linux-3.0.3/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35538+++ linux-3.0.3/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35539@@ -328,7 +328,7 @@ check_list:
35540 goto out_unlock_msg_ctx;
35541 i = 5;
35542 if (msg_ctx->msg) {
35543- if (copy_to_user(&buf[i], packet_length, packet_length_size))
35544+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35545 goto out_unlock_msg_ctx;
35546 i += packet_length_size;
35547 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35548diff -urNp linux-3.0.3/fs/exec.c linux-3.0.3/fs/exec.c
35549--- linux-3.0.3/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35550+++ linux-3.0.3/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
35551@@ -55,12 +55,24 @@
35552 #include <linux/pipe_fs_i.h>
35553 #include <linux/oom.h>
35554 #include <linux/compat.h>
35555+#include <linux/random.h>
35556+#include <linux/seq_file.h>
35557+
35558+#ifdef CONFIG_PAX_REFCOUNT
35559+#include <linux/kallsyms.h>
35560+#include <linux/kdebug.h>
35561+#endif
35562
35563 #include <asm/uaccess.h>
35564 #include <asm/mmu_context.h>
35565 #include <asm/tlb.h>
35566 #include "internal.h"
35567
35568+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35569+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35570+EXPORT_SYMBOL(pax_set_initial_flags_func);
35571+#endif
35572+
35573 int core_uses_pid;
35574 char core_pattern[CORENAME_MAX_SIZE] = "core";
35575 unsigned int core_pipe_limit;
35576@@ -70,7 +82,7 @@ struct core_name {
35577 char *corename;
35578 int used, size;
35579 };
35580-static atomic_t call_count = ATOMIC_INIT(1);
35581+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35582
35583 /* The maximal length of core_pattern is also specified in sysctl.c */
35584
35585@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35586 char *tmp = getname(library);
35587 int error = PTR_ERR(tmp);
35588 static const struct open_flags uselib_flags = {
35589- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35590+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35591 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35592 .intent = LOOKUP_OPEN
35593 };
35594@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35595 int write)
35596 {
35597 struct page *page;
35598- int ret;
35599
35600-#ifdef CONFIG_STACK_GROWSUP
35601- if (write) {
35602- ret = expand_downwards(bprm->vma, pos);
35603- if (ret < 0)
35604- return NULL;
35605- }
35606-#endif
35607- ret = get_user_pages(current, bprm->mm, pos,
35608- 1, write, 1, &page, NULL);
35609- if (ret <= 0)
35610+ if (0 > expand_downwards(bprm->vma, pos))
35611+ return NULL;
35612+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35613 return NULL;
35614
35615 if (write) {
35616@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35617 vma->vm_end = STACK_TOP_MAX;
35618 vma->vm_start = vma->vm_end - PAGE_SIZE;
35619 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35620+
35621+#ifdef CONFIG_PAX_SEGMEXEC
35622+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35623+#endif
35624+
35625 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35626 INIT_LIST_HEAD(&vma->anon_vma_chain);
35627
35628@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35629 mm->stack_vm = mm->total_vm = 1;
35630 up_write(&mm->mmap_sem);
35631 bprm->p = vma->vm_end - sizeof(void *);
35632+
35633+#ifdef CONFIG_PAX_RANDUSTACK
35634+ if (randomize_va_space)
35635+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35636+#endif
35637+
35638 return 0;
35639 err:
35640 up_write(&mm->mmap_sem);
35641@@ -403,19 +418,7 @@ err:
35642 return err;
35643 }
35644
35645-struct user_arg_ptr {
35646-#ifdef CONFIG_COMPAT
35647- bool is_compat;
35648-#endif
35649- union {
35650- const char __user *const __user *native;
35651-#ifdef CONFIG_COMPAT
35652- compat_uptr_t __user *compat;
35653-#endif
35654- } ptr;
35655-};
35656-
35657-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35658+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35659 {
35660 const char __user *native;
35661
35662@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35663 int r;
35664 mm_segment_t oldfs = get_fs();
35665 struct user_arg_ptr argv = {
35666- .ptr.native = (const char __user *const __user *)__argv,
35667+ .ptr.native = (__force const char __user *const __user *)__argv,
35668 };
35669
35670 set_fs(KERNEL_DS);
35671@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35672 unsigned long new_end = old_end - shift;
35673 struct mmu_gather tlb;
35674
35675- BUG_ON(new_start > new_end);
35676+ if (new_start >= new_end || new_start < mmap_min_addr)
35677+ return -ENOMEM;
35678
35679 /*
35680 * ensure there are no vmas between where we want to go
35681@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35682 if (vma != find_vma(mm, new_start))
35683 return -EFAULT;
35684
35685+#ifdef CONFIG_PAX_SEGMEXEC
35686+ BUG_ON(pax_find_mirror_vma(vma));
35687+#endif
35688+
35689 /*
35690 * cover the whole range: [new_start, old_end)
35691 */
35692@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35693 stack_top = arch_align_stack(stack_top);
35694 stack_top = PAGE_ALIGN(stack_top);
35695
35696- if (unlikely(stack_top < mmap_min_addr) ||
35697- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35698- return -ENOMEM;
35699-
35700 stack_shift = vma->vm_end - stack_top;
35701
35702 bprm->p -= stack_shift;
35703@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35704 bprm->exec -= stack_shift;
35705
35706 down_write(&mm->mmap_sem);
35707+
35708+ /* Move stack pages down in memory. */
35709+ if (stack_shift) {
35710+ ret = shift_arg_pages(vma, stack_shift);
35711+ if (ret)
35712+ goto out_unlock;
35713+ }
35714+
35715 vm_flags = VM_STACK_FLAGS;
35716
35717+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35718+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35719+ vm_flags &= ~VM_EXEC;
35720+
35721+#ifdef CONFIG_PAX_MPROTECT
35722+ if (mm->pax_flags & MF_PAX_MPROTECT)
35723+ vm_flags &= ~VM_MAYEXEC;
35724+#endif
35725+
35726+ }
35727+#endif
35728+
35729 /*
35730 * Adjust stack execute permissions; explicitly enable for
35731 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35732@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35733 goto out_unlock;
35734 BUG_ON(prev != vma);
35735
35736- /* Move stack pages down in memory. */
35737- if (stack_shift) {
35738- ret = shift_arg_pages(vma, stack_shift);
35739- if (ret)
35740- goto out_unlock;
35741- }
35742-
35743 /* mprotect_fixup is overkill to remove the temporary stack flags */
35744 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35745
35746@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35747 struct file *file;
35748 int err;
35749 static const struct open_flags open_exec_flags = {
35750- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35751+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35752 .acc_mode = MAY_EXEC | MAY_OPEN,
35753 .intent = LOOKUP_OPEN
35754 };
35755@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35756 old_fs = get_fs();
35757 set_fs(get_ds());
35758 /* The cast to a user pointer is valid due to the set_fs() */
35759- result = vfs_read(file, (void __user *)addr, count, &pos);
35760+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
35761 set_fs(old_fs);
35762 return result;
35763 }
35764@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35765 }
35766 rcu_read_unlock();
35767
35768- if (p->fs->users > n_fs) {
35769+ if (atomic_read(&p->fs->users) > n_fs) {
35770 bprm->unsafe |= LSM_UNSAFE_SHARE;
35771 } else {
35772 res = -EAGAIN;
35773@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
35774 struct user_arg_ptr envp,
35775 struct pt_regs *regs)
35776 {
35777+#ifdef CONFIG_GRKERNSEC
35778+ struct file *old_exec_file;
35779+ struct acl_subject_label *old_acl;
35780+ struct rlimit old_rlim[RLIM_NLIMITS];
35781+#endif
35782 struct linux_binprm *bprm;
35783 struct file *file;
35784 struct files_struct *displaced;
35785 bool clear_in_exec;
35786 int retval;
35787+ const struct cred *cred = current_cred();
35788+
35789+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35790+
35791+ /*
35792+ * We move the actual failure in case of RLIMIT_NPROC excess from
35793+ * set*uid() to execve() because too many poorly written programs
35794+ * don't check setuid() return code. Here we additionally recheck
35795+ * whether NPROC limit is still exceeded.
35796+ */
35797+ if ((current->flags & PF_NPROC_EXCEEDED) &&
35798+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
35799+ retval = -EAGAIN;
35800+ goto out_ret;
35801+ }
35802+
35803+ /* We're below the limit (still or again), so we don't want to make
35804+ * further execve() calls fail. */
35805+ current->flags &= ~PF_NPROC_EXCEEDED;
35806
35807 retval = unshare_files(&displaced);
35808 if (retval)
35809@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
35810 bprm->filename = filename;
35811 bprm->interp = filename;
35812
35813+ if (gr_process_user_ban()) {
35814+ retval = -EPERM;
35815+ goto out_file;
35816+ }
35817+
35818+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35819+ retval = -EACCES;
35820+ goto out_file;
35821+ }
35822+
35823 retval = bprm_mm_init(bprm);
35824 if (retval)
35825 goto out_file;
35826@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
35827 if (retval < 0)
35828 goto out;
35829
35830+ if (!gr_tpe_allow(file)) {
35831+ retval = -EACCES;
35832+ goto out;
35833+ }
35834+
35835+ if (gr_check_crash_exec(file)) {
35836+ retval = -EACCES;
35837+ goto out;
35838+ }
35839+
35840+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35841+
35842+ gr_handle_exec_args(bprm, argv);
35843+
35844+#ifdef CONFIG_GRKERNSEC
35845+ old_acl = current->acl;
35846+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35847+ old_exec_file = current->exec_file;
35848+ get_file(file);
35849+ current->exec_file = file;
35850+#endif
35851+
35852+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35853+ bprm->unsafe & LSM_UNSAFE_SHARE);
35854+ if (retval < 0)
35855+ goto out_fail;
35856+
35857 retval = search_binary_handler(bprm,regs);
35858 if (retval < 0)
35859- goto out;
35860+ goto out_fail;
35861+#ifdef CONFIG_GRKERNSEC
35862+ if (old_exec_file)
35863+ fput(old_exec_file);
35864+#endif
35865
35866 /* execve succeeded */
35867 current->fs->in_exec = 0;
35868@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
35869 put_files_struct(displaced);
35870 return retval;
35871
35872+out_fail:
35873+#ifdef CONFIG_GRKERNSEC
35874+ current->acl = old_acl;
35875+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35876+ fput(current->exec_file);
35877+ current->exec_file = old_exec_file;
35878+#endif
35879+
35880 out:
35881 if (bprm->mm) {
35882 acct_arg_size(bprm, 0);
35883@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
35884 {
35885 char *old_corename = cn->corename;
35886
35887- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35888+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35889 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35890
35891 if (!cn->corename) {
35892@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
35893 int pid_in_pattern = 0;
35894 int err = 0;
35895
35896- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35897+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35898 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35899 cn->used = 0;
35900
35901@@ -1758,6 +1848,219 @@ out:
35902 return ispipe;
35903 }
35904
35905+int pax_check_flags(unsigned long *flags)
35906+{
35907+ int retval = 0;
35908+
35909+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35910+ if (*flags & MF_PAX_SEGMEXEC)
35911+ {
35912+ *flags &= ~MF_PAX_SEGMEXEC;
35913+ retval = -EINVAL;
35914+ }
35915+#endif
35916+
35917+ if ((*flags & MF_PAX_PAGEEXEC)
35918+
35919+#ifdef CONFIG_PAX_PAGEEXEC
35920+ && (*flags & MF_PAX_SEGMEXEC)
35921+#endif
35922+
35923+ )
35924+ {
35925+ *flags &= ~MF_PAX_PAGEEXEC;
35926+ retval = -EINVAL;
35927+ }
35928+
35929+ if ((*flags & MF_PAX_MPROTECT)
35930+
35931+#ifdef CONFIG_PAX_MPROTECT
35932+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35933+#endif
35934+
35935+ )
35936+ {
35937+ *flags &= ~MF_PAX_MPROTECT;
35938+ retval = -EINVAL;
35939+ }
35940+
35941+ if ((*flags & MF_PAX_EMUTRAMP)
35942+
35943+#ifdef CONFIG_PAX_EMUTRAMP
35944+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35945+#endif
35946+
35947+ )
35948+ {
35949+ *flags &= ~MF_PAX_EMUTRAMP;
35950+ retval = -EINVAL;
35951+ }
35952+
35953+ return retval;
35954+}
35955+
35956+EXPORT_SYMBOL(pax_check_flags);
35957+
35958+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35959+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35960+{
35961+ struct task_struct *tsk = current;
35962+ struct mm_struct *mm = current->mm;
35963+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35964+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35965+ char *path_exec = NULL;
35966+ char *path_fault = NULL;
35967+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
35968+
35969+ if (buffer_exec && buffer_fault) {
35970+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35971+
35972+ down_read(&mm->mmap_sem);
35973+ vma = mm->mmap;
35974+ while (vma && (!vma_exec || !vma_fault)) {
35975+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35976+ vma_exec = vma;
35977+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35978+ vma_fault = vma;
35979+ vma = vma->vm_next;
35980+ }
35981+ if (vma_exec) {
35982+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
35983+ if (IS_ERR(path_exec))
35984+ path_exec = "<path too long>";
35985+ else {
35986+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
35987+ if (path_exec) {
35988+ *path_exec = 0;
35989+ path_exec = buffer_exec;
35990+ } else
35991+ path_exec = "<path too long>";
35992+ }
35993+ }
35994+ if (vma_fault) {
35995+ start = vma_fault->vm_start;
35996+ end = vma_fault->vm_end;
35997+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
35998+ if (vma_fault->vm_file) {
35999+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36000+ if (IS_ERR(path_fault))
36001+ path_fault = "<path too long>";
36002+ else {
36003+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36004+ if (path_fault) {
36005+ *path_fault = 0;
36006+ path_fault = buffer_fault;
36007+ } else
36008+ path_fault = "<path too long>";
36009+ }
36010+ } else
36011+ path_fault = "<anonymous mapping>";
36012+ }
36013+ up_read(&mm->mmap_sem);
36014+ }
36015+ if (tsk->signal->curr_ip)
36016+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36017+ else
36018+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36019+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36020+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36021+ task_uid(tsk), task_euid(tsk), pc, sp);
36022+ free_page((unsigned long)buffer_exec);
36023+ free_page((unsigned long)buffer_fault);
36024+ pax_report_insns(pc, sp);
36025+ do_coredump(SIGKILL, SIGKILL, regs);
36026+}
36027+#endif
36028+
36029+#ifdef CONFIG_PAX_REFCOUNT
36030+void pax_report_refcount_overflow(struct pt_regs *regs)
36031+{
36032+ if (current->signal->curr_ip)
36033+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36034+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36035+ else
36036+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36037+ current->comm, task_pid_nr(current), current_uid(), current_euid());
36038+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36039+ show_regs(regs);
36040+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36041+}
36042+#endif
36043+
36044+#ifdef CONFIG_PAX_USERCOPY
36045+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36046+int object_is_on_stack(const void *obj, unsigned long len)
36047+{
36048+ const void * const stack = task_stack_page(current);
36049+ const void * const stackend = stack + THREAD_SIZE;
36050+
36051+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36052+ const void *frame = NULL;
36053+ const void *oldframe;
36054+#endif
36055+
36056+ if (obj + len < obj)
36057+ return -1;
36058+
36059+ if (obj + len <= stack || stackend <= obj)
36060+ return 0;
36061+
36062+ if (obj < stack || stackend < obj + len)
36063+ return -1;
36064+
36065+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36066+ oldframe = __builtin_frame_address(1);
36067+ if (oldframe)
36068+ frame = __builtin_frame_address(2);
36069+ /*
36070+ low ----------------------------------------------> high
36071+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
36072+ ^----------------^
36073+ allow copies only within here
36074+ */
36075+ while (stack <= frame && frame < stackend) {
36076+ /* if obj + len extends past the last frame, this
36077+ check won't pass and the next frame will be 0,
36078+ causing us to bail out and correctly report
36079+ the copy as invalid
36080+ */
36081+ if (obj + len <= frame)
36082+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36083+ oldframe = frame;
36084+ frame = *(const void * const *)frame;
36085+ }
36086+ return -1;
36087+#else
36088+ return 1;
36089+#endif
36090+}
36091+
36092+
36093+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36094+{
36095+ if (current->signal->curr_ip)
36096+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36097+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36098+ else
36099+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36100+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36101+ dump_stack();
36102+ gr_handle_kernel_exploit();
36103+ do_group_exit(SIGKILL);
36104+}
36105+#endif
36106+
36107+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36108+void pax_track_stack(void)
36109+{
36110+ unsigned long sp = (unsigned long)&sp;
36111+ if (sp < current_thread_info()->lowest_stack &&
36112+ sp > (unsigned long)task_stack_page(current))
36113+ current_thread_info()->lowest_stack = sp;
36114+}
36115+EXPORT_SYMBOL(pax_track_stack);
36116+#endif
36117+
36118 static int zap_process(struct task_struct *start, int exit_code)
36119 {
36120 struct task_struct *t;
36121@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
36122 pipe = file->f_path.dentry->d_inode->i_pipe;
36123
36124 pipe_lock(pipe);
36125- pipe->readers++;
36126- pipe->writers--;
36127+ atomic_inc(&pipe->readers);
36128+ atomic_dec(&pipe->writers);
36129
36130- while ((pipe->readers > 1) && (!signal_pending(current))) {
36131+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36132 wake_up_interruptible_sync(&pipe->wait);
36133 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36134 pipe_wait(pipe);
36135 }
36136
36137- pipe->readers--;
36138- pipe->writers++;
36139+ atomic_dec(&pipe->readers);
36140+ atomic_inc(&pipe->writers);
36141 pipe_unlock(pipe);
36142
36143 }
36144@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
36145 int retval = 0;
36146 int flag = 0;
36147 int ispipe;
36148- static atomic_t core_dump_count = ATOMIC_INIT(0);
36149+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36150 struct coredump_params cprm = {
36151 .signr = signr,
36152 .regs = regs,
36153@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
36154
36155 audit_core_dumps(signr);
36156
36157+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36158+ gr_handle_brute_attach(current, cprm.mm_flags);
36159+
36160 binfmt = mm->binfmt;
36161 if (!binfmt || !binfmt->core_dump)
36162 goto fail;
36163@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
36164 goto fail_corename;
36165 }
36166
36167+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36168+
36169 if (ispipe) {
36170 int dump_count;
36171 char **helper_argv;
36172@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
36173 }
36174 cprm.limit = RLIM_INFINITY;
36175
36176- dump_count = atomic_inc_return(&core_dump_count);
36177+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
36178 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36179 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36180 task_tgid_vnr(current), current->comm);
36181@@ -2192,7 +2500,7 @@ close_fail:
36182 filp_close(cprm.file, NULL);
36183 fail_dropcount:
36184 if (ispipe)
36185- atomic_dec(&core_dump_count);
36186+ atomic_dec_unchecked(&core_dump_count);
36187 fail_unlock:
36188 kfree(cn.corename);
36189 fail_corename:
36190diff -urNp linux-3.0.3/fs/ext2/balloc.c linux-3.0.3/fs/ext2/balloc.c
36191--- linux-3.0.3/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
36192+++ linux-3.0.3/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
36193@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36194
36195 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36196 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36197- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36198+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36199 sbi->s_resuid != current_fsuid() &&
36200 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36201 return 0;
36202diff -urNp linux-3.0.3/fs/ext3/balloc.c linux-3.0.3/fs/ext3/balloc.c
36203--- linux-3.0.3/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
36204+++ linux-3.0.3/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
36205@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36206
36207 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36208 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36209- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36210+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36211 sbi->s_resuid != current_fsuid() &&
36212 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36213 return 0;
36214diff -urNp linux-3.0.3/fs/ext4/balloc.c linux-3.0.3/fs/ext4/balloc.c
36215--- linux-3.0.3/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
36216+++ linux-3.0.3/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
36217@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
36218 /* Hm, nope. Are (enough) root reserved blocks available? */
36219 if (sbi->s_resuid == current_fsuid() ||
36220 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36221- capable(CAP_SYS_RESOURCE) ||
36222- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
36223+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
36224+ capable_nolog(CAP_SYS_RESOURCE)) {
36225
36226 if (free_blocks >= (nblocks + dirty_blocks))
36227 return 1;
36228diff -urNp linux-3.0.3/fs/ext4/ext4.h linux-3.0.3/fs/ext4/ext4.h
36229--- linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
36230+++ linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
36231@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
36232 unsigned long s_mb_last_start;
36233
36234 /* stats for buddy allocator */
36235- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36236- atomic_t s_bal_success; /* we found long enough chunks */
36237- atomic_t s_bal_allocated; /* in blocks */
36238- atomic_t s_bal_ex_scanned; /* total extents scanned */
36239- atomic_t s_bal_goals; /* goal hits */
36240- atomic_t s_bal_breaks; /* too long searches */
36241- atomic_t s_bal_2orders; /* 2^order hits */
36242+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36243+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36244+ atomic_unchecked_t s_bal_allocated; /* in blocks */
36245+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36246+ atomic_unchecked_t s_bal_goals; /* goal hits */
36247+ atomic_unchecked_t s_bal_breaks; /* too long searches */
36248+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36249 spinlock_t s_bal_lock;
36250 unsigned long s_mb_buddies_generated;
36251 unsigned long long s_mb_generation_time;
36252- atomic_t s_mb_lost_chunks;
36253- atomic_t s_mb_preallocated;
36254- atomic_t s_mb_discarded;
36255+ atomic_unchecked_t s_mb_lost_chunks;
36256+ atomic_unchecked_t s_mb_preallocated;
36257+ atomic_unchecked_t s_mb_discarded;
36258 atomic_t s_lock_busy;
36259
36260 /* locality groups */
36261diff -urNp linux-3.0.3/fs/ext4/mballoc.c linux-3.0.3/fs/ext4/mballoc.c
36262--- linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
36263+++ linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
36264@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
36265 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36266
36267 if (EXT4_SB(sb)->s_mb_stats)
36268- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36269+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36270
36271 break;
36272 }
36273@@ -2087,7 +2087,7 @@ repeat:
36274 ac->ac_status = AC_STATUS_CONTINUE;
36275 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36276 cr = 3;
36277- atomic_inc(&sbi->s_mb_lost_chunks);
36278+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36279 goto repeat;
36280 }
36281 }
36282@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
36283 ext4_grpblk_t counters[16];
36284 } sg;
36285
36286+ pax_track_stack();
36287+
36288 group--;
36289 if (group == 0)
36290 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36291@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36292 if (sbi->s_mb_stats) {
36293 printk(KERN_INFO
36294 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36295- atomic_read(&sbi->s_bal_allocated),
36296- atomic_read(&sbi->s_bal_reqs),
36297- atomic_read(&sbi->s_bal_success));
36298+ atomic_read_unchecked(&sbi->s_bal_allocated),
36299+ atomic_read_unchecked(&sbi->s_bal_reqs),
36300+ atomic_read_unchecked(&sbi->s_bal_success));
36301 printk(KERN_INFO
36302 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36303 "%u 2^N hits, %u breaks, %u lost\n",
36304- atomic_read(&sbi->s_bal_ex_scanned),
36305- atomic_read(&sbi->s_bal_goals),
36306- atomic_read(&sbi->s_bal_2orders),
36307- atomic_read(&sbi->s_bal_breaks),
36308- atomic_read(&sbi->s_mb_lost_chunks));
36309+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36310+ atomic_read_unchecked(&sbi->s_bal_goals),
36311+ atomic_read_unchecked(&sbi->s_bal_2orders),
36312+ atomic_read_unchecked(&sbi->s_bal_breaks),
36313+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36314 printk(KERN_INFO
36315 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36316 sbi->s_mb_buddies_generated++,
36317 sbi->s_mb_generation_time);
36318 printk(KERN_INFO
36319 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36320- atomic_read(&sbi->s_mb_preallocated),
36321- atomic_read(&sbi->s_mb_discarded));
36322+ atomic_read_unchecked(&sbi->s_mb_preallocated),
36323+ atomic_read_unchecked(&sbi->s_mb_discarded));
36324 }
36325
36326 free_percpu(sbi->s_locality_groups);
36327@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36328 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36329
36330 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36331- atomic_inc(&sbi->s_bal_reqs);
36332- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36333+ atomic_inc_unchecked(&sbi->s_bal_reqs);
36334+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36335 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36336- atomic_inc(&sbi->s_bal_success);
36337- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36338+ atomic_inc_unchecked(&sbi->s_bal_success);
36339+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36340 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36341 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36342- atomic_inc(&sbi->s_bal_goals);
36343+ atomic_inc_unchecked(&sbi->s_bal_goals);
36344 if (ac->ac_found > sbi->s_mb_max_to_scan)
36345- atomic_inc(&sbi->s_bal_breaks);
36346+ atomic_inc_unchecked(&sbi->s_bal_breaks);
36347 }
36348
36349 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36350@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36351 trace_ext4_mb_new_inode_pa(ac, pa);
36352
36353 ext4_mb_use_inode_pa(ac, pa);
36354- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36355+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36356
36357 ei = EXT4_I(ac->ac_inode);
36358 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36359@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36360 trace_ext4_mb_new_group_pa(ac, pa);
36361
36362 ext4_mb_use_group_pa(ac, pa);
36363- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36364+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36365
36366 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36367 lg = ac->ac_lg;
36368@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36369 * from the bitmap and continue.
36370 */
36371 }
36372- atomic_add(free, &sbi->s_mb_discarded);
36373+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
36374
36375 return err;
36376 }
36377@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36378 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36379 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36380 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36381- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36382+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36383 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36384
36385 return 0;
36386diff -urNp linux-3.0.3/fs/fcntl.c linux-3.0.3/fs/fcntl.c
36387--- linux-3.0.3/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36388+++ linux-3.0.3/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36389@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36390 if (err)
36391 return err;
36392
36393+ if (gr_handle_chroot_fowner(pid, type))
36394+ return -ENOENT;
36395+ if (gr_check_protected_task_fowner(pid, type))
36396+ return -EACCES;
36397+
36398 f_modown(filp, pid, type, force);
36399 return 0;
36400 }
36401@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36402 switch (cmd) {
36403 case F_DUPFD:
36404 case F_DUPFD_CLOEXEC:
36405+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36406 if (arg >= rlimit(RLIMIT_NOFILE))
36407 break;
36408 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36409@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36410 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36411 * is defined as O_NONBLOCK on some platforms and not on others.
36412 */
36413- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36414+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36415 O_RDONLY | O_WRONLY | O_RDWR |
36416 O_CREAT | O_EXCL | O_NOCTTY |
36417 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36418 __O_SYNC | O_DSYNC | FASYNC |
36419 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36420 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36421- __FMODE_EXEC | O_PATH
36422+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
36423 ));
36424
36425 fasync_cache = kmem_cache_create("fasync_cache",
36426diff -urNp linux-3.0.3/fs/fifo.c linux-3.0.3/fs/fifo.c
36427--- linux-3.0.3/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36428+++ linux-3.0.3/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36429@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36430 */
36431 filp->f_op = &read_pipefifo_fops;
36432 pipe->r_counter++;
36433- if (pipe->readers++ == 0)
36434+ if (atomic_inc_return(&pipe->readers) == 1)
36435 wake_up_partner(inode);
36436
36437- if (!pipe->writers) {
36438+ if (!atomic_read(&pipe->writers)) {
36439 if ((filp->f_flags & O_NONBLOCK)) {
36440 /* suppress POLLHUP until we have
36441 * seen a writer */
36442@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36443 * errno=ENXIO when there is no process reading the FIFO.
36444 */
36445 ret = -ENXIO;
36446- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36447+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36448 goto err;
36449
36450 filp->f_op = &write_pipefifo_fops;
36451 pipe->w_counter++;
36452- if (!pipe->writers++)
36453+ if (atomic_inc_return(&pipe->writers) == 1)
36454 wake_up_partner(inode);
36455
36456- if (!pipe->readers) {
36457+ if (!atomic_read(&pipe->readers)) {
36458 wait_for_partner(inode, &pipe->r_counter);
36459 if (signal_pending(current))
36460 goto err_wr;
36461@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36462 */
36463 filp->f_op = &rdwr_pipefifo_fops;
36464
36465- pipe->readers++;
36466- pipe->writers++;
36467+ atomic_inc(&pipe->readers);
36468+ atomic_inc(&pipe->writers);
36469 pipe->r_counter++;
36470 pipe->w_counter++;
36471- if (pipe->readers == 1 || pipe->writers == 1)
36472+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36473 wake_up_partner(inode);
36474 break;
36475
36476@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36477 return 0;
36478
36479 err_rd:
36480- if (!--pipe->readers)
36481+ if (atomic_dec_and_test(&pipe->readers))
36482 wake_up_interruptible(&pipe->wait);
36483 ret = -ERESTARTSYS;
36484 goto err;
36485
36486 err_wr:
36487- if (!--pipe->writers)
36488+ if (atomic_dec_and_test(&pipe->writers))
36489 wake_up_interruptible(&pipe->wait);
36490 ret = -ERESTARTSYS;
36491 goto err;
36492
36493 err:
36494- if (!pipe->readers && !pipe->writers)
36495+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36496 free_pipe_info(inode);
36497
36498 err_nocleanup:
36499diff -urNp linux-3.0.3/fs/file.c linux-3.0.3/fs/file.c
36500--- linux-3.0.3/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36501+++ linux-3.0.3/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36502@@ -15,6 +15,7 @@
36503 #include <linux/slab.h>
36504 #include <linux/vmalloc.h>
36505 #include <linux/file.h>
36506+#include <linux/security.h>
36507 #include <linux/fdtable.h>
36508 #include <linux/bitops.h>
36509 #include <linux/interrupt.h>
36510@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36511 * N.B. For clone tasks sharing a files structure, this test
36512 * will limit the total number of files that can be opened.
36513 */
36514+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36515 if (nr >= rlimit(RLIMIT_NOFILE))
36516 return -EMFILE;
36517
36518diff -urNp linux-3.0.3/fs/filesystems.c linux-3.0.3/fs/filesystems.c
36519--- linux-3.0.3/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36520+++ linux-3.0.3/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36521@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36522 int len = dot ? dot - name : strlen(name);
36523
36524 fs = __get_fs_type(name, len);
36525+
36526+#ifdef CONFIG_GRKERNSEC_MODHARDEN
36527+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36528+#else
36529 if (!fs && (request_module("%.*s", len, name) == 0))
36530+#endif
36531 fs = __get_fs_type(name, len);
36532
36533 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36534diff -urNp linux-3.0.3/fs/fscache/cookie.c linux-3.0.3/fs/fscache/cookie.c
36535--- linux-3.0.3/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36536+++ linux-3.0.3/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36537@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36538 parent ? (char *) parent->def->name : "<no-parent>",
36539 def->name, netfs_data);
36540
36541- fscache_stat(&fscache_n_acquires);
36542+ fscache_stat_unchecked(&fscache_n_acquires);
36543
36544 /* if there's no parent cookie, then we don't create one here either */
36545 if (!parent) {
36546- fscache_stat(&fscache_n_acquires_null);
36547+ fscache_stat_unchecked(&fscache_n_acquires_null);
36548 _leave(" [no parent]");
36549 return NULL;
36550 }
36551@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36552 /* allocate and initialise a cookie */
36553 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36554 if (!cookie) {
36555- fscache_stat(&fscache_n_acquires_oom);
36556+ fscache_stat_unchecked(&fscache_n_acquires_oom);
36557 _leave(" [ENOMEM]");
36558 return NULL;
36559 }
36560@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36561
36562 switch (cookie->def->type) {
36563 case FSCACHE_COOKIE_TYPE_INDEX:
36564- fscache_stat(&fscache_n_cookie_index);
36565+ fscache_stat_unchecked(&fscache_n_cookie_index);
36566 break;
36567 case FSCACHE_COOKIE_TYPE_DATAFILE:
36568- fscache_stat(&fscache_n_cookie_data);
36569+ fscache_stat_unchecked(&fscache_n_cookie_data);
36570 break;
36571 default:
36572- fscache_stat(&fscache_n_cookie_special);
36573+ fscache_stat_unchecked(&fscache_n_cookie_special);
36574 break;
36575 }
36576
36577@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36578 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36579 atomic_dec(&parent->n_children);
36580 __fscache_cookie_put(cookie);
36581- fscache_stat(&fscache_n_acquires_nobufs);
36582+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36583 _leave(" = NULL");
36584 return NULL;
36585 }
36586 }
36587
36588- fscache_stat(&fscache_n_acquires_ok);
36589+ fscache_stat_unchecked(&fscache_n_acquires_ok);
36590 _leave(" = %p", cookie);
36591 return cookie;
36592 }
36593@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36594 cache = fscache_select_cache_for_object(cookie->parent);
36595 if (!cache) {
36596 up_read(&fscache_addremove_sem);
36597- fscache_stat(&fscache_n_acquires_no_cache);
36598+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36599 _leave(" = -ENOMEDIUM [no cache]");
36600 return -ENOMEDIUM;
36601 }
36602@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36603 object = cache->ops->alloc_object(cache, cookie);
36604 fscache_stat_d(&fscache_n_cop_alloc_object);
36605 if (IS_ERR(object)) {
36606- fscache_stat(&fscache_n_object_no_alloc);
36607+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
36608 ret = PTR_ERR(object);
36609 goto error;
36610 }
36611
36612- fscache_stat(&fscache_n_object_alloc);
36613+ fscache_stat_unchecked(&fscache_n_object_alloc);
36614
36615 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36616
36617@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36618 struct fscache_object *object;
36619 struct hlist_node *_p;
36620
36621- fscache_stat(&fscache_n_updates);
36622+ fscache_stat_unchecked(&fscache_n_updates);
36623
36624 if (!cookie) {
36625- fscache_stat(&fscache_n_updates_null);
36626+ fscache_stat_unchecked(&fscache_n_updates_null);
36627 _leave(" [no cookie]");
36628 return;
36629 }
36630@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36631 struct fscache_object *object;
36632 unsigned long event;
36633
36634- fscache_stat(&fscache_n_relinquishes);
36635+ fscache_stat_unchecked(&fscache_n_relinquishes);
36636 if (retire)
36637- fscache_stat(&fscache_n_relinquishes_retire);
36638+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36639
36640 if (!cookie) {
36641- fscache_stat(&fscache_n_relinquishes_null);
36642+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
36643 _leave(" [no cookie]");
36644 return;
36645 }
36646@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36647
36648 /* wait for the cookie to finish being instantiated (or to fail) */
36649 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36650- fscache_stat(&fscache_n_relinquishes_waitcrt);
36651+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36652 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36653 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36654 }
36655diff -urNp linux-3.0.3/fs/fscache/internal.h linux-3.0.3/fs/fscache/internal.h
36656--- linux-3.0.3/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36657+++ linux-3.0.3/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36658@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36659 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36660 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36661
36662-extern atomic_t fscache_n_op_pend;
36663-extern atomic_t fscache_n_op_run;
36664-extern atomic_t fscache_n_op_enqueue;
36665-extern atomic_t fscache_n_op_deferred_release;
36666-extern atomic_t fscache_n_op_release;
36667-extern atomic_t fscache_n_op_gc;
36668-extern atomic_t fscache_n_op_cancelled;
36669-extern atomic_t fscache_n_op_rejected;
36670-
36671-extern atomic_t fscache_n_attr_changed;
36672-extern atomic_t fscache_n_attr_changed_ok;
36673-extern atomic_t fscache_n_attr_changed_nobufs;
36674-extern atomic_t fscache_n_attr_changed_nomem;
36675-extern atomic_t fscache_n_attr_changed_calls;
36676-
36677-extern atomic_t fscache_n_allocs;
36678-extern atomic_t fscache_n_allocs_ok;
36679-extern atomic_t fscache_n_allocs_wait;
36680-extern atomic_t fscache_n_allocs_nobufs;
36681-extern atomic_t fscache_n_allocs_intr;
36682-extern atomic_t fscache_n_allocs_object_dead;
36683-extern atomic_t fscache_n_alloc_ops;
36684-extern atomic_t fscache_n_alloc_op_waits;
36685-
36686-extern atomic_t fscache_n_retrievals;
36687-extern atomic_t fscache_n_retrievals_ok;
36688-extern atomic_t fscache_n_retrievals_wait;
36689-extern atomic_t fscache_n_retrievals_nodata;
36690-extern atomic_t fscache_n_retrievals_nobufs;
36691-extern atomic_t fscache_n_retrievals_intr;
36692-extern atomic_t fscache_n_retrievals_nomem;
36693-extern atomic_t fscache_n_retrievals_object_dead;
36694-extern atomic_t fscache_n_retrieval_ops;
36695-extern atomic_t fscache_n_retrieval_op_waits;
36696-
36697-extern atomic_t fscache_n_stores;
36698-extern atomic_t fscache_n_stores_ok;
36699-extern atomic_t fscache_n_stores_again;
36700-extern atomic_t fscache_n_stores_nobufs;
36701-extern atomic_t fscache_n_stores_oom;
36702-extern atomic_t fscache_n_store_ops;
36703-extern atomic_t fscache_n_store_calls;
36704-extern atomic_t fscache_n_store_pages;
36705-extern atomic_t fscache_n_store_radix_deletes;
36706-extern atomic_t fscache_n_store_pages_over_limit;
36707-
36708-extern atomic_t fscache_n_store_vmscan_not_storing;
36709-extern atomic_t fscache_n_store_vmscan_gone;
36710-extern atomic_t fscache_n_store_vmscan_busy;
36711-extern atomic_t fscache_n_store_vmscan_cancelled;
36712-
36713-extern atomic_t fscache_n_marks;
36714-extern atomic_t fscache_n_uncaches;
36715-
36716-extern atomic_t fscache_n_acquires;
36717-extern atomic_t fscache_n_acquires_null;
36718-extern atomic_t fscache_n_acquires_no_cache;
36719-extern atomic_t fscache_n_acquires_ok;
36720-extern atomic_t fscache_n_acquires_nobufs;
36721-extern atomic_t fscache_n_acquires_oom;
36722-
36723-extern atomic_t fscache_n_updates;
36724-extern atomic_t fscache_n_updates_null;
36725-extern atomic_t fscache_n_updates_run;
36726-
36727-extern atomic_t fscache_n_relinquishes;
36728-extern atomic_t fscache_n_relinquishes_null;
36729-extern atomic_t fscache_n_relinquishes_waitcrt;
36730-extern atomic_t fscache_n_relinquishes_retire;
36731-
36732-extern atomic_t fscache_n_cookie_index;
36733-extern atomic_t fscache_n_cookie_data;
36734-extern atomic_t fscache_n_cookie_special;
36735-
36736-extern atomic_t fscache_n_object_alloc;
36737-extern atomic_t fscache_n_object_no_alloc;
36738-extern atomic_t fscache_n_object_lookups;
36739-extern atomic_t fscache_n_object_lookups_negative;
36740-extern atomic_t fscache_n_object_lookups_positive;
36741-extern atomic_t fscache_n_object_lookups_timed_out;
36742-extern atomic_t fscache_n_object_created;
36743-extern atomic_t fscache_n_object_avail;
36744-extern atomic_t fscache_n_object_dead;
36745-
36746-extern atomic_t fscache_n_checkaux_none;
36747-extern atomic_t fscache_n_checkaux_okay;
36748-extern atomic_t fscache_n_checkaux_update;
36749-extern atomic_t fscache_n_checkaux_obsolete;
36750+extern atomic_unchecked_t fscache_n_op_pend;
36751+extern atomic_unchecked_t fscache_n_op_run;
36752+extern atomic_unchecked_t fscache_n_op_enqueue;
36753+extern atomic_unchecked_t fscache_n_op_deferred_release;
36754+extern atomic_unchecked_t fscache_n_op_release;
36755+extern atomic_unchecked_t fscache_n_op_gc;
36756+extern atomic_unchecked_t fscache_n_op_cancelled;
36757+extern atomic_unchecked_t fscache_n_op_rejected;
36758+
36759+extern atomic_unchecked_t fscache_n_attr_changed;
36760+extern atomic_unchecked_t fscache_n_attr_changed_ok;
36761+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36762+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36763+extern atomic_unchecked_t fscache_n_attr_changed_calls;
36764+
36765+extern atomic_unchecked_t fscache_n_allocs;
36766+extern atomic_unchecked_t fscache_n_allocs_ok;
36767+extern atomic_unchecked_t fscache_n_allocs_wait;
36768+extern atomic_unchecked_t fscache_n_allocs_nobufs;
36769+extern atomic_unchecked_t fscache_n_allocs_intr;
36770+extern atomic_unchecked_t fscache_n_allocs_object_dead;
36771+extern atomic_unchecked_t fscache_n_alloc_ops;
36772+extern atomic_unchecked_t fscache_n_alloc_op_waits;
36773+
36774+extern atomic_unchecked_t fscache_n_retrievals;
36775+extern atomic_unchecked_t fscache_n_retrievals_ok;
36776+extern atomic_unchecked_t fscache_n_retrievals_wait;
36777+extern atomic_unchecked_t fscache_n_retrievals_nodata;
36778+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36779+extern atomic_unchecked_t fscache_n_retrievals_intr;
36780+extern atomic_unchecked_t fscache_n_retrievals_nomem;
36781+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36782+extern atomic_unchecked_t fscache_n_retrieval_ops;
36783+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36784+
36785+extern atomic_unchecked_t fscache_n_stores;
36786+extern atomic_unchecked_t fscache_n_stores_ok;
36787+extern atomic_unchecked_t fscache_n_stores_again;
36788+extern atomic_unchecked_t fscache_n_stores_nobufs;
36789+extern atomic_unchecked_t fscache_n_stores_oom;
36790+extern atomic_unchecked_t fscache_n_store_ops;
36791+extern atomic_unchecked_t fscache_n_store_calls;
36792+extern atomic_unchecked_t fscache_n_store_pages;
36793+extern atomic_unchecked_t fscache_n_store_radix_deletes;
36794+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36795+
36796+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36797+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36798+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36799+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36800+
36801+extern atomic_unchecked_t fscache_n_marks;
36802+extern atomic_unchecked_t fscache_n_uncaches;
36803+
36804+extern atomic_unchecked_t fscache_n_acquires;
36805+extern atomic_unchecked_t fscache_n_acquires_null;
36806+extern atomic_unchecked_t fscache_n_acquires_no_cache;
36807+extern atomic_unchecked_t fscache_n_acquires_ok;
36808+extern atomic_unchecked_t fscache_n_acquires_nobufs;
36809+extern atomic_unchecked_t fscache_n_acquires_oom;
36810+
36811+extern atomic_unchecked_t fscache_n_updates;
36812+extern atomic_unchecked_t fscache_n_updates_null;
36813+extern atomic_unchecked_t fscache_n_updates_run;
36814+
36815+extern atomic_unchecked_t fscache_n_relinquishes;
36816+extern atomic_unchecked_t fscache_n_relinquishes_null;
36817+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36818+extern atomic_unchecked_t fscache_n_relinquishes_retire;
36819+
36820+extern atomic_unchecked_t fscache_n_cookie_index;
36821+extern atomic_unchecked_t fscache_n_cookie_data;
36822+extern atomic_unchecked_t fscache_n_cookie_special;
36823+
36824+extern atomic_unchecked_t fscache_n_object_alloc;
36825+extern atomic_unchecked_t fscache_n_object_no_alloc;
36826+extern atomic_unchecked_t fscache_n_object_lookups;
36827+extern atomic_unchecked_t fscache_n_object_lookups_negative;
36828+extern atomic_unchecked_t fscache_n_object_lookups_positive;
36829+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36830+extern atomic_unchecked_t fscache_n_object_created;
36831+extern atomic_unchecked_t fscache_n_object_avail;
36832+extern atomic_unchecked_t fscache_n_object_dead;
36833+
36834+extern atomic_unchecked_t fscache_n_checkaux_none;
36835+extern atomic_unchecked_t fscache_n_checkaux_okay;
36836+extern atomic_unchecked_t fscache_n_checkaux_update;
36837+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36838
36839 extern atomic_t fscache_n_cop_alloc_object;
36840 extern atomic_t fscache_n_cop_lookup_object;
36841@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36842 atomic_inc(stat);
36843 }
36844
36845+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36846+{
36847+ atomic_inc_unchecked(stat);
36848+}
36849+
36850 static inline void fscache_stat_d(atomic_t *stat)
36851 {
36852 atomic_dec(stat);
36853@@ -267,6 +272,7 @@ extern const struct file_operations fsca
36854
36855 #define __fscache_stat(stat) (NULL)
36856 #define fscache_stat(stat) do {} while (0)
36857+#define fscache_stat_unchecked(stat) do {} while (0)
36858 #define fscache_stat_d(stat) do {} while (0)
36859 #endif
36860
36861diff -urNp linux-3.0.3/fs/fscache/object.c linux-3.0.3/fs/fscache/object.c
36862--- linux-3.0.3/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36863+++ linux-3.0.3/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36864@@ -128,7 +128,7 @@ static void fscache_object_state_machine
36865 /* update the object metadata on disk */
36866 case FSCACHE_OBJECT_UPDATING:
36867 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36868- fscache_stat(&fscache_n_updates_run);
36869+ fscache_stat_unchecked(&fscache_n_updates_run);
36870 fscache_stat(&fscache_n_cop_update_object);
36871 object->cache->ops->update_object(object);
36872 fscache_stat_d(&fscache_n_cop_update_object);
36873@@ -217,7 +217,7 @@ static void fscache_object_state_machine
36874 spin_lock(&object->lock);
36875 object->state = FSCACHE_OBJECT_DEAD;
36876 spin_unlock(&object->lock);
36877- fscache_stat(&fscache_n_object_dead);
36878+ fscache_stat_unchecked(&fscache_n_object_dead);
36879 goto terminal_transit;
36880
36881 /* handle the parent cache of this object being withdrawn from
36882@@ -232,7 +232,7 @@ static void fscache_object_state_machine
36883 spin_lock(&object->lock);
36884 object->state = FSCACHE_OBJECT_DEAD;
36885 spin_unlock(&object->lock);
36886- fscache_stat(&fscache_n_object_dead);
36887+ fscache_stat_unchecked(&fscache_n_object_dead);
36888 goto terminal_transit;
36889
36890 /* complain about the object being woken up once it is
36891@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36892 parent->cookie->def->name, cookie->def->name,
36893 object->cache->tag->name);
36894
36895- fscache_stat(&fscache_n_object_lookups);
36896+ fscache_stat_unchecked(&fscache_n_object_lookups);
36897 fscache_stat(&fscache_n_cop_lookup_object);
36898 ret = object->cache->ops->lookup_object(object);
36899 fscache_stat_d(&fscache_n_cop_lookup_object);
36900@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36901 if (ret == -ETIMEDOUT) {
36902 /* probably stuck behind another object, so move this one to
36903 * the back of the queue */
36904- fscache_stat(&fscache_n_object_lookups_timed_out);
36905+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36906 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36907 }
36908
36909@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36910
36911 spin_lock(&object->lock);
36912 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36913- fscache_stat(&fscache_n_object_lookups_negative);
36914+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36915
36916 /* transit here to allow write requests to begin stacking up
36917 * and read requests to begin returning ENODATA */
36918@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36919 * result, in which case there may be data available */
36920 spin_lock(&object->lock);
36921 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36922- fscache_stat(&fscache_n_object_lookups_positive);
36923+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36924
36925 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36926
36927@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36928 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36929 } else {
36930 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36931- fscache_stat(&fscache_n_object_created);
36932+ fscache_stat_unchecked(&fscache_n_object_created);
36933
36934 object->state = FSCACHE_OBJECT_AVAILABLE;
36935 spin_unlock(&object->lock);
36936@@ -602,7 +602,7 @@ static void fscache_object_available(str
36937 fscache_enqueue_dependents(object);
36938
36939 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36940- fscache_stat(&fscache_n_object_avail);
36941+ fscache_stat_unchecked(&fscache_n_object_avail);
36942
36943 _leave("");
36944 }
36945@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36946 enum fscache_checkaux result;
36947
36948 if (!object->cookie->def->check_aux) {
36949- fscache_stat(&fscache_n_checkaux_none);
36950+ fscache_stat_unchecked(&fscache_n_checkaux_none);
36951 return FSCACHE_CHECKAUX_OKAY;
36952 }
36953
36954@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36955 switch (result) {
36956 /* entry okay as is */
36957 case FSCACHE_CHECKAUX_OKAY:
36958- fscache_stat(&fscache_n_checkaux_okay);
36959+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
36960 break;
36961
36962 /* entry requires update */
36963 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36964- fscache_stat(&fscache_n_checkaux_update);
36965+ fscache_stat_unchecked(&fscache_n_checkaux_update);
36966 break;
36967
36968 /* entry requires deletion */
36969 case FSCACHE_CHECKAUX_OBSOLETE:
36970- fscache_stat(&fscache_n_checkaux_obsolete);
36971+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36972 break;
36973
36974 default:
36975diff -urNp linux-3.0.3/fs/fscache/operation.c linux-3.0.3/fs/fscache/operation.c
36976--- linux-3.0.3/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
36977+++ linux-3.0.3/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
36978@@ -17,7 +17,7 @@
36979 #include <linux/slab.h>
36980 #include "internal.h"
36981
36982-atomic_t fscache_op_debug_id;
36983+atomic_unchecked_t fscache_op_debug_id;
36984 EXPORT_SYMBOL(fscache_op_debug_id);
36985
36986 /**
36987@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
36988 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36989 ASSERTCMP(atomic_read(&op->usage), >, 0);
36990
36991- fscache_stat(&fscache_n_op_enqueue);
36992+ fscache_stat_unchecked(&fscache_n_op_enqueue);
36993 switch (op->flags & FSCACHE_OP_TYPE) {
36994 case FSCACHE_OP_ASYNC:
36995 _debug("queue async");
36996@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
36997 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
36998 if (op->processor)
36999 fscache_enqueue_operation(op);
37000- fscache_stat(&fscache_n_op_run);
37001+ fscache_stat_unchecked(&fscache_n_op_run);
37002 }
37003
37004 /*
37005@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
37006 if (object->n_ops > 1) {
37007 atomic_inc(&op->usage);
37008 list_add_tail(&op->pend_link, &object->pending_ops);
37009- fscache_stat(&fscache_n_op_pend);
37010+ fscache_stat_unchecked(&fscache_n_op_pend);
37011 } else if (!list_empty(&object->pending_ops)) {
37012 atomic_inc(&op->usage);
37013 list_add_tail(&op->pend_link, &object->pending_ops);
37014- fscache_stat(&fscache_n_op_pend);
37015+ fscache_stat_unchecked(&fscache_n_op_pend);
37016 fscache_start_operations(object);
37017 } else {
37018 ASSERTCMP(object->n_in_progress, ==, 0);
37019@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
37020 object->n_exclusive++; /* reads and writes must wait */
37021 atomic_inc(&op->usage);
37022 list_add_tail(&op->pend_link, &object->pending_ops);
37023- fscache_stat(&fscache_n_op_pend);
37024+ fscache_stat_unchecked(&fscache_n_op_pend);
37025 ret = 0;
37026 } else {
37027 /* not allowed to submit ops in any other state */
37028@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
37029 if (object->n_exclusive > 0) {
37030 atomic_inc(&op->usage);
37031 list_add_tail(&op->pend_link, &object->pending_ops);
37032- fscache_stat(&fscache_n_op_pend);
37033+ fscache_stat_unchecked(&fscache_n_op_pend);
37034 } else if (!list_empty(&object->pending_ops)) {
37035 atomic_inc(&op->usage);
37036 list_add_tail(&op->pend_link, &object->pending_ops);
37037- fscache_stat(&fscache_n_op_pend);
37038+ fscache_stat_unchecked(&fscache_n_op_pend);
37039 fscache_start_operations(object);
37040 } else {
37041 ASSERTCMP(object->n_exclusive, ==, 0);
37042@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
37043 object->n_ops++;
37044 atomic_inc(&op->usage);
37045 list_add_tail(&op->pend_link, &object->pending_ops);
37046- fscache_stat(&fscache_n_op_pend);
37047+ fscache_stat_unchecked(&fscache_n_op_pend);
37048 ret = 0;
37049 } else if (object->state == FSCACHE_OBJECT_DYING ||
37050 object->state == FSCACHE_OBJECT_LC_DYING ||
37051 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37052- fscache_stat(&fscache_n_op_rejected);
37053+ fscache_stat_unchecked(&fscache_n_op_rejected);
37054 ret = -ENOBUFS;
37055 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37056 fscache_report_unexpected_submission(object, op, ostate);
37057@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
37058
37059 ret = -EBUSY;
37060 if (!list_empty(&op->pend_link)) {
37061- fscache_stat(&fscache_n_op_cancelled);
37062+ fscache_stat_unchecked(&fscache_n_op_cancelled);
37063 list_del_init(&op->pend_link);
37064 object->n_ops--;
37065 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37066@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
37067 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37068 BUG();
37069
37070- fscache_stat(&fscache_n_op_release);
37071+ fscache_stat_unchecked(&fscache_n_op_release);
37072
37073 if (op->release) {
37074 op->release(op);
37075@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
37076 * lock, and defer it otherwise */
37077 if (!spin_trylock(&object->lock)) {
37078 _debug("defer put");
37079- fscache_stat(&fscache_n_op_deferred_release);
37080+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
37081
37082 cache = object->cache;
37083 spin_lock(&cache->op_gc_list_lock);
37084@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
37085
37086 _debug("GC DEFERRED REL OBJ%x OP%x",
37087 object->debug_id, op->debug_id);
37088- fscache_stat(&fscache_n_op_gc);
37089+ fscache_stat_unchecked(&fscache_n_op_gc);
37090
37091 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37092
37093diff -urNp linux-3.0.3/fs/fscache/page.c linux-3.0.3/fs/fscache/page.c
37094--- linux-3.0.3/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
37095+++ linux-3.0.3/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
37096@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37097 val = radix_tree_lookup(&cookie->stores, page->index);
37098 if (!val) {
37099 rcu_read_unlock();
37100- fscache_stat(&fscache_n_store_vmscan_not_storing);
37101+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37102 __fscache_uncache_page(cookie, page);
37103 return true;
37104 }
37105@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37106 spin_unlock(&cookie->stores_lock);
37107
37108 if (xpage) {
37109- fscache_stat(&fscache_n_store_vmscan_cancelled);
37110- fscache_stat(&fscache_n_store_radix_deletes);
37111+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37112+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37113 ASSERTCMP(xpage, ==, page);
37114 } else {
37115- fscache_stat(&fscache_n_store_vmscan_gone);
37116+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37117 }
37118
37119 wake_up_bit(&cookie->flags, 0);
37120@@ -107,7 +107,7 @@ page_busy:
37121 /* we might want to wait here, but that could deadlock the allocator as
37122 * the work threads writing to the cache may all end up sleeping
37123 * on memory allocation */
37124- fscache_stat(&fscache_n_store_vmscan_busy);
37125+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37126 return false;
37127 }
37128 EXPORT_SYMBOL(__fscache_maybe_release_page);
37129@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37130 FSCACHE_COOKIE_STORING_TAG);
37131 if (!radix_tree_tag_get(&cookie->stores, page->index,
37132 FSCACHE_COOKIE_PENDING_TAG)) {
37133- fscache_stat(&fscache_n_store_radix_deletes);
37134+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37135 xpage = radix_tree_delete(&cookie->stores, page->index);
37136 }
37137 spin_unlock(&cookie->stores_lock);
37138@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37139
37140 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37141
37142- fscache_stat(&fscache_n_attr_changed_calls);
37143+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37144
37145 if (fscache_object_is_active(object)) {
37146 fscache_stat(&fscache_n_cop_attr_changed);
37147@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
37148
37149 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37150
37151- fscache_stat(&fscache_n_attr_changed);
37152+ fscache_stat_unchecked(&fscache_n_attr_changed);
37153
37154 op = kzalloc(sizeof(*op), GFP_KERNEL);
37155 if (!op) {
37156- fscache_stat(&fscache_n_attr_changed_nomem);
37157+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37158 _leave(" = -ENOMEM");
37159 return -ENOMEM;
37160 }
37161@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
37162 if (fscache_submit_exclusive_op(object, op) < 0)
37163 goto nobufs;
37164 spin_unlock(&cookie->lock);
37165- fscache_stat(&fscache_n_attr_changed_ok);
37166+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37167 fscache_put_operation(op);
37168 _leave(" = 0");
37169 return 0;
37170@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
37171 nobufs:
37172 spin_unlock(&cookie->lock);
37173 kfree(op);
37174- fscache_stat(&fscache_n_attr_changed_nobufs);
37175+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37176 _leave(" = %d", -ENOBUFS);
37177 return -ENOBUFS;
37178 }
37179@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
37180 /* allocate a retrieval operation and attempt to submit it */
37181 op = kzalloc(sizeof(*op), GFP_NOIO);
37182 if (!op) {
37183- fscache_stat(&fscache_n_retrievals_nomem);
37184+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37185 return NULL;
37186 }
37187
37188@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
37189 return 0;
37190 }
37191
37192- fscache_stat(&fscache_n_retrievals_wait);
37193+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
37194
37195 jif = jiffies;
37196 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37197 fscache_wait_bit_interruptible,
37198 TASK_INTERRUPTIBLE) != 0) {
37199- fscache_stat(&fscache_n_retrievals_intr);
37200+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37201 _leave(" = -ERESTARTSYS");
37202 return -ERESTARTSYS;
37203 }
37204@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
37205 */
37206 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37207 struct fscache_retrieval *op,
37208- atomic_t *stat_op_waits,
37209- atomic_t *stat_object_dead)
37210+ atomic_unchecked_t *stat_op_waits,
37211+ atomic_unchecked_t *stat_object_dead)
37212 {
37213 int ret;
37214
37215@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
37216 goto check_if_dead;
37217
37218 _debug(">>> WT");
37219- fscache_stat(stat_op_waits);
37220+ fscache_stat_unchecked(stat_op_waits);
37221 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37222 fscache_wait_bit_interruptible,
37223 TASK_INTERRUPTIBLE) < 0) {
37224@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
37225
37226 check_if_dead:
37227 if (unlikely(fscache_object_is_dead(object))) {
37228- fscache_stat(stat_object_dead);
37229+ fscache_stat_unchecked(stat_object_dead);
37230 return -ENOBUFS;
37231 }
37232 return 0;
37233@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
37234
37235 _enter("%p,%p,,,", cookie, page);
37236
37237- fscache_stat(&fscache_n_retrievals);
37238+ fscache_stat_unchecked(&fscache_n_retrievals);
37239
37240 if (hlist_empty(&cookie->backing_objects))
37241 goto nobufs;
37242@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
37243 goto nobufs_unlock;
37244 spin_unlock(&cookie->lock);
37245
37246- fscache_stat(&fscache_n_retrieval_ops);
37247+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37248
37249 /* pin the netfs read context in case we need to do the actual netfs
37250 * read because we've encountered a cache read failure */
37251@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
37252
37253 error:
37254 if (ret == -ENOMEM)
37255- fscache_stat(&fscache_n_retrievals_nomem);
37256+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37257 else if (ret == -ERESTARTSYS)
37258- fscache_stat(&fscache_n_retrievals_intr);
37259+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37260 else if (ret == -ENODATA)
37261- fscache_stat(&fscache_n_retrievals_nodata);
37262+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37263 else if (ret < 0)
37264- fscache_stat(&fscache_n_retrievals_nobufs);
37265+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37266 else
37267- fscache_stat(&fscache_n_retrievals_ok);
37268+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37269
37270 fscache_put_retrieval(op);
37271 _leave(" = %d", ret);
37272@@ -429,7 +429,7 @@ nobufs_unlock:
37273 spin_unlock(&cookie->lock);
37274 kfree(op);
37275 nobufs:
37276- fscache_stat(&fscache_n_retrievals_nobufs);
37277+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37278 _leave(" = -ENOBUFS");
37279 return -ENOBUFS;
37280 }
37281@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
37282
37283 _enter("%p,,%d,,,", cookie, *nr_pages);
37284
37285- fscache_stat(&fscache_n_retrievals);
37286+ fscache_stat_unchecked(&fscache_n_retrievals);
37287
37288 if (hlist_empty(&cookie->backing_objects))
37289 goto nobufs;
37290@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
37291 goto nobufs_unlock;
37292 spin_unlock(&cookie->lock);
37293
37294- fscache_stat(&fscache_n_retrieval_ops);
37295+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37296
37297 /* pin the netfs read context in case we need to do the actual netfs
37298 * read because we've encountered a cache read failure */
37299@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37300
37301 error:
37302 if (ret == -ENOMEM)
37303- fscache_stat(&fscache_n_retrievals_nomem);
37304+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37305 else if (ret == -ERESTARTSYS)
37306- fscache_stat(&fscache_n_retrievals_intr);
37307+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37308 else if (ret == -ENODATA)
37309- fscache_stat(&fscache_n_retrievals_nodata);
37310+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37311 else if (ret < 0)
37312- fscache_stat(&fscache_n_retrievals_nobufs);
37313+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37314 else
37315- fscache_stat(&fscache_n_retrievals_ok);
37316+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37317
37318 fscache_put_retrieval(op);
37319 _leave(" = %d", ret);
37320@@ -545,7 +545,7 @@ nobufs_unlock:
37321 spin_unlock(&cookie->lock);
37322 kfree(op);
37323 nobufs:
37324- fscache_stat(&fscache_n_retrievals_nobufs);
37325+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37326 _leave(" = -ENOBUFS");
37327 return -ENOBUFS;
37328 }
37329@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37330
37331 _enter("%p,%p,,,", cookie, page);
37332
37333- fscache_stat(&fscache_n_allocs);
37334+ fscache_stat_unchecked(&fscache_n_allocs);
37335
37336 if (hlist_empty(&cookie->backing_objects))
37337 goto nobufs;
37338@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37339 goto nobufs_unlock;
37340 spin_unlock(&cookie->lock);
37341
37342- fscache_stat(&fscache_n_alloc_ops);
37343+ fscache_stat_unchecked(&fscache_n_alloc_ops);
37344
37345 ret = fscache_wait_for_retrieval_activation(
37346 object, op,
37347@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37348
37349 error:
37350 if (ret == -ERESTARTSYS)
37351- fscache_stat(&fscache_n_allocs_intr);
37352+ fscache_stat_unchecked(&fscache_n_allocs_intr);
37353 else if (ret < 0)
37354- fscache_stat(&fscache_n_allocs_nobufs);
37355+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37356 else
37357- fscache_stat(&fscache_n_allocs_ok);
37358+ fscache_stat_unchecked(&fscache_n_allocs_ok);
37359
37360 fscache_put_retrieval(op);
37361 _leave(" = %d", ret);
37362@@ -625,7 +625,7 @@ nobufs_unlock:
37363 spin_unlock(&cookie->lock);
37364 kfree(op);
37365 nobufs:
37366- fscache_stat(&fscache_n_allocs_nobufs);
37367+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37368 _leave(" = -ENOBUFS");
37369 return -ENOBUFS;
37370 }
37371@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37372
37373 spin_lock(&cookie->stores_lock);
37374
37375- fscache_stat(&fscache_n_store_calls);
37376+ fscache_stat_unchecked(&fscache_n_store_calls);
37377
37378 /* find a page to store */
37379 page = NULL;
37380@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37381 page = results[0];
37382 _debug("gang %d [%lx]", n, page->index);
37383 if (page->index > op->store_limit) {
37384- fscache_stat(&fscache_n_store_pages_over_limit);
37385+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37386 goto superseded;
37387 }
37388
37389@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37390 spin_unlock(&cookie->stores_lock);
37391 spin_unlock(&object->lock);
37392
37393- fscache_stat(&fscache_n_store_pages);
37394+ fscache_stat_unchecked(&fscache_n_store_pages);
37395 fscache_stat(&fscache_n_cop_write_page);
37396 ret = object->cache->ops->write_page(op, page);
37397 fscache_stat_d(&fscache_n_cop_write_page);
37398@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37399 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37400 ASSERT(PageFsCache(page));
37401
37402- fscache_stat(&fscache_n_stores);
37403+ fscache_stat_unchecked(&fscache_n_stores);
37404
37405 op = kzalloc(sizeof(*op), GFP_NOIO);
37406 if (!op)
37407@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37408 spin_unlock(&cookie->stores_lock);
37409 spin_unlock(&object->lock);
37410
37411- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37412+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37413 op->store_limit = object->store_limit;
37414
37415 if (fscache_submit_op(object, &op->op) < 0)
37416@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37417
37418 spin_unlock(&cookie->lock);
37419 radix_tree_preload_end();
37420- fscache_stat(&fscache_n_store_ops);
37421- fscache_stat(&fscache_n_stores_ok);
37422+ fscache_stat_unchecked(&fscache_n_store_ops);
37423+ fscache_stat_unchecked(&fscache_n_stores_ok);
37424
37425 /* the work queue now carries its own ref on the object */
37426 fscache_put_operation(&op->op);
37427@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37428 return 0;
37429
37430 already_queued:
37431- fscache_stat(&fscache_n_stores_again);
37432+ fscache_stat_unchecked(&fscache_n_stores_again);
37433 already_pending:
37434 spin_unlock(&cookie->stores_lock);
37435 spin_unlock(&object->lock);
37436 spin_unlock(&cookie->lock);
37437 radix_tree_preload_end();
37438 kfree(op);
37439- fscache_stat(&fscache_n_stores_ok);
37440+ fscache_stat_unchecked(&fscache_n_stores_ok);
37441 _leave(" = 0");
37442 return 0;
37443
37444@@ -851,14 +851,14 @@ nobufs:
37445 spin_unlock(&cookie->lock);
37446 radix_tree_preload_end();
37447 kfree(op);
37448- fscache_stat(&fscache_n_stores_nobufs);
37449+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
37450 _leave(" = -ENOBUFS");
37451 return -ENOBUFS;
37452
37453 nomem_free:
37454 kfree(op);
37455 nomem:
37456- fscache_stat(&fscache_n_stores_oom);
37457+ fscache_stat_unchecked(&fscache_n_stores_oom);
37458 _leave(" = -ENOMEM");
37459 return -ENOMEM;
37460 }
37461@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37462 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37463 ASSERTCMP(page, !=, NULL);
37464
37465- fscache_stat(&fscache_n_uncaches);
37466+ fscache_stat_unchecked(&fscache_n_uncaches);
37467
37468 /* cache withdrawal may beat us to it */
37469 if (!PageFsCache(page))
37470@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37471 unsigned long loop;
37472
37473 #ifdef CONFIG_FSCACHE_STATS
37474- atomic_add(pagevec->nr, &fscache_n_marks);
37475+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37476 #endif
37477
37478 for (loop = 0; loop < pagevec->nr; loop++) {
37479diff -urNp linux-3.0.3/fs/fscache/stats.c linux-3.0.3/fs/fscache/stats.c
37480--- linux-3.0.3/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37481+++ linux-3.0.3/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37482@@ -18,95 +18,95 @@
37483 /*
37484 * operation counters
37485 */
37486-atomic_t fscache_n_op_pend;
37487-atomic_t fscache_n_op_run;
37488-atomic_t fscache_n_op_enqueue;
37489-atomic_t fscache_n_op_requeue;
37490-atomic_t fscache_n_op_deferred_release;
37491-atomic_t fscache_n_op_release;
37492-atomic_t fscache_n_op_gc;
37493-atomic_t fscache_n_op_cancelled;
37494-atomic_t fscache_n_op_rejected;
37495-
37496-atomic_t fscache_n_attr_changed;
37497-atomic_t fscache_n_attr_changed_ok;
37498-atomic_t fscache_n_attr_changed_nobufs;
37499-atomic_t fscache_n_attr_changed_nomem;
37500-atomic_t fscache_n_attr_changed_calls;
37501-
37502-atomic_t fscache_n_allocs;
37503-atomic_t fscache_n_allocs_ok;
37504-atomic_t fscache_n_allocs_wait;
37505-atomic_t fscache_n_allocs_nobufs;
37506-atomic_t fscache_n_allocs_intr;
37507-atomic_t fscache_n_allocs_object_dead;
37508-atomic_t fscache_n_alloc_ops;
37509-atomic_t fscache_n_alloc_op_waits;
37510-
37511-atomic_t fscache_n_retrievals;
37512-atomic_t fscache_n_retrievals_ok;
37513-atomic_t fscache_n_retrievals_wait;
37514-atomic_t fscache_n_retrievals_nodata;
37515-atomic_t fscache_n_retrievals_nobufs;
37516-atomic_t fscache_n_retrievals_intr;
37517-atomic_t fscache_n_retrievals_nomem;
37518-atomic_t fscache_n_retrievals_object_dead;
37519-atomic_t fscache_n_retrieval_ops;
37520-atomic_t fscache_n_retrieval_op_waits;
37521-
37522-atomic_t fscache_n_stores;
37523-atomic_t fscache_n_stores_ok;
37524-atomic_t fscache_n_stores_again;
37525-atomic_t fscache_n_stores_nobufs;
37526-atomic_t fscache_n_stores_oom;
37527-atomic_t fscache_n_store_ops;
37528-atomic_t fscache_n_store_calls;
37529-atomic_t fscache_n_store_pages;
37530-atomic_t fscache_n_store_radix_deletes;
37531-atomic_t fscache_n_store_pages_over_limit;
37532-
37533-atomic_t fscache_n_store_vmscan_not_storing;
37534-atomic_t fscache_n_store_vmscan_gone;
37535-atomic_t fscache_n_store_vmscan_busy;
37536-atomic_t fscache_n_store_vmscan_cancelled;
37537-
37538-atomic_t fscache_n_marks;
37539-atomic_t fscache_n_uncaches;
37540-
37541-atomic_t fscache_n_acquires;
37542-atomic_t fscache_n_acquires_null;
37543-atomic_t fscache_n_acquires_no_cache;
37544-atomic_t fscache_n_acquires_ok;
37545-atomic_t fscache_n_acquires_nobufs;
37546-atomic_t fscache_n_acquires_oom;
37547-
37548-atomic_t fscache_n_updates;
37549-atomic_t fscache_n_updates_null;
37550-atomic_t fscache_n_updates_run;
37551-
37552-atomic_t fscache_n_relinquishes;
37553-atomic_t fscache_n_relinquishes_null;
37554-atomic_t fscache_n_relinquishes_waitcrt;
37555-atomic_t fscache_n_relinquishes_retire;
37556-
37557-atomic_t fscache_n_cookie_index;
37558-atomic_t fscache_n_cookie_data;
37559-atomic_t fscache_n_cookie_special;
37560-
37561-atomic_t fscache_n_object_alloc;
37562-atomic_t fscache_n_object_no_alloc;
37563-atomic_t fscache_n_object_lookups;
37564-atomic_t fscache_n_object_lookups_negative;
37565-atomic_t fscache_n_object_lookups_positive;
37566-atomic_t fscache_n_object_lookups_timed_out;
37567-atomic_t fscache_n_object_created;
37568-atomic_t fscache_n_object_avail;
37569-atomic_t fscache_n_object_dead;
37570-
37571-atomic_t fscache_n_checkaux_none;
37572-atomic_t fscache_n_checkaux_okay;
37573-atomic_t fscache_n_checkaux_update;
37574-atomic_t fscache_n_checkaux_obsolete;
37575+atomic_unchecked_t fscache_n_op_pend;
37576+atomic_unchecked_t fscache_n_op_run;
37577+atomic_unchecked_t fscache_n_op_enqueue;
37578+atomic_unchecked_t fscache_n_op_requeue;
37579+atomic_unchecked_t fscache_n_op_deferred_release;
37580+atomic_unchecked_t fscache_n_op_release;
37581+atomic_unchecked_t fscache_n_op_gc;
37582+atomic_unchecked_t fscache_n_op_cancelled;
37583+atomic_unchecked_t fscache_n_op_rejected;
37584+
37585+atomic_unchecked_t fscache_n_attr_changed;
37586+atomic_unchecked_t fscache_n_attr_changed_ok;
37587+atomic_unchecked_t fscache_n_attr_changed_nobufs;
37588+atomic_unchecked_t fscache_n_attr_changed_nomem;
37589+atomic_unchecked_t fscache_n_attr_changed_calls;
37590+
37591+atomic_unchecked_t fscache_n_allocs;
37592+atomic_unchecked_t fscache_n_allocs_ok;
37593+atomic_unchecked_t fscache_n_allocs_wait;
37594+atomic_unchecked_t fscache_n_allocs_nobufs;
37595+atomic_unchecked_t fscache_n_allocs_intr;
37596+atomic_unchecked_t fscache_n_allocs_object_dead;
37597+atomic_unchecked_t fscache_n_alloc_ops;
37598+atomic_unchecked_t fscache_n_alloc_op_waits;
37599+
37600+atomic_unchecked_t fscache_n_retrievals;
37601+atomic_unchecked_t fscache_n_retrievals_ok;
37602+atomic_unchecked_t fscache_n_retrievals_wait;
37603+atomic_unchecked_t fscache_n_retrievals_nodata;
37604+atomic_unchecked_t fscache_n_retrievals_nobufs;
37605+atomic_unchecked_t fscache_n_retrievals_intr;
37606+atomic_unchecked_t fscache_n_retrievals_nomem;
37607+atomic_unchecked_t fscache_n_retrievals_object_dead;
37608+atomic_unchecked_t fscache_n_retrieval_ops;
37609+atomic_unchecked_t fscache_n_retrieval_op_waits;
37610+
37611+atomic_unchecked_t fscache_n_stores;
37612+atomic_unchecked_t fscache_n_stores_ok;
37613+atomic_unchecked_t fscache_n_stores_again;
37614+atomic_unchecked_t fscache_n_stores_nobufs;
37615+atomic_unchecked_t fscache_n_stores_oom;
37616+atomic_unchecked_t fscache_n_store_ops;
37617+atomic_unchecked_t fscache_n_store_calls;
37618+atomic_unchecked_t fscache_n_store_pages;
37619+atomic_unchecked_t fscache_n_store_radix_deletes;
37620+atomic_unchecked_t fscache_n_store_pages_over_limit;
37621+
37622+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37623+atomic_unchecked_t fscache_n_store_vmscan_gone;
37624+atomic_unchecked_t fscache_n_store_vmscan_busy;
37625+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37626+
37627+atomic_unchecked_t fscache_n_marks;
37628+atomic_unchecked_t fscache_n_uncaches;
37629+
37630+atomic_unchecked_t fscache_n_acquires;
37631+atomic_unchecked_t fscache_n_acquires_null;
37632+atomic_unchecked_t fscache_n_acquires_no_cache;
37633+atomic_unchecked_t fscache_n_acquires_ok;
37634+atomic_unchecked_t fscache_n_acquires_nobufs;
37635+atomic_unchecked_t fscache_n_acquires_oom;
37636+
37637+atomic_unchecked_t fscache_n_updates;
37638+atomic_unchecked_t fscache_n_updates_null;
37639+atomic_unchecked_t fscache_n_updates_run;
37640+
37641+atomic_unchecked_t fscache_n_relinquishes;
37642+atomic_unchecked_t fscache_n_relinquishes_null;
37643+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37644+atomic_unchecked_t fscache_n_relinquishes_retire;
37645+
37646+atomic_unchecked_t fscache_n_cookie_index;
37647+atomic_unchecked_t fscache_n_cookie_data;
37648+atomic_unchecked_t fscache_n_cookie_special;
37649+
37650+atomic_unchecked_t fscache_n_object_alloc;
37651+atomic_unchecked_t fscache_n_object_no_alloc;
37652+atomic_unchecked_t fscache_n_object_lookups;
37653+atomic_unchecked_t fscache_n_object_lookups_negative;
37654+atomic_unchecked_t fscache_n_object_lookups_positive;
37655+atomic_unchecked_t fscache_n_object_lookups_timed_out;
37656+atomic_unchecked_t fscache_n_object_created;
37657+atomic_unchecked_t fscache_n_object_avail;
37658+atomic_unchecked_t fscache_n_object_dead;
37659+
37660+atomic_unchecked_t fscache_n_checkaux_none;
37661+atomic_unchecked_t fscache_n_checkaux_okay;
37662+atomic_unchecked_t fscache_n_checkaux_update;
37663+atomic_unchecked_t fscache_n_checkaux_obsolete;
37664
37665 atomic_t fscache_n_cop_alloc_object;
37666 atomic_t fscache_n_cop_lookup_object;
37667@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37668 seq_puts(m, "FS-Cache statistics\n");
37669
37670 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37671- atomic_read(&fscache_n_cookie_index),
37672- atomic_read(&fscache_n_cookie_data),
37673- atomic_read(&fscache_n_cookie_special));
37674+ atomic_read_unchecked(&fscache_n_cookie_index),
37675+ atomic_read_unchecked(&fscache_n_cookie_data),
37676+ atomic_read_unchecked(&fscache_n_cookie_special));
37677
37678 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37679- atomic_read(&fscache_n_object_alloc),
37680- atomic_read(&fscache_n_object_no_alloc),
37681- atomic_read(&fscache_n_object_avail),
37682- atomic_read(&fscache_n_object_dead));
37683+ atomic_read_unchecked(&fscache_n_object_alloc),
37684+ atomic_read_unchecked(&fscache_n_object_no_alloc),
37685+ atomic_read_unchecked(&fscache_n_object_avail),
37686+ atomic_read_unchecked(&fscache_n_object_dead));
37687 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37688- atomic_read(&fscache_n_checkaux_none),
37689- atomic_read(&fscache_n_checkaux_okay),
37690- atomic_read(&fscache_n_checkaux_update),
37691- atomic_read(&fscache_n_checkaux_obsolete));
37692+ atomic_read_unchecked(&fscache_n_checkaux_none),
37693+ atomic_read_unchecked(&fscache_n_checkaux_okay),
37694+ atomic_read_unchecked(&fscache_n_checkaux_update),
37695+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37696
37697 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37698- atomic_read(&fscache_n_marks),
37699- atomic_read(&fscache_n_uncaches));
37700+ atomic_read_unchecked(&fscache_n_marks),
37701+ atomic_read_unchecked(&fscache_n_uncaches));
37702
37703 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37704 " oom=%u\n",
37705- atomic_read(&fscache_n_acquires),
37706- atomic_read(&fscache_n_acquires_null),
37707- atomic_read(&fscache_n_acquires_no_cache),
37708- atomic_read(&fscache_n_acquires_ok),
37709- atomic_read(&fscache_n_acquires_nobufs),
37710- atomic_read(&fscache_n_acquires_oom));
37711+ atomic_read_unchecked(&fscache_n_acquires),
37712+ atomic_read_unchecked(&fscache_n_acquires_null),
37713+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
37714+ atomic_read_unchecked(&fscache_n_acquires_ok),
37715+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
37716+ atomic_read_unchecked(&fscache_n_acquires_oom));
37717
37718 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37719- atomic_read(&fscache_n_object_lookups),
37720- atomic_read(&fscache_n_object_lookups_negative),
37721- atomic_read(&fscache_n_object_lookups_positive),
37722- atomic_read(&fscache_n_object_created),
37723- atomic_read(&fscache_n_object_lookups_timed_out));
37724+ atomic_read_unchecked(&fscache_n_object_lookups),
37725+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
37726+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
37727+ atomic_read_unchecked(&fscache_n_object_created),
37728+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37729
37730 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37731- atomic_read(&fscache_n_updates),
37732- atomic_read(&fscache_n_updates_null),
37733- atomic_read(&fscache_n_updates_run));
37734+ atomic_read_unchecked(&fscache_n_updates),
37735+ atomic_read_unchecked(&fscache_n_updates_null),
37736+ atomic_read_unchecked(&fscache_n_updates_run));
37737
37738 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37739- atomic_read(&fscache_n_relinquishes),
37740- atomic_read(&fscache_n_relinquishes_null),
37741- atomic_read(&fscache_n_relinquishes_waitcrt),
37742- atomic_read(&fscache_n_relinquishes_retire));
37743+ atomic_read_unchecked(&fscache_n_relinquishes),
37744+ atomic_read_unchecked(&fscache_n_relinquishes_null),
37745+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37746+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
37747
37748 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37749- atomic_read(&fscache_n_attr_changed),
37750- atomic_read(&fscache_n_attr_changed_ok),
37751- atomic_read(&fscache_n_attr_changed_nobufs),
37752- atomic_read(&fscache_n_attr_changed_nomem),
37753- atomic_read(&fscache_n_attr_changed_calls));
37754+ atomic_read_unchecked(&fscache_n_attr_changed),
37755+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
37756+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37757+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37758+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
37759
37760 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37761- atomic_read(&fscache_n_allocs),
37762- atomic_read(&fscache_n_allocs_ok),
37763- atomic_read(&fscache_n_allocs_wait),
37764- atomic_read(&fscache_n_allocs_nobufs),
37765- atomic_read(&fscache_n_allocs_intr));
37766+ atomic_read_unchecked(&fscache_n_allocs),
37767+ atomic_read_unchecked(&fscache_n_allocs_ok),
37768+ atomic_read_unchecked(&fscache_n_allocs_wait),
37769+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
37770+ atomic_read_unchecked(&fscache_n_allocs_intr));
37771 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37772- atomic_read(&fscache_n_alloc_ops),
37773- atomic_read(&fscache_n_alloc_op_waits),
37774- atomic_read(&fscache_n_allocs_object_dead));
37775+ atomic_read_unchecked(&fscache_n_alloc_ops),
37776+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
37777+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
37778
37779 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37780 " int=%u oom=%u\n",
37781- atomic_read(&fscache_n_retrievals),
37782- atomic_read(&fscache_n_retrievals_ok),
37783- atomic_read(&fscache_n_retrievals_wait),
37784- atomic_read(&fscache_n_retrievals_nodata),
37785- atomic_read(&fscache_n_retrievals_nobufs),
37786- atomic_read(&fscache_n_retrievals_intr),
37787- atomic_read(&fscache_n_retrievals_nomem));
37788+ atomic_read_unchecked(&fscache_n_retrievals),
37789+ atomic_read_unchecked(&fscache_n_retrievals_ok),
37790+ atomic_read_unchecked(&fscache_n_retrievals_wait),
37791+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
37792+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37793+ atomic_read_unchecked(&fscache_n_retrievals_intr),
37794+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
37795 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37796- atomic_read(&fscache_n_retrieval_ops),
37797- atomic_read(&fscache_n_retrieval_op_waits),
37798- atomic_read(&fscache_n_retrievals_object_dead));
37799+ atomic_read_unchecked(&fscache_n_retrieval_ops),
37800+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37801+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37802
37803 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37804- atomic_read(&fscache_n_stores),
37805- atomic_read(&fscache_n_stores_ok),
37806- atomic_read(&fscache_n_stores_again),
37807- atomic_read(&fscache_n_stores_nobufs),
37808- atomic_read(&fscache_n_stores_oom));
37809+ atomic_read_unchecked(&fscache_n_stores),
37810+ atomic_read_unchecked(&fscache_n_stores_ok),
37811+ atomic_read_unchecked(&fscache_n_stores_again),
37812+ atomic_read_unchecked(&fscache_n_stores_nobufs),
37813+ atomic_read_unchecked(&fscache_n_stores_oom));
37814 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37815- atomic_read(&fscache_n_store_ops),
37816- atomic_read(&fscache_n_store_calls),
37817- atomic_read(&fscache_n_store_pages),
37818- atomic_read(&fscache_n_store_radix_deletes),
37819- atomic_read(&fscache_n_store_pages_over_limit));
37820+ atomic_read_unchecked(&fscache_n_store_ops),
37821+ atomic_read_unchecked(&fscache_n_store_calls),
37822+ atomic_read_unchecked(&fscache_n_store_pages),
37823+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
37824+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37825
37826 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37827- atomic_read(&fscache_n_store_vmscan_not_storing),
37828- atomic_read(&fscache_n_store_vmscan_gone),
37829- atomic_read(&fscache_n_store_vmscan_busy),
37830- atomic_read(&fscache_n_store_vmscan_cancelled));
37831+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37832+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37833+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37834+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37835
37836 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37837- atomic_read(&fscache_n_op_pend),
37838- atomic_read(&fscache_n_op_run),
37839- atomic_read(&fscache_n_op_enqueue),
37840- atomic_read(&fscache_n_op_cancelled),
37841- atomic_read(&fscache_n_op_rejected));
37842+ atomic_read_unchecked(&fscache_n_op_pend),
37843+ atomic_read_unchecked(&fscache_n_op_run),
37844+ atomic_read_unchecked(&fscache_n_op_enqueue),
37845+ atomic_read_unchecked(&fscache_n_op_cancelled),
37846+ atomic_read_unchecked(&fscache_n_op_rejected));
37847 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37848- atomic_read(&fscache_n_op_deferred_release),
37849- atomic_read(&fscache_n_op_release),
37850- atomic_read(&fscache_n_op_gc));
37851+ atomic_read_unchecked(&fscache_n_op_deferred_release),
37852+ atomic_read_unchecked(&fscache_n_op_release),
37853+ atomic_read_unchecked(&fscache_n_op_gc));
37854
37855 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37856 atomic_read(&fscache_n_cop_alloc_object),
37857diff -urNp linux-3.0.3/fs/fs_struct.c linux-3.0.3/fs/fs_struct.c
37858--- linux-3.0.3/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37859+++ linux-3.0.3/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37860@@ -4,6 +4,7 @@
37861 #include <linux/path.h>
37862 #include <linux/slab.h>
37863 #include <linux/fs_struct.h>
37864+#include <linux/grsecurity.h>
37865 #include "internal.h"
37866
37867 static inline void path_get_longterm(struct path *path)
37868@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37869 old_root = fs->root;
37870 fs->root = *path;
37871 path_get_longterm(path);
37872+ gr_set_chroot_entries(current, path);
37873 write_seqcount_end(&fs->seq);
37874 spin_unlock(&fs->lock);
37875 if (old_root.dentry)
37876@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37877 && fs->root.mnt == old_root->mnt) {
37878 path_get_longterm(new_root);
37879 fs->root = *new_root;
37880+ gr_set_chroot_entries(p, new_root);
37881 count++;
37882 }
37883 if (fs->pwd.dentry == old_root->dentry
37884@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37885 spin_lock(&fs->lock);
37886 write_seqcount_begin(&fs->seq);
37887 tsk->fs = NULL;
37888- kill = !--fs->users;
37889+ gr_clear_chroot_entries(tsk);
37890+ kill = !atomic_dec_return(&fs->users);
37891 write_seqcount_end(&fs->seq);
37892 spin_unlock(&fs->lock);
37893 task_unlock(tsk);
37894@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37895 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37896 /* We don't need to lock fs - think why ;-) */
37897 if (fs) {
37898- fs->users = 1;
37899+ atomic_set(&fs->users, 1);
37900 fs->in_exec = 0;
37901 spin_lock_init(&fs->lock);
37902 seqcount_init(&fs->seq);
37903@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37904 spin_lock(&old->lock);
37905 fs->root = old->root;
37906 path_get_longterm(&fs->root);
37907+ /* instead of calling gr_set_chroot_entries here,
37908+ we call it from every caller of this function
37909+ */
37910 fs->pwd = old->pwd;
37911 path_get_longterm(&fs->pwd);
37912 spin_unlock(&old->lock);
37913@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37914
37915 task_lock(current);
37916 spin_lock(&fs->lock);
37917- kill = !--fs->users;
37918+ kill = !atomic_dec_return(&fs->users);
37919 current->fs = new_fs;
37920+ gr_set_chroot_entries(current, &new_fs->root);
37921 spin_unlock(&fs->lock);
37922 task_unlock(current);
37923
37924@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37925
37926 /* to be mentioned only in INIT_TASK */
37927 struct fs_struct init_fs = {
37928- .users = 1,
37929+ .users = ATOMIC_INIT(1),
37930 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37931 .seq = SEQCNT_ZERO,
37932 .umask = 0022,
37933@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37934 task_lock(current);
37935
37936 spin_lock(&init_fs.lock);
37937- init_fs.users++;
37938+ atomic_inc(&init_fs.users);
37939 spin_unlock(&init_fs.lock);
37940
37941 spin_lock(&fs->lock);
37942 current->fs = &init_fs;
37943- kill = !--fs->users;
37944+ gr_set_chroot_entries(current, &current->fs->root);
37945+ kill = !atomic_dec_return(&fs->users);
37946 spin_unlock(&fs->lock);
37947
37948 task_unlock(current);
37949diff -urNp linux-3.0.3/fs/fuse/cuse.c linux-3.0.3/fs/fuse/cuse.c
37950--- linux-3.0.3/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37951+++ linux-3.0.3/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37952@@ -586,10 +586,12 @@ static int __init cuse_init(void)
37953 INIT_LIST_HEAD(&cuse_conntbl[i]);
37954
37955 /* inherit and extend fuse_dev_operations */
37956- cuse_channel_fops = fuse_dev_operations;
37957- cuse_channel_fops.owner = THIS_MODULE;
37958- cuse_channel_fops.open = cuse_channel_open;
37959- cuse_channel_fops.release = cuse_channel_release;
37960+ pax_open_kernel();
37961+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37962+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37963+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
37964+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
37965+ pax_close_kernel();
37966
37967 cuse_class = class_create(THIS_MODULE, "cuse");
37968 if (IS_ERR(cuse_class))
37969diff -urNp linux-3.0.3/fs/fuse/dev.c linux-3.0.3/fs/fuse/dev.c
37970--- linux-3.0.3/fs/fuse/dev.c 2011-07-21 22:17:23.000000000 -0400
37971+++ linux-3.0.3/fs/fuse/dev.c 2011-08-23 21:47:56.000000000 -0400
37972@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37973 ret = 0;
37974 pipe_lock(pipe);
37975
37976- if (!pipe->readers) {
37977+ if (!atomic_read(&pipe->readers)) {
37978 send_sig(SIGPIPE, current, 0);
37979 if (!ret)
37980 ret = -EPIPE;
37981diff -urNp linux-3.0.3/fs/fuse/dir.c linux-3.0.3/fs/fuse/dir.c
37982--- linux-3.0.3/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
37983+++ linux-3.0.3/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
37984@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
37985 return link;
37986 }
37987
37988-static void free_link(char *link)
37989+static void free_link(const char *link)
37990 {
37991 if (!IS_ERR(link))
37992 free_page((unsigned long) link);
37993diff -urNp linux-3.0.3/fs/gfs2/inode.c linux-3.0.3/fs/gfs2/inode.c
37994--- linux-3.0.3/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
37995+++ linux-3.0.3/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
37996@@ -1525,7 +1525,7 @@ out:
37997
37998 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37999 {
38000- char *s = nd_get_link(nd);
38001+ const char *s = nd_get_link(nd);
38002 if (!IS_ERR(s))
38003 kfree(s);
38004 }
38005diff -urNp linux-3.0.3/fs/hfsplus/catalog.c linux-3.0.3/fs/hfsplus/catalog.c
38006--- linux-3.0.3/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
38007+++ linux-3.0.3/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
38008@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38009 int err;
38010 u16 type;
38011
38012+ pax_track_stack();
38013+
38014 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38015 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38016 if (err)
38017@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38018 int entry_size;
38019 int err;
38020
38021+ pax_track_stack();
38022+
38023 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38024 str->name, cnid, inode->i_nlink);
38025 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38026@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38027 int entry_size, type;
38028 int err = 0;
38029
38030+ pax_track_stack();
38031+
38032 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38033 cnid, src_dir->i_ino, src_name->name,
38034 dst_dir->i_ino, dst_name->name);
38035diff -urNp linux-3.0.3/fs/hfsplus/dir.c linux-3.0.3/fs/hfsplus/dir.c
38036--- linux-3.0.3/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
38037+++ linux-3.0.3/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
38038@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38039 struct hfsplus_readdir_data *rd;
38040 u16 type;
38041
38042+ pax_track_stack();
38043+
38044 if (filp->f_pos >= inode->i_size)
38045 return 0;
38046
38047diff -urNp linux-3.0.3/fs/hfsplus/inode.c linux-3.0.3/fs/hfsplus/inode.c
38048--- linux-3.0.3/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
38049+++ linux-3.0.3/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
38050@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38051 int res = 0;
38052 u16 type;
38053
38054+ pax_track_stack();
38055+
38056 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38057
38058 HFSPLUS_I(inode)->linkid = 0;
38059@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38060 struct hfs_find_data fd;
38061 hfsplus_cat_entry entry;
38062
38063+ pax_track_stack();
38064+
38065 if (HFSPLUS_IS_RSRC(inode))
38066 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38067
38068diff -urNp linux-3.0.3/fs/hfsplus/ioctl.c linux-3.0.3/fs/hfsplus/ioctl.c
38069--- linux-3.0.3/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38070+++ linux-3.0.3/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38071@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38072 struct hfsplus_cat_file *file;
38073 int res;
38074
38075+ pax_track_stack();
38076+
38077 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38078 return -EOPNOTSUPP;
38079
38080@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38081 struct hfsplus_cat_file *file;
38082 ssize_t res = 0;
38083
38084+ pax_track_stack();
38085+
38086 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38087 return -EOPNOTSUPP;
38088
38089diff -urNp linux-3.0.3/fs/hfsplus/super.c linux-3.0.3/fs/hfsplus/super.c
38090--- linux-3.0.3/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
38091+++ linux-3.0.3/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
38092@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38093 struct nls_table *nls = NULL;
38094 int err;
38095
38096+ pax_track_stack();
38097+
38098 err = -EINVAL;
38099 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38100 if (!sbi)
38101diff -urNp linux-3.0.3/fs/hugetlbfs/inode.c linux-3.0.3/fs/hugetlbfs/inode.c
38102--- linux-3.0.3/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38103+++ linux-3.0.3/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38104@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38105 .kill_sb = kill_litter_super,
38106 };
38107
38108-static struct vfsmount *hugetlbfs_vfsmount;
38109+struct vfsmount *hugetlbfs_vfsmount;
38110
38111 static int can_do_hugetlb_shm(void)
38112 {
38113diff -urNp linux-3.0.3/fs/inode.c linux-3.0.3/fs/inode.c
38114--- linux-3.0.3/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
38115+++ linux-3.0.3/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
38116@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
38117
38118 #ifdef CONFIG_SMP
38119 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38120- static atomic_t shared_last_ino;
38121- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38122+ static atomic_unchecked_t shared_last_ino;
38123+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38124
38125 res = next - LAST_INO_BATCH;
38126 }
38127diff -urNp linux-3.0.3/fs/jbd/checkpoint.c linux-3.0.3/fs/jbd/checkpoint.c
38128--- linux-3.0.3/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
38129+++ linux-3.0.3/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
38130@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38131 tid_t this_tid;
38132 int result;
38133
38134+ pax_track_stack();
38135+
38136 jbd_debug(1, "Start checkpoint\n");
38137
38138 /*
38139diff -urNp linux-3.0.3/fs/jffs2/compr_rtime.c linux-3.0.3/fs/jffs2/compr_rtime.c
38140--- linux-3.0.3/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
38141+++ linux-3.0.3/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
38142@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38143 int outpos = 0;
38144 int pos=0;
38145
38146+ pax_track_stack();
38147+
38148 memset(positions,0,sizeof(positions));
38149
38150 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38151@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38152 int outpos = 0;
38153 int pos=0;
38154
38155+ pax_track_stack();
38156+
38157 memset(positions,0,sizeof(positions));
38158
38159 while (outpos<destlen) {
38160diff -urNp linux-3.0.3/fs/jffs2/compr_rubin.c linux-3.0.3/fs/jffs2/compr_rubin.c
38161--- linux-3.0.3/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
38162+++ linux-3.0.3/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
38163@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38164 int ret;
38165 uint32_t mysrclen, mydstlen;
38166
38167+ pax_track_stack();
38168+
38169 mysrclen = *sourcelen;
38170 mydstlen = *dstlen - 8;
38171
38172diff -urNp linux-3.0.3/fs/jffs2/erase.c linux-3.0.3/fs/jffs2/erase.c
38173--- linux-3.0.3/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
38174+++ linux-3.0.3/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
38175@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38176 struct jffs2_unknown_node marker = {
38177 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38178 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38179- .totlen = cpu_to_je32(c->cleanmarker_size)
38180+ .totlen = cpu_to_je32(c->cleanmarker_size),
38181+ .hdr_crc = cpu_to_je32(0)
38182 };
38183
38184 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38185diff -urNp linux-3.0.3/fs/jffs2/wbuf.c linux-3.0.3/fs/jffs2/wbuf.c
38186--- linux-3.0.3/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
38187+++ linux-3.0.3/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
38188@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38189 {
38190 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38191 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38192- .totlen = constant_cpu_to_je32(8)
38193+ .totlen = constant_cpu_to_je32(8),
38194+ .hdr_crc = constant_cpu_to_je32(0)
38195 };
38196
38197 /*
38198diff -urNp linux-3.0.3/fs/jffs2/xattr.c linux-3.0.3/fs/jffs2/xattr.c
38199--- linux-3.0.3/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
38200+++ linux-3.0.3/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
38201@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38202
38203 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38204
38205+ pax_track_stack();
38206+
38207 /* Phase.1 : Merge same xref */
38208 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38209 xref_tmphash[i] = NULL;
38210diff -urNp linux-3.0.3/fs/jfs/super.c linux-3.0.3/fs/jfs/super.c
38211--- linux-3.0.3/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
38212+++ linux-3.0.3/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
38213@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38214
38215 jfs_inode_cachep =
38216 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38217- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38218+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38219 init_once);
38220 if (jfs_inode_cachep == NULL)
38221 return -ENOMEM;
38222diff -urNp linux-3.0.3/fs/Kconfig.binfmt linux-3.0.3/fs/Kconfig.binfmt
38223--- linux-3.0.3/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
38224+++ linux-3.0.3/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
38225@@ -86,7 +86,7 @@ config HAVE_AOUT
38226
38227 config BINFMT_AOUT
38228 tristate "Kernel support for a.out and ECOFF binaries"
38229- depends on HAVE_AOUT
38230+ depends on HAVE_AOUT && BROKEN
38231 ---help---
38232 A.out (Assembler.OUTput) is a set of formats for libraries and
38233 executables used in the earliest versions of UNIX. Linux used
38234diff -urNp linux-3.0.3/fs/libfs.c linux-3.0.3/fs/libfs.c
38235--- linux-3.0.3/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
38236+++ linux-3.0.3/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
38237@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38238
38239 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38240 struct dentry *next;
38241+ char d_name[sizeof(next->d_iname)];
38242+ const unsigned char *name;
38243+
38244 next = list_entry(p, struct dentry, d_u.d_child);
38245 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38246 if (!simple_positive(next)) {
38247@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38248
38249 spin_unlock(&next->d_lock);
38250 spin_unlock(&dentry->d_lock);
38251- if (filldir(dirent, next->d_name.name,
38252+ name = next->d_name.name;
38253+ if (name == next->d_iname) {
38254+ memcpy(d_name, name, next->d_name.len);
38255+ name = d_name;
38256+ }
38257+ if (filldir(dirent, name,
38258 next->d_name.len, filp->f_pos,
38259 next->d_inode->i_ino,
38260 dt_type(next->d_inode)) < 0)
38261diff -urNp linux-3.0.3/fs/lockd/clntproc.c linux-3.0.3/fs/lockd/clntproc.c
38262--- linux-3.0.3/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
38263+++ linux-3.0.3/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
38264@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38265 /*
38266 * Cookie counter for NLM requests
38267 */
38268-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38269+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38270
38271 void nlmclnt_next_cookie(struct nlm_cookie *c)
38272 {
38273- u32 cookie = atomic_inc_return(&nlm_cookie);
38274+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38275
38276 memcpy(c->data, &cookie, 4);
38277 c->len=4;
38278@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38279 struct nlm_rqst reqst, *req;
38280 int status;
38281
38282+ pax_track_stack();
38283+
38284 req = &reqst;
38285 memset(req, 0, sizeof(*req));
38286 locks_init_lock(&req->a_args.lock.fl);
38287diff -urNp linux-3.0.3/fs/locks.c linux-3.0.3/fs/locks.c
38288--- linux-3.0.3/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
38289+++ linux-3.0.3/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
38290@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38291 return;
38292
38293 if (filp->f_op && filp->f_op->flock) {
38294- struct file_lock fl = {
38295+ struct file_lock flock = {
38296 .fl_pid = current->tgid,
38297 .fl_file = filp,
38298 .fl_flags = FL_FLOCK,
38299 .fl_type = F_UNLCK,
38300 .fl_end = OFFSET_MAX,
38301 };
38302- filp->f_op->flock(filp, F_SETLKW, &fl);
38303- if (fl.fl_ops && fl.fl_ops->fl_release_private)
38304- fl.fl_ops->fl_release_private(&fl);
38305+ filp->f_op->flock(filp, F_SETLKW, &flock);
38306+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
38307+ flock.fl_ops->fl_release_private(&flock);
38308 }
38309
38310 lock_flocks();
38311diff -urNp linux-3.0.3/fs/logfs/super.c linux-3.0.3/fs/logfs/super.c
38312--- linux-3.0.3/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38313+++ linux-3.0.3/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38314@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38315 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38316 int err, valid0, valid1;
38317
38318+ pax_track_stack();
38319+
38320 /* read first superblock */
38321 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38322 if (err)
38323diff -urNp linux-3.0.3/fs/namei.c linux-3.0.3/fs/namei.c
38324--- linux-3.0.3/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38325+++ linux-3.0.3/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38326@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38327 return ret;
38328
38329 /*
38330- * Read/write DACs are always overridable.
38331- * Executable DACs are overridable for all directories and
38332- * for non-directories that have least one exec bit set.
38333+ * Searching includes executable on directories, else just read.
38334 */
38335- if (!(mask & MAY_EXEC) || execute_ok(inode))
38336- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38337+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38338+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38339+#ifdef CONFIG_GRKERNSEC
38340+ if (flags & IPERM_FLAG_RCU)
38341+ return -ECHILD;
38342+#endif
38343+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38344 return 0;
38345+ }
38346
38347 /*
38348- * Searching includes executable on directories, else just read.
38349+ * Read/write DACs are always overridable.
38350+ * Executable DACs are overridable for all directories and
38351+ * for non-directories that have least one exec bit set.
38352 */
38353- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38354- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38355- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38356+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38357+#ifdef CONFIG_GRKERNSEC
38358+ if (flags & IPERM_FLAG_RCU)
38359+ return -ECHILD;
38360+#endif
38361+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38362 return 0;
38363+ }
38364
38365 return -EACCES;
38366 }
38367@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38368 br_read_unlock(vfsmount_lock);
38369 }
38370
38371+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38372+ return -ENOENT;
38373+
38374 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38375 return 0;
38376
38377@@ -593,9 +606,16 @@ static inline int exec_permission(struct
38378 if (ret == -ECHILD)
38379 return ret;
38380
38381- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38382- ns_capable(ns, CAP_DAC_READ_SEARCH))
38383+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38384 goto ok;
38385+ else {
38386+#ifdef CONFIG_GRKERNSEC
38387+ if (flags & IPERM_FLAG_RCU)
38388+ return -ECHILD;
38389+#endif
38390+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38391+ goto ok;
38392+ }
38393
38394 return ret;
38395 ok:
38396@@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38397 return error;
38398 }
38399
38400+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
38401+ dentry->d_inode, dentry, nd->path.mnt)) {
38402+ error = -EACCES;
38403+ *p = ERR_PTR(error); /* no ->put_link(), please */
38404+ path_put(&nd->path);
38405+ return error;
38406+ }
38407+
38408 nd->last_type = LAST_BIND;
38409 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38410 error = PTR_ERR(*p);
38411 if (!IS_ERR(*p)) {
38412- char *s = nd_get_link(nd);
38413+ const char *s = nd_get_link(nd);
38414 error = 0;
38415 if (s)
38416 error = __vfs_follow_link(nd, s);
38417@@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38418 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38419
38420 if (likely(!retval)) {
38421+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38422+ return -ENOENT;
38423+
38424 if (unlikely(!audit_dummy_context())) {
38425 if (nd->path.dentry && nd->inode)
38426 audit_inode(name, nd->path.dentry);
38427@@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38428 return error;
38429 }
38430
38431+/*
38432+ * Note that while the flag value (low two bits) for sys_open means:
38433+ * 00 - read-only
38434+ * 01 - write-only
38435+ * 10 - read-write
38436+ * 11 - special
38437+ * it is changed into
38438+ * 00 - no permissions needed
38439+ * 01 - read-permission
38440+ * 10 - write-permission
38441+ * 11 - read-write
38442+ * for the internal routines (ie open_namei()/follow_link() etc)
38443+ * This is more logical, and also allows the 00 "no perm needed"
38444+ * to be used for symlinks (where the permissions are checked
38445+ * later).
38446+ *
38447+*/
38448+static inline int open_to_namei_flags(int flag)
38449+{
38450+ if ((flag+1) & O_ACCMODE)
38451+ flag++;
38452+ return flag;
38453+}
38454+
38455 static int may_open(struct path *path, int acc_mode, int flag)
38456 {
38457 struct dentry *dentry = path->dentry;
38458@@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38459 /*
38460 * Ensure there are no outstanding leases on the file.
38461 */
38462- return break_lease(inode, flag);
38463+ error = break_lease(inode, flag);
38464+
38465+ if (error)
38466+ return error;
38467+
38468+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38469+ error = -EPERM;
38470+ goto exit;
38471+ }
38472+
38473+ if (gr_handle_rawio(inode)) {
38474+ error = -EPERM;
38475+ goto exit;
38476+ }
38477+
38478+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38479+ error = -EACCES;
38480+ goto exit;
38481+ }
38482+exit:
38483+ return error;
38484 }
38485
38486 static int handle_truncate(struct file *filp)
38487@@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38488 }
38489
38490 /*
38491- * Note that while the flag value (low two bits) for sys_open means:
38492- * 00 - read-only
38493- * 01 - write-only
38494- * 10 - read-write
38495- * 11 - special
38496- * it is changed into
38497- * 00 - no permissions needed
38498- * 01 - read-permission
38499- * 10 - write-permission
38500- * 11 - read-write
38501- * for the internal routines (ie open_namei()/follow_link() etc)
38502- * This is more logical, and also allows the 00 "no perm needed"
38503- * to be used for symlinks (where the permissions are checked
38504- * later).
38505- *
38506-*/
38507-static inline int open_to_namei_flags(int flag)
38508-{
38509- if ((flag+1) & O_ACCMODE)
38510- flag++;
38511- return flag;
38512-}
38513-
38514-/*
38515 * Handle the last step of open()
38516 */
38517 static struct file *do_last(struct nameidata *nd, struct path *path,
38518@@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38519 struct dentry *dir = nd->path.dentry;
38520 struct dentry *dentry;
38521 int open_flag = op->open_flag;
38522+ int flag = open_to_namei_flags(open_flag);
38523 int will_truncate = open_flag & O_TRUNC;
38524 int want_write = 0;
38525 int acc_mode = op->acc_mode;
38526@@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38527 /* Negative dentry, just create the file */
38528 if (!dentry->d_inode) {
38529 int mode = op->mode;
38530+
38531+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38532+ error = -EACCES;
38533+ goto exit_mutex_unlock;
38534+ }
38535+
38536 if (!IS_POSIXACL(dir->d_inode))
38537 mode &= ~current_umask();
38538 /*
38539@@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38540 error = vfs_create(dir->d_inode, dentry, mode, nd);
38541 if (error)
38542 goto exit_mutex_unlock;
38543+ else
38544+ gr_handle_create(path->dentry, path->mnt);
38545 mutex_unlock(&dir->d_inode->i_mutex);
38546 dput(nd->path.dentry);
38547 nd->path.dentry = dentry;
38548@@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38549 /*
38550 * It already exists.
38551 */
38552+
38553+ /* only check if O_CREAT is specified, all other checks need to go
38554+ into may_open */
38555+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38556+ error = -EACCES;
38557+ goto exit_mutex_unlock;
38558+ }
38559+
38560 mutex_unlock(&dir->d_inode->i_mutex);
38561 audit_inode(pathname, path->dentry);
38562
38563@@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38564 error = may_mknod(mode);
38565 if (error)
38566 goto out_dput;
38567+
38568+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38569+ error = -EPERM;
38570+ goto out_dput;
38571+ }
38572+
38573+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38574+ error = -EACCES;
38575+ goto out_dput;
38576+ }
38577+
38578 error = mnt_want_write(nd.path.mnt);
38579 if (error)
38580 goto out_dput;
38581@@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38582 }
38583 out_drop_write:
38584 mnt_drop_write(nd.path.mnt);
38585+
38586+ if (!error)
38587+ gr_handle_create(dentry, nd.path.mnt);
38588 out_dput:
38589 dput(dentry);
38590 out_unlock:
38591@@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38592 if (IS_ERR(dentry))
38593 goto out_unlock;
38594
38595+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38596+ error = -EACCES;
38597+ goto out_dput;
38598+ }
38599+
38600 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38601 mode &= ~current_umask();
38602 error = mnt_want_write(nd.path.mnt);
38603@@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38604 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38605 out_drop_write:
38606 mnt_drop_write(nd.path.mnt);
38607+
38608+ if (!error)
38609+ gr_handle_create(dentry, nd.path.mnt);
38610+
38611 out_dput:
38612 dput(dentry);
38613 out_unlock:
38614@@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38615 char * name;
38616 struct dentry *dentry;
38617 struct nameidata nd;
38618+ ino_t saved_ino = 0;
38619+ dev_t saved_dev = 0;
38620
38621 error = user_path_parent(dfd, pathname, &nd, &name);
38622 if (error)
38623@@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38624 error = -ENOENT;
38625 goto exit3;
38626 }
38627+
38628+ if (dentry->d_inode->i_nlink <= 1) {
38629+ saved_ino = dentry->d_inode->i_ino;
38630+ saved_dev = gr_get_dev_from_dentry(dentry);
38631+ }
38632+
38633+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38634+ error = -EACCES;
38635+ goto exit3;
38636+ }
38637+
38638 error = mnt_want_write(nd.path.mnt);
38639 if (error)
38640 goto exit3;
38641@@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38642 if (error)
38643 goto exit4;
38644 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38645+ if (!error && (saved_dev || saved_ino))
38646+ gr_handle_delete(saved_ino, saved_dev);
38647 exit4:
38648 mnt_drop_write(nd.path.mnt);
38649 exit3:
38650@@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38651 struct dentry *dentry;
38652 struct nameidata nd;
38653 struct inode *inode = NULL;
38654+ ino_t saved_ino = 0;
38655+ dev_t saved_dev = 0;
38656
38657 error = user_path_parent(dfd, pathname, &nd, &name);
38658 if (error)
38659@@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38660 if (!inode)
38661 goto slashes;
38662 ihold(inode);
38663+
38664+ if (inode->i_nlink <= 1) {
38665+ saved_ino = inode->i_ino;
38666+ saved_dev = gr_get_dev_from_dentry(dentry);
38667+ }
38668+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38669+ error = -EACCES;
38670+ goto exit2;
38671+ }
38672+
38673 error = mnt_want_write(nd.path.mnt);
38674 if (error)
38675 goto exit2;
38676@@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38677 if (error)
38678 goto exit3;
38679 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38680+ if (!error && (saved_ino || saved_dev))
38681+ gr_handle_delete(saved_ino, saved_dev);
38682 exit3:
38683 mnt_drop_write(nd.path.mnt);
38684 exit2:
38685@@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38686 if (IS_ERR(dentry))
38687 goto out_unlock;
38688
38689+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38690+ error = -EACCES;
38691+ goto out_dput;
38692+ }
38693+
38694 error = mnt_want_write(nd.path.mnt);
38695 if (error)
38696 goto out_dput;
38697@@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38698 if (error)
38699 goto out_drop_write;
38700 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38701+ if (!error)
38702+ gr_handle_create(dentry, nd.path.mnt);
38703 out_drop_write:
38704 mnt_drop_write(nd.path.mnt);
38705 out_dput:
38706@@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38707 error = PTR_ERR(new_dentry);
38708 if (IS_ERR(new_dentry))
38709 goto out_unlock;
38710+
38711+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38712+ old_path.dentry->d_inode,
38713+ old_path.dentry->d_inode->i_mode, to)) {
38714+ error = -EACCES;
38715+ goto out_dput;
38716+ }
38717+
38718+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38719+ old_path.dentry, old_path.mnt, to)) {
38720+ error = -EACCES;
38721+ goto out_dput;
38722+ }
38723+
38724 error = mnt_want_write(nd.path.mnt);
38725 if (error)
38726 goto out_dput;
38727@@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38728 if (error)
38729 goto out_drop_write;
38730 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38731+ if (!error)
38732+ gr_handle_create(new_dentry, nd.path.mnt);
38733 out_drop_write:
38734 mnt_drop_write(nd.path.mnt);
38735 out_dput:
38736@@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38737 char *to;
38738 int error;
38739
38740+ pax_track_stack();
38741+
38742 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38743 if (error)
38744 goto exit;
38745@@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38746 if (new_dentry == trap)
38747 goto exit5;
38748
38749+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38750+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
38751+ to);
38752+ if (error)
38753+ goto exit5;
38754+
38755 error = mnt_want_write(oldnd.path.mnt);
38756 if (error)
38757 goto exit5;
38758@@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38759 goto exit6;
38760 error = vfs_rename(old_dir->d_inode, old_dentry,
38761 new_dir->d_inode, new_dentry);
38762+ if (!error)
38763+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38764+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38765 exit6:
38766 mnt_drop_write(oldnd.path.mnt);
38767 exit5:
38768@@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38769
38770 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38771 {
38772+ char tmpbuf[64];
38773+ const char *newlink;
38774 int len;
38775
38776 len = PTR_ERR(link);
38777@@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38778 len = strlen(link);
38779 if (len > (unsigned) buflen)
38780 len = buflen;
38781- if (copy_to_user(buffer, link, len))
38782+
38783+ if (len < sizeof(tmpbuf)) {
38784+ memcpy(tmpbuf, link, len);
38785+ newlink = tmpbuf;
38786+ } else
38787+ newlink = link;
38788+
38789+ if (copy_to_user(buffer, newlink, len))
38790 len = -EFAULT;
38791 out:
38792 return len;
38793diff -urNp linux-3.0.3/fs/namespace.c linux-3.0.3/fs/namespace.c
38794--- linux-3.0.3/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38795+++ linux-3.0.3/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38796@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38797 if (!(sb->s_flags & MS_RDONLY))
38798 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38799 up_write(&sb->s_umount);
38800+
38801+ gr_log_remount(mnt->mnt_devname, retval);
38802+
38803 return retval;
38804 }
38805
38806@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38807 br_write_unlock(vfsmount_lock);
38808 up_write(&namespace_sem);
38809 release_mounts(&umount_list);
38810+
38811+ gr_log_unmount(mnt->mnt_devname, retval);
38812+
38813 return retval;
38814 }
38815
38816@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38817 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38818 MS_STRICTATIME);
38819
38820+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38821+ retval = -EPERM;
38822+ goto dput_out;
38823+ }
38824+
38825+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38826+ retval = -EPERM;
38827+ goto dput_out;
38828+ }
38829+
38830 if (flags & MS_REMOUNT)
38831 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38832 data_page);
38833@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38834 dev_name, data_page);
38835 dput_out:
38836 path_put(&path);
38837+
38838+ gr_log_mount(dev_name, dir_name, retval);
38839+
38840 return retval;
38841 }
38842
38843@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38844 if (error)
38845 goto out2;
38846
38847+ if (gr_handle_chroot_pivot()) {
38848+ error = -EPERM;
38849+ goto out2;
38850+ }
38851+
38852 get_fs_root(current->fs, &root);
38853 error = lock_mount(&old);
38854 if (error)
38855diff -urNp linux-3.0.3/fs/ncpfs/dir.c linux-3.0.3/fs/ncpfs/dir.c
38856--- linux-3.0.3/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38857+++ linux-3.0.3/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38858@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38859 int res, val = 0, len;
38860 __u8 __name[NCP_MAXPATHLEN + 1];
38861
38862+ pax_track_stack();
38863+
38864 if (dentry == dentry->d_sb->s_root)
38865 return 1;
38866
38867@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38868 int error, res, len;
38869 __u8 __name[NCP_MAXPATHLEN + 1];
38870
38871+ pax_track_stack();
38872+
38873 error = -EIO;
38874 if (!ncp_conn_valid(server))
38875 goto finished;
38876@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38877 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38878 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38879
38880+ pax_track_stack();
38881+
38882 ncp_age_dentry(server, dentry);
38883 len = sizeof(__name);
38884 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38885@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38886 int error, len;
38887 __u8 __name[NCP_MAXPATHLEN + 1];
38888
38889+ pax_track_stack();
38890+
38891 DPRINTK("ncp_mkdir: making %s/%s\n",
38892 dentry->d_parent->d_name.name, dentry->d_name.name);
38893
38894@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38895 int old_len, new_len;
38896 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38897
38898+ pax_track_stack();
38899+
38900 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38901 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38902 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38903diff -urNp linux-3.0.3/fs/ncpfs/inode.c linux-3.0.3/fs/ncpfs/inode.c
38904--- linux-3.0.3/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38905+++ linux-3.0.3/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38906@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38907 #endif
38908 struct ncp_entry_info finfo;
38909
38910+ pax_track_stack();
38911+
38912 memset(&data, 0, sizeof(data));
38913 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38914 if (!server)
38915diff -urNp linux-3.0.3/fs/nfs/inode.c linux-3.0.3/fs/nfs/inode.c
38916--- linux-3.0.3/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38917+++ linux-3.0.3/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38918@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38919 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38920 nfsi->attrtimeo_timestamp = jiffies;
38921
38922- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38923+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38924 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38925 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38926 else
38927@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38928 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38929 }
38930
38931-static atomic_long_t nfs_attr_generation_counter;
38932+static atomic_long_unchecked_t nfs_attr_generation_counter;
38933
38934 static unsigned long nfs_read_attr_generation_counter(void)
38935 {
38936- return atomic_long_read(&nfs_attr_generation_counter);
38937+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38938 }
38939
38940 unsigned long nfs_inc_attr_generation_counter(void)
38941 {
38942- return atomic_long_inc_return(&nfs_attr_generation_counter);
38943+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38944 }
38945
38946 void nfs_fattr_init(struct nfs_fattr *fattr)
38947diff -urNp linux-3.0.3/fs/nfsd/nfs4state.c linux-3.0.3/fs/nfsd/nfs4state.c
38948--- linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38949+++ linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38950@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38951 unsigned int strhashval;
38952 int err;
38953
38954+ pax_track_stack();
38955+
38956 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38957 (long long) lock->lk_offset,
38958 (long long) lock->lk_length);
38959diff -urNp linux-3.0.3/fs/nfsd/nfs4xdr.c linux-3.0.3/fs/nfsd/nfs4xdr.c
38960--- linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38961+++ linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38962@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38963 .dentry = dentry,
38964 };
38965
38966+ pax_track_stack();
38967+
38968 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38969 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38970 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38971diff -urNp linux-3.0.3/fs/nfsd/vfs.c linux-3.0.3/fs/nfsd/vfs.c
38972--- linux-3.0.3/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
38973+++ linux-3.0.3/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
38974@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38975 } else {
38976 oldfs = get_fs();
38977 set_fs(KERNEL_DS);
38978- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38979+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38980 set_fs(oldfs);
38981 }
38982
38983@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
38984
38985 /* Write the data. */
38986 oldfs = get_fs(); set_fs(KERNEL_DS);
38987- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
38988+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
38989 set_fs(oldfs);
38990 if (host_err < 0)
38991 goto out_nfserr;
38992@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
38993 */
38994
38995 oldfs = get_fs(); set_fs(KERNEL_DS);
38996- host_err = inode->i_op->readlink(dentry, buf, *lenp);
38997+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
38998 set_fs(oldfs);
38999
39000 if (host_err < 0)
39001diff -urNp linux-3.0.3/fs/notify/fanotify/fanotify_user.c linux-3.0.3/fs/notify/fanotify/fanotify_user.c
39002--- linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
39003+++ linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
39004@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39005 goto out_close_fd;
39006
39007 ret = -EFAULT;
39008- if (copy_to_user(buf, &fanotify_event_metadata,
39009+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39010+ copy_to_user(buf, &fanotify_event_metadata,
39011 fanotify_event_metadata.event_len))
39012 goto out_kill_access_response;
39013
39014diff -urNp linux-3.0.3/fs/notify/notification.c linux-3.0.3/fs/notify/notification.c
39015--- linux-3.0.3/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
39016+++ linux-3.0.3/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
39017@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39018 * get set to 0 so it will never get 'freed'
39019 */
39020 static struct fsnotify_event *q_overflow_event;
39021-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39022+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39023
39024 /**
39025 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39026@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39027 */
39028 u32 fsnotify_get_cookie(void)
39029 {
39030- return atomic_inc_return(&fsnotify_sync_cookie);
39031+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39032 }
39033 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39034
39035diff -urNp linux-3.0.3/fs/ntfs/dir.c linux-3.0.3/fs/ntfs/dir.c
39036--- linux-3.0.3/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39037+++ linux-3.0.3/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39038@@ -1329,7 +1329,7 @@ find_next_index_buffer:
39039 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39040 ~(s64)(ndir->itype.index.block_size - 1)));
39041 /* Bounds checks. */
39042- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39043+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39044 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39045 "inode 0x%lx or driver bug.", vdir->i_ino);
39046 goto err_out;
39047diff -urNp linux-3.0.3/fs/ntfs/file.c linux-3.0.3/fs/ntfs/file.c
39048--- linux-3.0.3/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
39049+++ linux-3.0.3/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
39050@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39051 #endif /* NTFS_RW */
39052 };
39053
39054-const struct file_operations ntfs_empty_file_ops = {};
39055+const struct file_operations ntfs_empty_file_ops __read_only;
39056
39057-const struct inode_operations ntfs_empty_inode_ops = {};
39058+const struct inode_operations ntfs_empty_inode_ops __read_only;
39059diff -urNp linux-3.0.3/fs/ocfs2/localalloc.c linux-3.0.3/fs/ocfs2/localalloc.c
39060--- linux-3.0.3/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
39061+++ linux-3.0.3/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
39062@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39063 goto bail;
39064 }
39065
39066- atomic_inc(&osb->alloc_stats.moves);
39067+ atomic_inc_unchecked(&osb->alloc_stats.moves);
39068
39069 bail:
39070 if (handle)
39071diff -urNp linux-3.0.3/fs/ocfs2/namei.c linux-3.0.3/fs/ocfs2/namei.c
39072--- linux-3.0.3/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
39073+++ linux-3.0.3/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
39074@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39075 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39076 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39077
39078+ pax_track_stack();
39079+
39080 /* At some point it might be nice to break this function up a
39081 * bit. */
39082
39083diff -urNp linux-3.0.3/fs/ocfs2/ocfs2.h linux-3.0.3/fs/ocfs2/ocfs2.h
39084--- linux-3.0.3/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
39085+++ linux-3.0.3/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
39086@@ -235,11 +235,11 @@ enum ocfs2_vol_state
39087
39088 struct ocfs2_alloc_stats
39089 {
39090- atomic_t moves;
39091- atomic_t local_data;
39092- atomic_t bitmap_data;
39093- atomic_t bg_allocs;
39094- atomic_t bg_extends;
39095+ atomic_unchecked_t moves;
39096+ atomic_unchecked_t local_data;
39097+ atomic_unchecked_t bitmap_data;
39098+ atomic_unchecked_t bg_allocs;
39099+ atomic_unchecked_t bg_extends;
39100 };
39101
39102 enum ocfs2_local_alloc_state
39103diff -urNp linux-3.0.3/fs/ocfs2/suballoc.c linux-3.0.3/fs/ocfs2/suballoc.c
39104--- linux-3.0.3/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
39105+++ linux-3.0.3/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
39106@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39107 mlog_errno(status);
39108 goto bail;
39109 }
39110- atomic_inc(&osb->alloc_stats.bg_extends);
39111+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39112
39113 /* You should never ask for this much metadata */
39114 BUG_ON(bits_wanted >
39115@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39116 mlog_errno(status);
39117 goto bail;
39118 }
39119- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39120+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39121
39122 *suballoc_loc = res.sr_bg_blkno;
39123 *suballoc_bit_start = res.sr_bit_offset;
39124@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39125 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39126 res->sr_bits);
39127
39128- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39129+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39130
39131 BUG_ON(res->sr_bits != 1);
39132
39133@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39134 mlog_errno(status);
39135 goto bail;
39136 }
39137- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39138+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39139
39140 BUG_ON(res.sr_bits != 1);
39141
39142@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39143 cluster_start,
39144 num_clusters);
39145 if (!status)
39146- atomic_inc(&osb->alloc_stats.local_data);
39147+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
39148 } else {
39149 if (min_clusters > (osb->bitmap_cpg - 1)) {
39150 /* The only paths asking for contiguousness
39151@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39152 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39153 res.sr_bg_blkno,
39154 res.sr_bit_offset);
39155- atomic_inc(&osb->alloc_stats.bitmap_data);
39156+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39157 *num_clusters = res.sr_bits;
39158 }
39159 }
39160diff -urNp linux-3.0.3/fs/ocfs2/super.c linux-3.0.3/fs/ocfs2/super.c
39161--- linux-3.0.3/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
39162+++ linux-3.0.3/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
39163@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39164 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39165 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39166 "Stats",
39167- atomic_read(&osb->alloc_stats.bitmap_data),
39168- atomic_read(&osb->alloc_stats.local_data),
39169- atomic_read(&osb->alloc_stats.bg_allocs),
39170- atomic_read(&osb->alloc_stats.moves),
39171- atomic_read(&osb->alloc_stats.bg_extends));
39172+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39173+ atomic_read_unchecked(&osb->alloc_stats.local_data),
39174+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39175+ atomic_read_unchecked(&osb->alloc_stats.moves),
39176+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39177
39178 out += snprintf(buf + out, len - out,
39179 "%10s => State: %u Descriptor: %llu Size: %u bits "
39180@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
39181 spin_lock_init(&osb->osb_xattr_lock);
39182 ocfs2_init_steal_slots(osb);
39183
39184- atomic_set(&osb->alloc_stats.moves, 0);
39185- atomic_set(&osb->alloc_stats.local_data, 0);
39186- atomic_set(&osb->alloc_stats.bitmap_data, 0);
39187- atomic_set(&osb->alloc_stats.bg_allocs, 0);
39188- atomic_set(&osb->alloc_stats.bg_extends, 0);
39189+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39190+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39191+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39192+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39193+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39194
39195 /* Copy the blockcheck stats from the superblock probe */
39196 osb->osb_ecc_stats = *stats;
39197diff -urNp linux-3.0.3/fs/ocfs2/symlink.c linux-3.0.3/fs/ocfs2/symlink.c
39198--- linux-3.0.3/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
39199+++ linux-3.0.3/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
39200@@ -142,7 +142,7 @@ bail:
39201
39202 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39203 {
39204- char *link = nd_get_link(nd);
39205+ const char *link = nd_get_link(nd);
39206 if (!IS_ERR(link))
39207 kfree(link);
39208 }
39209diff -urNp linux-3.0.3/fs/open.c linux-3.0.3/fs/open.c
39210--- linux-3.0.3/fs/open.c 2011-07-21 22:17:23.000000000 -0400
39211+++ linux-3.0.3/fs/open.c 2011-08-23 21:48:14.000000000 -0400
39212@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39213 error = locks_verify_truncate(inode, NULL, length);
39214 if (!error)
39215 error = security_path_truncate(&path);
39216+
39217+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39218+ error = -EACCES;
39219+
39220 if (!error)
39221 error = do_truncate(path.dentry, length, 0, NULL);
39222
39223@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39224 if (__mnt_is_readonly(path.mnt))
39225 res = -EROFS;
39226
39227+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39228+ res = -EACCES;
39229+
39230 out_path_release:
39231 path_put(&path);
39232 out:
39233@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39234 if (error)
39235 goto dput_and_out;
39236
39237+ gr_log_chdir(path.dentry, path.mnt);
39238+
39239 set_fs_pwd(current->fs, &path);
39240
39241 dput_and_out:
39242@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39243 goto out_putf;
39244
39245 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39246+
39247+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39248+ error = -EPERM;
39249+
39250+ if (!error)
39251+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39252+
39253 if (!error)
39254 set_fs_pwd(current->fs, &file->f_path);
39255 out_putf:
39256@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39257 if (error)
39258 goto dput_and_out;
39259
39260+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39261+ goto dput_and_out;
39262+
39263+ if (gr_handle_chroot_caps(&path)) {
39264+ error = -ENOMEM;
39265+ goto dput_and_out;
39266+ }
39267+
39268 set_fs_root(current->fs, &path);
39269+
39270+ gr_handle_chroot_chdir(&path);
39271+
39272 error = 0;
39273 dput_and_out:
39274 path_put(&path);
39275@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39276 err = mnt_want_write_file(file);
39277 if (err)
39278 goto out_putf;
39279+
39280 mutex_lock(&inode->i_mutex);
39281+
39282+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39283+ err = -EACCES;
39284+ goto out_unlock;
39285+ }
39286+
39287 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39288 if (err)
39289 goto out_unlock;
39290 if (mode == (mode_t) -1)
39291 mode = inode->i_mode;
39292+
39293+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39294+ err = -EACCES;
39295+ goto out_unlock;
39296+ }
39297+
39298 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39299 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39300 err = notify_change(dentry, &newattrs);
39301@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39302 error = mnt_want_write(path.mnt);
39303 if (error)
39304 goto dput_and_out;
39305+
39306 mutex_lock(&inode->i_mutex);
39307+
39308+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39309+ error = -EACCES;
39310+ goto out_unlock;
39311+ }
39312+
39313 error = security_path_chmod(path.dentry, path.mnt, mode);
39314 if (error)
39315 goto out_unlock;
39316 if (mode == (mode_t) -1)
39317 mode = inode->i_mode;
39318+
39319+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39320+ error = -EACCES;
39321+ goto out_unlock;
39322+ }
39323+
39324 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39325 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39326 error = notify_change(path.dentry, &newattrs);
39327@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39328 int error;
39329 struct iattr newattrs;
39330
39331+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
39332+ return -EACCES;
39333+
39334 newattrs.ia_valid = ATTR_CTIME;
39335 if (user != (uid_t) -1) {
39336 newattrs.ia_valid |= ATTR_UID;
39337@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39338 if (!IS_ERR(tmp)) {
39339 fd = get_unused_fd_flags(flags);
39340 if (fd >= 0) {
39341- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39342+ struct file *f;
39343+ /* don't allow to be set by userland */
39344+ flags &= ~FMODE_GREXEC;
39345+ f = do_filp_open(dfd, tmp, &op, lookup);
39346 if (IS_ERR(f)) {
39347 put_unused_fd(fd);
39348 fd = PTR_ERR(f);
39349diff -urNp linux-3.0.3/fs/partitions/ldm.c linux-3.0.3/fs/partitions/ldm.c
39350--- linux-3.0.3/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39351+++ linux-3.0.3/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39352@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39353 ldm_error ("A VBLK claims to have %d parts.", num);
39354 return false;
39355 }
39356+
39357 if (rec >= num) {
39358 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39359 return false;
39360@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39361 goto found;
39362 }
39363
39364- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39365+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39366 if (!f) {
39367 ldm_crit ("Out of memory.");
39368 return false;
39369diff -urNp linux-3.0.3/fs/pipe.c linux-3.0.3/fs/pipe.c
39370--- linux-3.0.3/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39371+++ linux-3.0.3/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39372@@ -420,9 +420,9 @@ redo:
39373 }
39374 if (bufs) /* More to do? */
39375 continue;
39376- if (!pipe->writers)
39377+ if (!atomic_read(&pipe->writers))
39378 break;
39379- if (!pipe->waiting_writers) {
39380+ if (!atomic_read(&pipe->waiting_writers)) {
39381 /* syscall merging: Usually we must not sleep
39382 * if O_NONBLOCK is set, or if we got some data.
39383 * But if a writer sleeps in kernel space, then
39384@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39385 mutex_lock(&inode->i_mutex);
39386 pipe = inode->i_pipe;
39387
39388- if (!pipe->readers) {
39389+ if (!atomic_read(&pipe->readers)) {
39390 send_sig(SIGPIPE, current, 0);
39391 ret = -EPIPE;
39392 goto out;
39393@@ -530,7 +530,7 @@ redo1:
39394 for (;;) {
39395 int bufs;
39396
39397- if (!pipe->readers) {
39398+ if (!atomic_read(&pipe->readers)) {
39399 send_sig(SIGPIPE, current, 0);
39400 if (!ret)
39401 ret = -EPIPE;
39402@@ -616,9 +616,9 @@ redo2:
39403 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39404 do_wakeup = 0;
39405 }
39406- pipe->waiting_writers++;
39407+ atomic_inc(&pipe->waiting_writers);
39408 pipe_wait(pipe);
39409- pipe->waiting_writers--;
39410+ atomic_dec(&pipe->waiting_writers);
39411 }
39412 out:
39413 mutex_unlock(&inode->i_mutex);
39414@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39415 mask = 0;
39416 if (filp->f_mode & FMODE_READ) {
39417 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39418- if (!pipe->writers && filp->f_version != pipe->w_counter)
39419+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39420 mask |= POLLHUP;
39421 }
39422
39423@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39424 * Most Unices do not set POLLERR for FIFOs but on Linux they
39425 * behave exactly like pipes for poll().
39426 */
39427- if (!pipe->readers)
39428+ if (!atomic_read(&pipe->readers))
39429 mask |= POLLERR;
39430 }
39431
39432@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39433
39434 mutex_lock(&inode->i_mutex);
39435 pipe = inode->i_pipe;
39436- pipe->readers -= decr;
39437- pipe->writers -= decw;
39438+ atomic_sub(decr, &pipe->readers);
39439+ atomic_sub(decw, &pipe->writers);
39440
39441- if (!pipe->readers && !pipe->writers) {
39442+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39443 free_pipe_info(inode);
39444 } else {
39445 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39446@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39447
39448 if (inode->i_pipe) {
39449 ret = 0;
39450- inode->i_pipe->readers++;
39451+ atomic_inc(&inode->i_pipe->readers);
39452 }
39453
39454 mutex_unlock(&inode->i_mutex);
39455@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39456
39457 if (inode->i_pipe) {
39458 ret = 0;
39459- inode->i_pipe->writers++;
39460+ atomic_inc(&inode->i_pipe->writers);
39461 }
39462
39463 mutex_unlock(&inode->i_mutex);
39464@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39465 if (inode->i_pipe) {
39466 ret = 0;
39467 if (filp->f_mode & FMODE_READ)
39468- inode->i_pipe->readers++;
39469+ atomic_inc(&inode->i_pipe->readers);
39470 if (filp->f_mode & FMODE_WRITE)
39471- inode->i_pipe->writers++;
39472+ atomic_inc(&inode->i_pipe->writers);
39473 }
39474
39475 mutex_unlock(&inode->i_mutex);
39476@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39477 inode->i_pipe = NULL;
39478 }
39479
39480-static struct vfsmount *pipe_mnt __read_mostly;
39481+struct vfsmount *pipe_mnt __read_mostly;
39482
39483 /*
39484 * pipefs_dname() is called from d_path().
39485@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39486 goto fail_iput;
39487 inode->i_pipe = pipe;
39488
39489- pipe->readers = pipe->writers = 1;
39490+ atomic_set(&pipe->readers, 1);
39491+ atomic_set(&pipe->writers, 1);
39492 inode->i_fop = &rdwr_pipefifo_fops;
39493
39494 /*
39495diff -urNp linux-3.0.3/fs/proc/array.c linux-3.0.3/fs/proc/array.c
39496--- linux-3.0.3/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39497+++ linux-3.0.3/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39498@@ -60,6 +60,7 @@
39499 #include <linux/tty.h>
39500 #include <linux/string.h>
39501 #include <linux/mman.h>
39502+#include <linux/grsecurity.h>
39503 #include <linux/proc_fs.h>
39504 #include <linux/ioport.h>
39505 #include <linux/uaccess.h>
39506@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39507 seq_putc(m, '\n');
39508 }
39509
39510+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39511+static inline void task_pax(struct seq_file *m, struct task_struct *p)
39512+{
39513+ if (p->mm)
39514+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39515+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39516+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39517+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39518+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39519+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39520+ else
39521+ seq_printf(m, "PaX:\t-----\n");
39522+}
39523+#endif
39524+
39525 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39526 struct pid *pid, struct task_struct *task)
39527 {
39528@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39529 task_cpus_allowed(m, task);
39530 cpuset_task_status_allowed(m, task);
39531 task_context_switch_counts(m, task);
39532+
39533+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39534+ task_pax(m, task);
39535+#endif
39536+
39537+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39538+ task_grsec_rbac(m, task);
39539+#endif
39540+
39541 return 0;
39542 }
39543
39544+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39545+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39546+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39547+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39548+#endif
39549+
39550 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39551 struct pid *pid, struct task_struct *task, int whole)
39552 {
39553@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39554 cputime_t cutime, cstime, utime, stime;
39555 cputime_t cgtime, gtime;
39556 unsigned long rsslim = 0;
39557- char tcomm[sizeof(task->comm)];
39558+ char tcomm[sizeof(task->comm)] = { 0 };
39559 unsigned long flags;
39560
39561+ pax_track_stack();
39562+
39563 state = *get_task_state(task);
39564 vsize = eip = esp = 0;
39565 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39566@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39567 gtime = task->gtime;
39568 }
39569
39570+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39571+ if (PAX_RAND_FLAGS(mm)) {
39572+ eip = 0;
39573+ esp = 0;
39574+ wchan = 0;
39575+ }
39576+#endif
39577+#ifdef CONFIG_GRKERNSEC_HIDESYM
39578+ wchan = 0;
39579+ eip =0;
39580+ esp =0;
39581+#endif
39582+
39583 /* scale priority and nice values from timeslices to -20..20 */
39584 /* to make it look like a "normal" Unix priority/nice value */
39585 priority = task_prio(task);
39586@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39587 vsize,
39588 mm ? get_mm_rss(mm) : 0,
39589 rsslim,
39590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39591+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39592+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39593+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39594+#else
39595 mm ? (permitted ? mm->start_code : 1) : 0,
39596 mm ? (permitted ? mm->end_code : 1) : 0,
39597 (permitted && mm) ? mm->start_stack : 0,
39598+#endif
39599 esp,
39600 eip,
39601 /* The signal information here is obsolete.
39602@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39603
39604 return 0;
39605 }
39606+
39607+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39608+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39609+{
39610+ u32 curr_ip = 0;
39611+ unsigned long flags;
39612+
39613+ if (lock_task_sighand(task, &flags)) {
39614+ curr_ip = task->signal->curr_ip;
39615+ unlock_task_sighand(task, &flags);
39616+ }
39617+
39618+ return sprintf(buffer, "%pI4\n", &curr_ip);
39619+}
39620+#endif
39621diff -urNp linux-3.0.3/fs/proc/base.c linux-3.0.3/fs/proc/base.c
39622--- linux-3.0.3/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39623+++ linux-3.0.3/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39624@@ -107,6 +107,22 @@ struct pid_entry {
39625 union proc_op op;
39626 };
39627
39628+struct getdents_callback {
39629+ struct linux_dirent __user * current_dir;
39630+ struct linux_dirent __user * previous;
39631+ struct file * file;
39632+ int count;
39633+ int error;
39634+};
39635+
39636+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39637+ loff_t offset, u64 ino, unsigned int d_type)
39638+{
39639+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
39640+ buf->error = -EINVAL;
39641+ return 0;
39642+}
39643+
39644 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39645 .name = (NAME), \
39646 .len = sizeof(NAME) - 1, \
39647@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39648 if (task == current)
39649 return mm;
39650
39651+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39652+ return ERR_PTR(-EPERM);
39653+
39654 /*
39655 * If current is actively ptrace'ing, and would also be
39656 * permitted to freshly attach with ptrace now, permit it.
39657@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39658 if (!mm->arg_end)
39659 goto out_mm; /* Shh! No looking before we're done */
39660
39661+ if (gr_acl_handle_procpidmem(task))
39662+ goto out_mm;
39663+
39664 len = mm->arg_end - mm->arg_start;
39665
39666 if (len > PAGE_SIZE)
39667@@ -309,12 +331,28 @@ out:
39668 return res;
39669 }
39670
39671+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39672+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39673+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39674+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39675+#endif
39676+
39677 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39678 {
39679 struct mm_struct *mm = mm_for_maps(task);
39680 int res = PTR_ERR(mm);
39681 if (mm && !IS_ERR(mm)) {
39682 unsigned int nwords = 0;
39683+
39684+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39685+ /* allow if we're currently ptracing this task */
39686+ if (PAX_RAND_FLAGS(mm) &&
39687+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39688+ mmput(mm);
39689+ return res;
39690+ }
39691+#endif
39692+
39693 do {
39694 nwords += 2;
39695 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39696@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39697 }
39698
39699
39700-#ifdef CONFIG_KALLSYMS
39701+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39702 /*
39703 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39704 * Returns the resolved symbol. If that fails, simply return the address.
39705@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39706 mutex_unlock(&task->signal->cred_guard_mutex);
39707 }
39708
39709-#ifdef CONFIG_STACKTRACE
39710+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39711
39712 #define MAX_STACK_TRACE_DEPTH 64
39713
39714@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39715 return count;
39716 }
39717
39718-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39719+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39720 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39721 {
39722 long nr;
39723@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39724 /************************************************************************/
39725
39726 /* permission checks */
39727-static int proc_fd_access_allowed(struct inode *inode)
39728+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39729 {
39730 struct task_struct *task;
39731 int allowed = 0;
39732@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39733 */
39734 task = get_proc_task(inode);
39735 if (task) {
39736- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39737+ if (log)
39738+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39739+ else
39740+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39741 put_task_struct(task);
39742 }
39743 return allowed;
39744@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39745 if (!task)
39746 goto out_no_task;
39747
39748+ if (gr_acl_handle_procpidmem(task))
39749+ goto out;
39750+
39751 ret = -ENOMEM;
39752 page = (char *)__get_free_page(GFP_TEMPORARY);
39753 if (!page)
39754@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39755 path_put(&nd->path);
39756
39757 /* Are we allowed to snoop on the tasks file descriptors? */
39758- if (!proc_fd_access_allowed(inode))
39759+ if (!proc_fd_access_allowed(inode,0))
39760 goto out;
39761
39762 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39763@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39764 struct path path;
39765
39766 /* Are we allowed to snoop on the tasks file descriptors? */
39767- if (!proc_fd_access_allowed(inode))
39768- goto out;
39769+ /* logging this is needed for learning on chromium to work properly,
39770+ but we don't want to flood the logs from 'ps' which does a readlink
39771+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39772+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
39773+ */
39774+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39775+ if (!proc_fd_access_allowed(inode,0))
39776+ goto out;
39777+ } else {
39778+ if (!proc_fd_access_allowed(inode,1))
39779+ goto out;
39780+ }
39781
39782 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39783 if (error)
39784@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39785 rcu_read_lock();
39786 cred = __task_cred(task);
39787 inode->i_uid = cred->euid;
39788+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39789+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39790+#else
39791 inode->i_gid = cred->egid;
39792+#endif
39793 rcu_read_unlock();
39794 }
39795 security_task_to_inode(task, inode);
39796@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39797 struct inode *inode = dentry->d_inode;
39798 struct task_struct *task;
39799 const struct cred *cred;
39800+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39801+ const struct cred *tmpcred = current_cred();
39802+#endif
39803
39804 generic_fillattr(inode, stat);
39805
39806@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39807 stat->uid = 0;
39808 stat->gid = 0;
39809 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39810+
39811+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39812+ rcu_read_unlock();
39813+ return -ENOENT;
39814+ }
39815+
39816 if (task) {
39817+ cred = __task_cred(task);
39818+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39819+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39820+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39821+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39822+#endif
39823+ ) {
39824+#endif
39825 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39826+#ifdef CONFIG_GRKERNSEC_PROC_USER
39827+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39828+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39829+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39830+#endif
39831 task_dumpable(task)) {
39832- cred = __task_cred(task);
39833 stat->uid = cred->euid;
39834+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39835+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39836+#else
39837 stat->gid = cred->egid;
39838+#endif
39839 }
39840+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39841+ } else {
39842+ rcu_read_unlock();
39843+ return -ENOENT;
39844+ }
39845+#endif
39846 }
39847 rcu_read_unlock();
39848 return 0;
39849@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39850
39851 if (task) {
39852 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39853+#ifdef CONFIG_GRKERNSEC_PROC_USER
39854+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39855+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39856+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39857+#endif
39858 task_dumpable(task)) {
39859 rcu_read_lock();
39860 cred = __task_cred(task);
39861 inode->i_uid = cred->euid;
39862+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39863+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39864+#else
39865 inode->i_gid = cred->egid;
39866+#endif
39867 rcu_read_unlock();
39868 } else {
39869 inode->i_uid = 0;
39870@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39871 int fd = proc_fd(inode);
39872
39873 if (task) {
39874- files = get_files_struct(task);
39875+ if (!gr_acl_handle_procpidmem(task))
39876+ files = get_files_struct(task);
39877 put_task_struct(task);
39878 }
39879 if (files) {
39880@@ -2169,11 +2268,21 @@ static const struct file_operations proc
39881 */
39882 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39883 {
39884+ struct task_struct *task;
39885 int rv = generic_permission(inode, mask, flags, NULL);
39886- if (rv == 0)
39887- return 0;
39888+
39889 if (task_pid(current) == proc_pid(inode))
39890 rv = 0;
39891+
39892+ task = get_proc_task(inode);
39893+ if (task == NULL)
39894+ return rv;
39895+
39896+ if (gr_acl_handle_procpidmem(task))
39897+ rv = -EACCES;
39898+
39899+ put_task_struct(task);
39900+
39901 return rv;
39902 }
39903
39904@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39905 if (!task)
39906 goto out_no_task;
39907
39908+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39909+ goto out;
39910+
39911 /*
39912 * Yes, it does not scale. And it should not. Don't add
39913 * new entries into /proc/<tgid>/ without very good reasons.
39914@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39915 if (!task)
39916 goto out_no_task;
39917
39918+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39919+ goto out;
39920+
39921 ret = 0;
39922 i = filp->f_pos;
39923 switch (i) {
39924@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39925 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39926 void *cookie)
39927 {
39928- char *s = nd_get_link(nd);
39929+ const char *s = nd_get_link(nd);
39930 if (!IS_ERR(s))
39931 __putname(s);
39932 }
39933@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39934 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39935 #endif
39936 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39937-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39938+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39939 INF("syscall", S_IRUGO, proc_pid_syscall),
39940 #endif
39941 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39942@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39943 #ifdef CONFIG_SECURITY
39944 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39945 #endif
39946-#ifdef CONFIG_KALLSYMS
39947+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39948 INF("wchan", S_IRUGO, proc_pid_wchan),
39949 #endif
39950-#ifdef CONFIG_STACKTRACE
39951+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39952 ONE("stack", S_IRUGO, proc_pid_stack),
39953 #endif
39954 #ifdef CONFIG_SCHEDSTATS
39955@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39956 #ifdef CONFIG_HARDWALL
39957 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39958 #endif
39959+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39960+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39961+#endif
39962 };
39963
39964 static int proc_tgid_base_readdir(struct file * filp,
39965@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39966 if (!inode)
39967 goto out;
39968
39969+#ifdef CONFIG_GRKERNSEC_PROC_USER
39970+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39971+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39972+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39973+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39974+#else
39975 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39976+#endif
39977 inode->i_op = &proc_tgid_base_inode_operations;
39978 inode->i_fop = &proc_tgid_base_operations;
39979 inode->i_flags|=S_IMMUTABLE;
39980@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
39981 if (!task)
39982 goto out;
39983
39984+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39985+ goto out_put_task;
39986+
39987 result = proc_pid_instantiate(dir, dentry, task, NULL);
39988+out_put_task:
39989 put_task_struct(task);
39990 out:
39991 return result;
39992@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
39993 {
39994 unsigned int nr;
39995 struct task_struct *reaper;
39996+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39997+ const struct cred *tmpcred = current_cred();
39998+ const struct cred *itercred;
39999+#endif
40000+ filldir_t __filldir = filldir;
40001 struct tgid_iter iter;
40002 struct pid_namespace *ns;
40003
40004@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
40005 for (iter = next_tgid(ns, iter);
40006 iter.task;
40007 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40008+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40009+ rcu_read_lock();
40010+ itercred = __task_cred(iter.task);
40011+#endif
40012+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40013+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40014+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40015+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40016+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40017+#endif
40018+ )
40019+#endif
40020+ )
40021+ __filldir = &gr_fake_filldir;
40022+ else
40023+ __filldir = filldir;
40024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40025+ rcu_read_unlock();
40026+#endif
40027 filp->f_pos = iter.tgid + TGID_OFFSET;
40028- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40029+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40030 put_task_struct(iter.task);
40031 goto out;
40032 }
40033@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
40034 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40035 #endif
40036 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40037-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40038+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40039 INF("syscall", S_IRUGO, proc_pid_syscall),
40040 #endif
40041 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40042@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
40043 #ifdef CONFIG_SECURITY
40044 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40045 #endif
40046-#ifdef CONFIG_KALLSYMS
40047+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40048 INF("wchan", S_IRUGO, proc_pid_wchan),
40049 #endif
40050-#ifdef CONFIG_STACKTRACE
40051+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40052 ONE("stack", S_IRUGO, proc_pid_stack),
40053 #endif
40054 #ifdef CONFIG_SCHEDSTATS
40055diff -urNp linux-3.0.3/fs/proc/cmdline.c linux-3.0.3/fs/proc/cmdline.c
40056--- linux-3.0.3/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
40057+++ linux-3.0.3/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
40058@@ -23,7 +23,11 @@ static const struct file_operations cmdl
40059
40060 static int __init proc_cmdline_init(void)
40061 {
40062+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40063+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40064+#else
40065 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40066+#endif
40067 return 0;
40068 }
40069 module_init(proc_cmdline_init);
40070diff -urNp linux-3.0.3/fs/proc/devices.c linux-3.0.3/fs/proc/devices.c
40071--- linux-3.0.3/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
40072+++ linux-3.0.3/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
40073@@ -64,7 +64,11 @@ static const struct file_operations proc
40074
40075 static int __init proc_devices_init(void)
40076 {
40077+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40078+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40079+#else
40080 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40081+#endif
40082 return 0;
40083 }
40084 module_init(proc_devices_init);
40085diff -urNp linux-3.0.3/fs/proc/inode.c linux-3.0.3/fs/proc/inode.c
40086--- linux-3.0.3/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
40087+++ linux-3.0.3/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
40088@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
40089 if (de->mode) {
40090 inode->i_mode = de->mode;
40091 inode->i_uid = de->uid;
40092+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40093+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40094+#else
40095 inode->i_gid = de->gid;
40096+#endif
40097 }
40098 if (de->size)
40099 inode->i_size = de->size;
40100diff -urNp linux-3.0.3/fs/proc/internal.h linux-3.0.3/fs/proc/internal.h
40101--- linux-3.0.3/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
40102+++ linux-3.0.3/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
40103@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40104 struct pid *pid, struct task_struct *task);
40105 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40106 struct pid *pid, struct task_struct *task);
40107+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40108+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40109+#endif
40110 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40111
40112 extern const struct file_operations proc_maps_operations;
40113diff -urNp linux-3.0.3/fs/proc/Kconfig linux-3.0.3/fs/proc/Kconfig
40114--- linux-3.0.3/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
40115+++ linux-3.0.3/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
40116@@ -30,12 +30,12 @@ config PROC_FS
40117
40118 config PROC_KCORE
40119 bool "/proc/kcore support" if !ARM
40120- depends on PROC_FS && MMU
40121+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40122
40123 config PROC_VMCORE
40124 bool "/proc/vmcore support"
40125- depends on PROC_FS && CRASH_DUMP
40126- default y
40127+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40128+ default n
40129 help
40130 Exports the dump image of crashed kernel in ELF format.
40131
40132@@ -59,8 +59,8 @@ config PROC_SYSCTL
40133 limited in memory.
40134
40135 config PROC_PAGE_MONITOR
40136- default y
40137- depends on PROC_FS && MMU
40138+ default n
40139+ depends on PROC_FS && MMU && !GRKERNSEC
40140 bool "Enable /proc page monitoring" if EXPERT
40141 help
40142 Various /proc files exist to monitor process memory utilization:
40143diff -urNp linux-3.0.3/fs/proc/kcore.c linux-3.0.3/fs/proc/kcore.c
40144--- linux-3.0.3/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
40145+++ linux-3.0.3/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
40146@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40147 off_t offset = 0;
40148 struct kcore_list *m;
40149
40150+ pax_track_stack();
40151+
40152 /* setup ELF header */
40153 elf = (struct elfhdr *) bufp;
40154 bufp += sizeof(struct elfhdr);
40155@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40156 * the addresses in the elf_phdr on our list.
40157 */
40158 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40159- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40160+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40161+ if (tsz > buflen)
40162 tsz = buflen;
40163-
40164+
40165 while (buflen) {
40166 struct kcore_list *m;
40167
40168@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40169 kfree(elf_buf);
40170 } else {
40171 if (kern_addr_valid(start)) {
40172- unsigned long n;
40173+ char *elf_buf;
40174+ mm_segment_t oldfs;
40175
40176- n = copy_to_user(buffer, (char *)start, tsz);
40177- /*
40178- * We cannot distingush between fault on source
40179- * and fault on destination. When this happens
40180- * we clear too and hope it will trigger the
40181- * EFAULT again.
40182- */
40183- if (n) {
40184- if (clear_user(buffer + tsz - n,
40185- n))
40186+ elf_buf = kmalloc(tsz, GFP_KERNEL);
40187+ if (!elf_buf)
40188+ return -ENOMEM;
40189+ oldfs = get_fs();
40190+ set_fs(KERNEL_DS);
40191+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40192+ set_fs(oldfs);
40193+ if (copy_to_user(buffer, elf_buf, tsz)) {
40194+ kfree(elf_buf);
40195 return -EFAULT;
40196+ }
40197 }
40198+ set_fs(oldfs);
40199+ kfree(elf_buf);
40200 } else {
40201 if (clear_user(buffer, tsz))
40202 return -EFAULT;
40203@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40204
40205 static int open_kcore(struct inode *inode, struct file *filp)
40206 {
40207+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40208+ return -EPERM;
40209+#endif
40210 if (!capable(CAP_SYS_RAWIO))
40211 return -EPERM;
40212 if (kcore_need_update)
40213diff -urNp linux-3.0.3/fs/proc/meminfo.c linux-3.0.3/fs/proc/meminfo.c
40214--- linux-3.0.3/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
40215+++ linux-3.0.3/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
40216@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40217 unsigned long pages[NR_LRU_LISTS];
40218 int lru;
40219
40220+ pax_track_stack();
40221+
40222 /*
40223 * display in kilobytes.
40224 */
40225@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40226 vmi.used >> 10,
40227 vmi.largest_chunk >> 10
40228 #ifdef CONFIG_MEMORY_FAILURE
40229- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40230+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40231 #endif
40232 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40233 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40234diff -urNp linux-3.0.3/fs/proc/nommu.c linux-3.0.3/fs/proc/nommu.c
40235--- linux-3.0.3/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
40236+++ linux-3.0.3/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
40237@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40238 if (len < 1)
40239 len = 1;
40240 seq_printf(m, "%*c", len, ' ');
40241- seq_path(m, &file->f_path, "");
40242+ seq_path(m, &file->f_path, "\n\\");
40243 }
40244
40245 seq_putc(m, '\n');
40246diff -urNp linux-3.0.3/fs/proc/proc_net.c linux-3.0.3/fs/proc/proc_net.c
40247--- linux-3.0.3/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
40248+++ linux-3.0.3/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
40249@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40250 struct task_struct *task;
40251 struct nsproxy *ns;
40252 struct net *net = NULL;
40253+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40254+ const struct cred *cred = current_cred();
40255+#endif
40256+
40257+#ifdef CONFIG_GRKERNSEC_PROC_USER
40258+ if (cred->fsuid)
40259+ return net;
40260+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40261+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40262+ return net;
40263+#endif
40264
40265 rcu_read_lock();
40266 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40267diff -urNp linux-3.0.3/fs/proc/proc_sysctl.c linux-3.0.3/fs/proc/proc_sysctl.c
40268--- linux-3.0.3/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
40269+++ linux-3.0.3/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
40270@@ -8,6 +8,8 @@
40271 #include <linux/namei.h>
40272 #include "internal.h"
40273
40274+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40275+
40276 static const struct dentry_operations proc_sys_dentry_operations;
40277 static const struct file_operations proc_sys_file_operations;
40278 static const struct inode_operations proc_sys_inode_operations;
40279@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40280 if (!p)
40281 goto out;
40282
40283+ if (gr_handle_sysctl(p, MAY_EXEC))
40284+ goto out;
40285+
40286 err = ERR_PTR(-ENOMEM);
40287 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40288 if (h)
40289@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40290 if (*pos < file->f_pos)
40291 continue;
40292
40293+ if (gr_handle_sysctl(table, 0))
40294+ continue;
40295+
40296 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40297 if (res)
40298 return res;
40299@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40300 if (IS_ERR(head))
40301 return PTR_ERR(head);
40302
40303+ if (table && gr_handle_sysctl(table, MAY_EXEC))
40304+ return -ENOENT;
40305+
40306 generic_fillattr(inode, stat);
40307 if (table)
40308 stat->mode = (stat->mode & S_IFMT) | table->mode;
40309diff -urNp linux-3.0.3/fs/proc/root.c linux-3.0.3/fs/proc/root.c
40310--- linux-3.0.3/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40311+++ linux-3.0.3/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40312@@ -123,7 +123,15 @@ void __init proc_root_init(void)
40313 #ifdef CONFIG_PROC_DEVICETREE
40314 proc_device_tree_init();
40315 #endif
40316+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40317+#ifdef CONFIG_GRKERNSEC_PROC_USER
40318+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40319+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40320+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40321+#endif
40322+#else
40323 proc_mkdir("bus", NULL);
40324+#endif
40325 proc_sys_init();
40326 }
40327
40328diff -urNp linux-3.0.3/fs/proc/task_mmu.c linux-3.0.3/fs/proc/task_mmu.c
40329--- linux-3.0.3/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40330+++ linux-3.0.3/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40331@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40332 "VmExe:\t%8lu kB\n"
40333 "VmLib:\t%8lu kB\n"
40334 "VmPTE:\t%8lu kB\n"
40335- "VmSwap:\t%8lu kB\n",
40336- hiwater_vm << (PAGE_SHIFT-10),
40337+ "VmSwap:\t%8lu kB\n"
40338+
40339+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40340+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40341+#endif
40342+
40343+ ,hiwater_vm << (PAGE_SHIFT-10),
40344 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40345 mm->locked_vm << (PAGE_SHIFT-10),
40346 hiwater_rss << (PAGE_SHIFT-10),
40347@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40348 data << (PAGE_SHIFT-10),
40349 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40350 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40351- swap << (PAGE_SHIFT-10));
40352+ swap << (PAGE_SHIFT-10)
40353+
40354+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40355+ , mm->context.user_cs_base, mm->context.user_cs_limit
40356+#endif
40357+
40358+ );
40359 }
40360
40361 unsigned long task_vsize(struct mm_struct *mm)
40362@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40363 return ret;
40364 }
40365
40366+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40367+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40368+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
40369+ _mm->pax_flags & MF_PAX_SEGMEXEC))
40370+#endif
40371+
40372 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40373 {
40374 struct mm_struct *mm = vma->vm_mm;
40375@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40376 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40377 }
40378
40379- /* We don't show the stack guard page in /proc/maps */
40380+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40381+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40382+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40383+#else
40384 start = vma->vm_start;
40385- if (stack_guard_page_start(vma, start))
40386- start += PAGE_SIZE;
40387 end = vma->vm_end;
40388- if (stack_guard_page_end(vma, end))
40389- end -= PAGE_SIZE;
40390+#endif
40391
40392 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40393 start,
40394@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40395 flags & VM_WRITE ? 'w' : '-',
40396 flags & VM_EXEC ? 'x' : '-',
40397 flags & VM_MAYSHARE ? 's' : 'p',
40398+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40399+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40400+#else
40401 pgoff,
40402+#endif
40403 MAJOR(dev), MINOR(dev), ino, &len);
40404
40405 /*
40406@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40407 */
40408 if (file) {
40409 pad_len_spaces(m, len);
40410- seq_path(m, &file->f_path, "\n");
40411+ seq_path(m, &file->f_path, "\n\\");
40412 } else {
40413 const char *name = arch_vma_name(vma);
40414 if (!name) {
40415@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40416 if (vma->vm_start <= mm->brk &&
40417 vma->vm_end >= mm->start_brk) {
40418 name = "[heap]";
40419- } else if (vma->vm_start <= mm->start_stack &&
40420- vma->vm_end >= mm->start_stack) {
40421+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40422+ (vma->vm_start <= mm->start_stack &&
40423+ vma->vm_end >= mm->start_stack)) {
40424 name = "[stack]";
40425 }
40426 } else {
40427@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40428 };
40429
40430 memset(&mss, 0, sizeof mss);
40431- mss.vma = vma;
40432- /* mmap_sem is held in m_start */
40433- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40434- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40435-
40436+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40437+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40438+#endif
40439+ mss.vma = vma;
40440+ /* mmap_sem is held in m_start */
40441+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40442+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40443+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40444+ }
40445+#endif
40446 show_map_vma(m, vma);
40447
40448 seq_printf(m,
40449@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40450 "KernelPageSize: %8lu kB\n"
40451 "MMUPageSize: %8lu kB\n"
40452 "Locked: %8lu kB\n",
40453+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40454+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40455+#else
40456 (vma->vm_end - vma->vm_start) >> 10,
40457+#endif
40458 mss.resident >> 10,
40459 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40460 mss.shared_clean >> 10,
40461@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40462
40463 if (file) {
40464 seq_printf(m, " file=");
40465- seq_path(m, &file->f_path, "\n\t= ");
40466+ seq_path(m, &file->f_path, "\n\t\\= ");
40467 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40468 seq_printf(m, " heap");
40469 } else if (vma->vm_start <= mm->start_stack &&
40470diff -urNp linux-3.0.3/fs/proc/task_nommu.c linux-3.0.3/fs/proc/task_nommu.c
40471--- linux-3.0.3/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40472+++ linux-3.0.3/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40473@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40474 else
40475 bytes += kobjsize(mm);
40476
40477- if (current->fs && current->fs->users > 1)
40478+ if (current->fs && atomic_read(&current->fs->users) > 1)
40479 sbytes += kobjsize(current->fs);
40480 else
40481 bytes += kobjsize(current->fs);
40482@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40483
40484 if (file) {
40485 pad_len_spaces(m, len);
40486- seq_path(m, &file->f_path, "");
40487+ seq_path(m, &file->f_path, "\n\\");
40488 } else if (mm) {
40489 if (vma->vm_start <= mm->start_stack &&
40490 vma->vm_end >= mm->start_stack) {
40491diff -urNp linux-3.0.3/fs/quota/netlink.c linux-3.0.3/fs/quota/netlink.c
40492--- linux-3.0.3/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40493+++ linux-3.0.3/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40494@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40495 void quota_send_warning(short type, unsigned int id, dev_t dev,
40496 const char warntype)
40497 {
40498- static atomic_t seq;
40499+ static atomic_unchecked_t seq;
40500 struct sk_buff *skb;
40501 void *msg_head;
40502 int ret;
40503@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40504 "VFS: Not enough memory to send quota warning.\n");
40505 return;
40506 }
40507- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40508+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40509 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40510 if (!msg_head) {
40511 printk(KERN_ERR
40512diff -urNp linux-3.0.3/fs/readdir.c linux-3.0.3/fs/readdir.c
40513--- linux-3.0.3/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40514+++ linux-3.0.3/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40515@@ -17,6 +17,7 @@
40516 #include <linux/security.h>
40517 #include <linux/syscalls.h>
40518 #include <linux/unistd.h>
40519+#include <linux/namei.h>
40520
40521 #include <asm/uaccess.h>
40522
40523@@ -67,6 +68,7 @@ struct old_linux_dirent {
40524
40525 struct readdir_callback {
40526 struct old_linux_dirent __user * dirent;
40527+ struct file * file;
40528 int result;
40529 };
40530
40531@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40532 buf->result = -EOVERFLOW;
40533 return -EOVERFLOW;
40534 }
40535+
40536+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40537+ return 0;
40538+
40539 buf->result++;
40540 dirent = buf->dirent;
40541 if (!access_ok(VERIFY_WRITE, dirent,
40542@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40543
40544 buf.result = 0;
40545 buf.dirent = dirent;
40546+ buf.file = file;
40547
40548 error = vfs_readdir(file, fillonedir, &buf);
40549 if (buf.result)
40550@@ -142,6 +149,7 @@ struct linux_dirent {
40551 struct getdents_callback {
40552 struct linux_dirent __user * current_dir;
40553 struct linux_dirent __user * previous;
40554+ struct file * file;
40555 int count;
40556 int error;
40557 };
40558@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40559 buf->error = -EOVERFLOW;
40560 return -EOVERFLOW;
40561 }
40562+
40563+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40564+ return 0;
40565+
40566 dirent = buf->previous;
40567 if (dirent) {
40568 if (__put_user(offset, &dirent->d_off))
40569@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40570 buf.previous = NULL;
40571 buf.count = count;
40572 buf.error = 0;
40573+ buf.file = file;
40574
40575 error = vfs_readdir(file, filldir, &buf);
40576 if (error >= 0)
40577@@ -229,6 +242,7 @@ out:
40578 struct getdents_callback64 {
40579 struct linux_dirent64 __user * current_dir;
40580 struct linux_dirent64 __user * previous;
40581+ struct file *file;
40582 int count;
40583 int error;
40584 };
40585@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40586 buf->error = -EINVAL; /* only used if we fail.. */
40587 if (reclen > buf->count)
40588 return -EINVAL;
40589+
40590+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40591+ return 0;
40592+
40593 dirent = buf->previous;
40594 if (dirent) {
40595 if (__put_user(offset, &dirent->d_off))
40596@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40597
40598 buf.current_dir = dirent;
40599 buf.previous = NULL;
40600+ buf.file = file;
40601 buf.count = count;
40602 buf.error = 0;
40603
40604diff -urNp linux-3.0.3/fs/reiserfs/dir.c linux-3.0.3/fs/reiserfs/dir.c
40605--- linux-3.0.3/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40606+++ linux-3.0.3/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40607@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40608 struct reiserfs_dir_entry de;
40609 int ret = 0;
40610
40611+ pax_track_stack();
40612+
40613 reiserfs_write_lock(inode->i_sb);
40614
40615 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40616diff -urNp linux-3.0.3/fs/reiserfs/do_balan.c linux-3.0.3/fs/reiserfs/do_balan.c
40617--- linux-3.0.3/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40618+++ linux-3.0.3/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40619@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40620 return;
40621 }
40622
40623- atomic_inc(&(fs_generation(tb->tb_sb)));
40624+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40625 do_balance_starts(tb);
40626
40627 /* balance leaf returns 0 except if combining L R and S into
40628diff -urNp linux-3.0.3/fs/reiserfs/journal.c linux-3.0.3/fs/reiserfs/journal.c
40629--- linux-3.0.3/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40630+++ linux-3.0.3/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40631@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40632 struct buffer_head *bh;
40633 int i, j;
40634
40635+ pax_track_stack();
40636+
40637 bh = __getblk(dev, block, bufsize);
40638 if (buffer_uptodate(bh))
40639 return (bh);
40640diff -urNp linux-3.0.3/fs/reiserfs/namei.c linux-3.0.3/fs/reiserfs/namei.c
40641--- linux-3.0.3/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40642+++ linux-3.0.3/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40643@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40644 unsigned long savelink = 1;
40645 struct timespec ctime;
40646
40647+ pax_track_stack();
40648+
40649 /* three balancings: (1) old name removal, (2) new name insertion
40650 and (3) maybe "save" link insertion
40651 stat data updates: (1) old directory,
40652diff -urNp linux-3.0.3/fs/reiserfs/procfs.c linux-3.0.3/fs/reiserfs/procfs.c
40653--- linux-3.0.3/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40654+++ linux-3.0.3/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40655@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40656 "SMALL_TAILS " : "NO_TAILS ",
40657 replay_only(sb) ? "REPLAY_ONLY " : "",
40658 convert_reiserfs(sb) ? "CONV " : "",
40659- atomic_read(&r->s_generation_counter),
40660+ atomic_read_unchecked(&r->s_generation_counter),
40661 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40662 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40663 SF(s_good_search_by_key_reada), SF(s_bmaps),
40664@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40665 struct journal_params *jp = &rs->s_v1.s_journal;
40666 char b[BDEVNAME_SIZE];
40667
40668+ pax_track_stack();
40669+
40670 seq_printf(m, /* on-disk fields */
40671 "jp_journal_1st_block: \t%i\n"
40672 "jp_journal_dev: \t%s[%x]\n"
40673diff -urNp linux-3.0.3/fs/reiserfs/stree.c linux-3.0.3/fs/reiserfs/stree.c
40674--- linux-3.0.3/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40675+++ linux-3.0.3/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40676@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40677 int iter = 0;
40678 #endif
40679
40680+ pax_track_stack();
40681+
40682 BUG_ON(!th->t_trans_id);
40683
40684 init_tb_struct(th, &s_del_balance, sb, path,
40685@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40686 int retval;
40687 int quota_cut_bytes = 0;
40688
40689+ pax_track_stack();
40690+
40691 BUG_ON(!th->t_trans_id);
40692
40693 le_key2cpu_key(&cpu_key, key);
40694@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40695 int quota_cut_bytes;
40696 loff_t tail_pos = 0;
40697
40698+ pax_track_stack();
40699+
40700 BUG_ON(!th->t_trans_id);
40701
40702 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40703@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40704 int retval;
40705 int fs_gen;
40706
40707+ pax_track_stack();
40708+
40709 BUG_ON(!th->t_trans_id);
40710
40711 fs_gen = get_generation(inode->i_sb);
40712@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40713 int fs_gen = 0;
40714 int quota_bytes = 0;
40715
40716+ pax_track_stack();
40717+
40718 BUG_ON(!th->t_trans_id);
40719
40720 if (inode) { /* Do we count quotas for item? */
40721diff -urNp linux-3.0.3/fs/reiserfs/super.c linux-3.0.3/fs/reiserfs/super.c
40722--- linux-3.0.3/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40723+++ linux-3.0.3/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40724@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40725 {.option_name = NULL}
40726 };
40727
40728+ pax_track_stack();
40729+
40730 *blocks = 0;
40731 if (!options || !*options)
40732 /* use default configuration: create tails, journaling on, no
40733diff -urNp linux-3.0.3/fs/select.c linux-3.0.3/fs/select.c
40734--- linux-3.0.3/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40735+++ linux-3.0.3/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40736@@ -20,6 +20,7 @@
40737 #include <linux/module.h>
40738 #include <linux/slab.h>
40739 #include <linux/poll.h>
40740+#include <linux/security.h>
40741 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40742 #include <linux/file.h>
40743 #include <linux/fdtable.h>
40744@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40745 int retval, i, timed_out = 0;
40746 unsigned long slack = 0;
40747
40748+ pax_track_stack();
40749+
40750 rcu_read_lock();
40751 retval = max_select_fd(n, fds);
40752 rcu_read_unlock();
40753@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40754 /* Allocate small arguments on the stack to save memory and be faster */
40755 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40756
40757+ pax_track_stack();
40758+
40759 ret = -EINVAL;
40760 if (n < 0)
40761 goto out_nofds;
40762@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40763 struct poll_list *walk = head;
40764 unsigned long todo = nfds;
40765
40766+ pax_track_stack();
40767+
40768+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40769 if (nfds > rlimit(RLIMIT_NOFILE))
40770 return -EINVAL;
40771
40772diff -urNp linux-3.0.3/fs/seq_file.c linux-3.0.3/fs/seq_file.c
40773--- linux-3.0.3/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40774+++ linux-3.0.3/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40775@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40776 return 0;
40777 }
40778 if (!m->buf) {
40779- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40780+ m->size = PAGE_SIZE;
40781+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40782 if (!m->buf)
40783 return -ENOMEM;
40784 }
40785@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40786 Eoverflow:
40787 m->op->stop(m, p);
40788 kfree(m->buf);
40789- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40790+ m->size <<= 1;
40791+ m->buf = kmalloc(m->size, GFP_KERNEL);
40792 return !m->buf ? -ENOMEM : -EAGAIN;
40793 }
40794
40795@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40796 m->version = file->f_version;
40797 /* grab buffer if we didn't have one */
40798 if (!m->buf) {
40799- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40800+ m->size = PAGE_SIZE;
40801+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40802 if (!m->buf)
40803 goto Enomem;
40804 }
40805@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40806 goto Fill;
40807 m->op->stop(m, p);
40808 kfree(m->buf);
40809- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40810+ m->size <<= 1;
40811+ m->buf = kmalloc(m->size, GFP_KERNEL);
40812 if (!m->buf)
40813 goto Enomem;
40814 m->count = 0;
40815@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40816 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40817 void *data)
40818 {
40819- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40820+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40821 int res = -ENOMEM;
40822
40823 if (op) {
40824diff -urNp linux-3.0.3/fs/splice.c linux-3.0.3/fs/splice.c
40825--- linux-3.0.3/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40826+++ linux-3.0.3/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40827@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40828 pipe_lock(pipe);
40829
40830 for (;;) {
40831- if (!pipe->readers) {
40832+ if (!atomic_read(&pipe->readers)) {
40833 send_sig(SIGPIPE, current, 0);
40834 if (!ret)
40835 ret = -EPIPE;
40836@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40837 do_wakeup = 0;
40838 }
40839
40840- pipe->waiting_writers++;
40841+ atomic_inc(&pipe->waiting_writers);
40842 pipe_wait(pipe);
40843- pipe->waiting_writers--;
40844+ atomic_dec(&pipe->waiting_writers);
40845 }
40846
40847 pipe_unlock(pipe);
40848@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40849 .spd_release = spd_release_page,
40850 };
40851
40852+ pax_track_stack();
40853+
40854 if (splice_grow_spd(pipe, &spd))
40855 return -ENOMEM;
40856
40857@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40858 old_fs = get_fs();
40859 set_fs(get_ds());
40860 /* The cast to a user pointer is valid due to the set_fs() */
40861- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40862+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40863 set_fs(old_fs);
40864
40865 return res;
40866@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40867 old_fs = get_fs();
40868 set_fs(get_ds());
40869 /* The cast to a user pointer is valid due to the set_fs() */
40870- res = vfs_write(file, (const char __user *)buf, count, &pos);
40871+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40872 set_fs(old_fs);
40873
40874 return res;
40875@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40876 .spd_release = spd_release_page,
40877 };
40878
40879+ pax_track_stack();
40880+
40881 if (splice_grow_spd(pipe, &spd))
40882 return -ENOMEM;
40883
40884@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40885 goto err;
40886
40887 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40888- vec[i].iov_base = (void __user *) page_address(page);
40889+ vec[i].iov_base = (__force void __user *) page_address(page);
40890 vec[i].iov_len = this_len;
40891 spd.pages[i] = page;
40892 spd.nr_pages++;
40893@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40894 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40895 {
40896 while (!pipe->nrbufs) {
40897- if (!pipe->writers)
40898+ if (!atomic_read(&pipe->writers))
40899 return 0;
40900
40901- if (!pipe->waiting_writers && sd->num_spliced)
40902+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40903 return 0;
40904
40905 if (sd->flags & SPLICE_F_NONBLOCK)
40906@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40907 * out of the pipe right after the splice_to_pipe(). So set
40908 * PIPE_READERS appropriately.
40909 */
40910- pipe->readers = 1;
40911+ atomic_set(&pipe->readers, 1);
40912
40913 current->splice_pipe = pipe;
40914 }
40915@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40916 };
40917 long ret;
40918
40919+ pax_track_stack();
40920+
40921 pipe = get_pipe_info(file);
40922 if (!pipe)
40923 return -EBADF;
40924@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40925 ret = -ERESTARTSYS;
40926 break;
40927 }
40928- if (!pipe->writers)
40929+ if (!atomic_read(&pipe->writers))
40930 break;
40931- if (!pipe->waiting_writers) {
40932+ if (!atomic_read(&pipe->waiting_writers)) {
40933 if (flags & SPLICE_F_NONBLOCK) {
40934 ret = -EAGAIN;
40935 break;
40936@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40937 pipe_lock(pipe);
40938
40939 while (pipe->nrbufs >= pipe->buffers) {
40940- if (!pipe->readers) {
40941+ if (!atomic_read(&pipe->readers)) {
40942 send_sig(SIGPIPE, current, 0);
40943 ret = -EPIPE;
40944 break;
40945@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40946 ret = -ERESTARTSYS;
40947 break;
40948 }
40949- pipe->waiting_writers++;
40950+ atomic_inc(&pipe->waiting_writers);
40951 pipe_wait(pipe);
40952- pipe->waiting_writers--;
40953+ atomic_dec(&pipe->waiting_writers);
40954 }
40955
40956 pipe_unlock(pipe);
40957@@ -1819,14 +1825,14 @@ retry:
40958 pipe_double_lock(ipipe, opipe);
40959
40960 do {
40961- if (!opipe->readers) {
40962+ if (!atomic_read(&opipe->readers)) {
40963 send_sig(SIGPIPE, current, 0);
40964 if (!ret)
40965 ret = -EPIPE;
40966 break;
40967 }
40968
40969- if (!ipipe->nrbufs && !ipipe->writers)
40970+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40971 break;
40972
40973 /*
40974@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
40975 pipe_double_lock(ipipe, opipe);
40976
40977 do {
40978- if (!opipe->readers) {
40979+ if (!atomic_read(&opipe->readers)) {
40980 send_sig(SIGPIPE, current, 0);
40981 if (!ret)
40982 ret = -EPIPE;
40983@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
40984 * return EAGAIN if we have the potential of some data in the
40985 * future, otherwise just return 0
40986 */
40987- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
40988+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
40989 ret = -EAGAIN;
40990
40991 pipe_unlock(ipipe);
40992diff -urNp linux-3.0.3/fs/sysfs/file.c linux-3.0.3/fs/sysfs/file.c
40993--- linux-3.0.3/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
40994+++ linux-3.0.3/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
40995@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
40996
40997 struct sysfs_open_dirent {
40998 atomic_t refcnt;
40999- atomic_t event;
41000+ atomic_unchecked_t event;
41001 wait_queue_head_t poll;
41002 struct list_head buffers; /* goes through sysfs_buffer.list */
41003 };
41004@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
41005 if (!sysfs_get_active(attr_sd))
41006 return -ENODEV;
41007
41008- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41009+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41010 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41011
41012 sysfs_put_active(attr_sd);
41013@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
41014 return -ENOMEM;
41015
41016 atomic_set(&new_od->refcnt, 0);
41017- atomic_set(&new_od->event, 1);
41018+ atomic_set_unchecked(&new_od->event, 1);
41019 init_waitqueue_head(&new_od->poll);
41020 INIT_LIST_HEAD(&new_od->buffers);
41021 goto retry;
41022@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
41023
41024 sysfs_put_active(attr_sd);
41025
41026- if (buffer->event != atomic_read(&od->event))
41027+ if (buffer->event != atomic_read_unchecked(&od->event))
41028 goto trigger;
41029
41030 return DEFAULT_POLLMASK;
41031@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
41032
41033 od = sd->s_attr.open;
41034 if (od) {
41035- atomic_inc(&od->event);
41036+ atomic_inc_unchecked(&od->event);
41037 wake_up_interruptible(&od->poll);
41038 }
41039
41040diff -urNp linux-3.0.3/fs/sysfs/mount.c linux-3.0.3/fs/sysfs/mount.c
41041--- linux-3.0.3/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
41042+++ linux-3.0.3/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
41043@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41044 .s_name = "",
41045 .s_count = ATOMIC_INIT(1),
41046 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41047+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41048+ .s_mode = S_IFDIR | S_IRWXU,
41049+#else
41050 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41051+#endif
41052 .s_ino = 1,
41053 };
41054
41055diff -urNp linux-3.0.3/fs/sysfs/symlink.c linux-3.0.3/fs/sysfs/symlink.c
41056--- linux-3.0.3/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
41057+++ linux-3.0.3/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
41058@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41059
41060 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41061 {
41062- char *page = nd_get_link(nd);
41063+ const char *page = nd_get_link(nd);
41064 if (!IS_ERR(page))
41065 free_page((unsigned long)page);
41066 }
41067diff -urNp linux-3.0.3/fs/udf/inode.c linux-3.0.3/fs/udf/inode.c
41068--- linux-3.0.3/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
41069+++ linux-3.0.3/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
41070@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41071 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41072 int lastblock = 0;
41073
41074+ pax_track_stack();
41075+
41076 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41077 prev_epos.block = iinfo->i_location;
41078 prev_epos.bh = NULL;
41079diff -urNp linux-3.0.3/fs/udf/misc.c linux-3.0.3/fs/udf/misc.c
41080--- linux-3.0.3/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
41081+++ linux-3.0.3/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
41082@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41083
41084 u8 udf_tag_checksum(const struct tag *t)
41085 {
41086- u8 *data = (u8 *)t;
41087+ const u8 *data = (const u8 *)t;
41088 u8 checksum = 0;
41089 int i;
41090 for (i = 0; i < sizeof(struct tag); ++i)
41091diff -urNp linux-3.0.3/fs/utimes.c linux-3.0.3/fs/utimes.c
41092--- linux-3.0.3/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
41093+++ linux-3.0.3/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
41094@@ -1,6 +1,7 @@
41095 #include <linux/compiler.h>
41096 #include <linux/file.h>
41097 #include <linux/fs.h>
41098+#include <linux/security.h>
41099 #include <linux/linkage.h>
41100 #include <linux/mount.h>
41101 #include <linux/namei.h>
41102@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41103 goto mnt_drop_write_and_out;
41104 }
41105 }
41106+
41107+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41108+ error = -EACCES;
41109+ goto mnt_drop_write_and_out;
41110+ }
41111+
41112 mutex_lock(&inode->i_mutex);
41113 error = notify_change(path->dentry, &newattrs);
41114 mutex_unlock(&inode->i_mutex);
41115diff -urNp linux-3.0.3/fs/xattr_acl.c linux-3.0.3/fs/xattr_acl.c
41116--- linux-3.0.3/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
41117+++ linux-3.0.3/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
41118@@ -17,8 +17,8 @@
41119 struct posix_acl *
41120 posix_acl_from_xattr(const void *value, size_t size)
41121 {
41122- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41123- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41124+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41125+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41126 int count;
41127 struct posix_acl *acl;
41128 struct posix_acl_entry *acl_e;
41129diff -urNp linux-3.0.3/fs/xattr.c linux-3.0.3/fs/xattr.c
41130--- linux-3.0.3/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
41131+++ linux-3.0.3/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
41132@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41133 * Extended attribute SET operations
41134 */
41135 static long
41136-setxattr(struct dentry *d, const char __user *name, const void __user *value,
41137+setxattr(struct path *path, const char __user *name, const void __user *value,
41138 size_t size, int flags)
41139 {
41140 int error;
41141@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
41142 return PTR_ERR(kvalue);
41143 }
41144
41145- error = vfs_setxattr(d, kname, kvalue, size, flags);
41146+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41147+ error = -EACCES;
41148+ goto out;
41149+ }
41150+
41151+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41152+out:
41153 kfree(kvalue);
41154 return error;
41155 }
41156@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41157 return error;
41158 error = mnt_want_write(path.mnt);
41159 if (!error) {
41160- error = setxattr(path.dentry, name, value, size, flags);
41161+ error = setxattr(&path, name, value, size, flags);
41162 mnt_drop_write(path.mnt);
41163 }
41164 path_put(&path);
41165@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41166 return error;
41167 error = mnt_want_write(path.mnt);
41168 if (!error) {
41169- error = setxattr(path.dentry, name, value, size, flags);
41170+ error = setxattr(&path, name, value, size, flags);
41171 mnt_drop_write(path.mnt);
41172 }
41173 path_put(&path);
41174@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41175 const void __user *,value, size_t, size, int, flags)
41176 {
41177 struct file *f;
41178- struct dentry *dentry;
41179 int error = -EBADF;
41180
41181 f = fget(fd);
41182 if (!f)
41183 return error;
41184- dentry = f->f_path.dentry;
41185- audit_inode(NULL, dentry);
41186+ audit_inode(NULL, f->f_path.dentry);
41187 error = mnt_want_write_file(f);
41188 if (!error) {
41189- error = setxattr(dentry, name, value, size, flags);
41190+ error = setxattr(&f->f_path, name, value, size, flags);
41191 mnt_drop_write(f->f_path.mnt);
41192 }
41193 fput(f);
41194diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c
41195--- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
41196+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
41197@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41198 xfs_fsop_geom_t fsgeo;
41199 int error;
41200
41201+ memset(&fsgeo, 0, sizeof(fsgeo));
41202 error = xfs_fs_geometry(mp, &fsgeo, 3);
41203 if (error)
41204 return -error;
41205diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c
41206--- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41207+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
41208@@ -128,7 +128,7 @@ xfs_find_handle(
41209 }
41210
41211 error = -EFAULT;
41212- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41213+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41214 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41215 goto out_put;
41216
41217diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c
41218--- linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
41219+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
41220@@ -437,7 +437,7 @@ xfs_vn_put_link(
41221 struct nameidata *nd,
41222 void *p)
41223 {
41224- char *s = nd_get_link(nd);
41225+ const char *s = nd_get_link(nd);
41226
41227 if (!IS_ERR(s))
41228 kfree(s);
41229diff -urNp linux-3.0.3/fs/xfs/xfs_bmap.c linux-3.0.3/fs/xfs/xfs_bmap.c
41230--- linux-3.0.3/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
41231+++ linux-3.0.3/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
41232@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
41233 int nmap,
41234 int ret_nmap);
41235 #else
41236-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41237+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41238 #endif /* DEBUG */
41239
41240 STATIC int
41241diff -urNp linux-3.0.3/fs/xfs/xfs_dir2_sf.c linux-3.0.3/fs/xfs/xfs_dir2_sf.c
41242--- linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
41243+++ linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
41244@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41245 }
41246
41247 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41248- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41249+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41250+ char name[sfep->namelen];
41251+ memcpy(name, sfep->name, sfep->namelen);
41252+ if (filldir(dirent, name, sfep->namelen,
41253+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
41254+ *offset = off & 0x7fffffff;
41255+ return 0;
41256+ }
41257+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41258 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41259 *offset = off & 0x7fffffff;
41260 return 0;
41261diff -urNp linux-3.0.3/grsecurity/gracl_alloc.c linux-3.0.3/grsecurity/gracl_alloc.c
41262--- linux-3.0.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41263+++ linux-3.0.3/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
41264@@ -0,0 +1,105 @@
41265+#include <linux/kernel.h>
41266+#include <linux/mm.h>
41267+#include <linux/slab.h>
41268+#include <linux/vmalloc.h>
41269+#include <linux/gracl.h>
41270+#include <linux/grsecurity.h>
41271+
41272+static unsigned long alloc_stack_next = 1;
41273+static unsigned long alloc_stack_size = 1;
41274+static void **alloc_stack;
41275+
41276+static __inline__ int
41277+alloc_pop(void)
41278+{
41279+ if (alloc_stack_next == 1)
41280+ return 0;
41281+
41282+ kfree(alloc_stack[alloc_stack_next - 2]);
41283+
41284+ alloc_stack_next--;
41285+
41286+ return 1;
41287+}
41288+
41289+static __inline__ int
41290+alloc_push(void *buf)
41291+{
41292+ if (alloc_stack_next >= alloc_stack_size)
41293+ return 1;
41294+
41295+ alloc_stack[alloc_stack_next - 1] = buf;
41296+
41297+ alloc_stack_next++;
41298+
41299+ return 0;
41300+}
41301+
41302+void *
41303+acl_alloc(unsigned long len)
41304+{
41305+ void *ret = NULL;
41306+
41307+ if (!len || len > PAGE_SIZE)
41308+ goto out;
41309+
41310+ ret = kmalloc(len, GFP_KERNEL);
41311+
41312+ if (ret) {
41313+ if (alloc_push(ret)) {
41314+ kfree(ret);
41315+ ret = NULL;
41316+ }
41317+ }
41318+
41319+out:
41320+ return ret;
41321+}
41322+
41323+void *
41324+acl_alloc_num(unsigned long num, unsigned long len)
41325+{
41326+ if (!len || (num > (PAGE_SIZE / len)))
41327+ return NULL;
41328+
41329+ return acl_alloc(num * len);
41330+}
41331+
41332+void
41333+acl_free_all(void)
41334+{
41335+ if (gr_acl_is_enabled() || !alloc_stack)
41336+ return;
41337+
41338+ while (alloc_pop()) ;
41339+
41340+ if (alloc_stack) {
41341+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41342+ kfree(alloc_stack);
41343+ else
41344+ vfree(alloc_stack);
41345+ }
41346+
41347+ alloc_stack = NULL;
41348+ alloc_stack_size = 1;
41349+ alloc_stack_next = 1;
41350+
41351+ return;
41352+}
41353+
41354+int
41355+acl_alloc_stack_init(unsigned long size)
41356+{
41357+ if ((size * sizeof (void *)) <= PAGE_SIZE)
41358+ alloc_stack =
41359+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41360+ else
41361+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
41362+
41363+ alloc_stack_size = size;
41364+
41365+ if (!alloc_stack)
41366+ return 0;
41367+ else
41368+ return 1;
41369+}
41370diff -urNp linux-3.0.3/grsecurity/gracl.c linux-3.0.3/grsecurity/gracl.c
41371--- linux-3.0.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41372+++ linux-3.0.3/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41373@@ -0,0 +1,4106 @@
41374+#include <linux/kernel.h>
41375+#include <linux/module.h>
41376+#include <linux/sched.h>
41377+#include <linux/mm.h>
41378+#include <linux/file.h>
41379+#include <linux/fs.h>
41380+#include <linux/namei.h>
41381+#include <linux/mount.h>
41382+#include <linux/tty.h>
41383+#include <linux/proc_fs.h>
41384+#include <linux/lglock.h>
41385+#include <linux/slab.h>
41386+#include <linux/vmalloc.h>
41387+#include <linux/types.h>
41388+#include <linux/sysctl.h>
41389+#include <linux/netdevice.h>
41390+#include <linux/ptrace.h>
41391+#include <linux/gracl.h>
41392+#include <linux/gralloc.h>
41393+#include <linux/grsecurity.h>
41394+#include <linux/grinternal.h>
41395+#include <linux/pid_namespace.h>
41396+#include <linux/fdtable.h>
41397+#include <linux/percpu.h>
41398+
41399+#include <asm/uaccess.h>
41400+#include <asm/errno.h>
41401+#include <asm/mman.h>
41402+
41403+static struct acl_role_db acl_role_set;
41404+static struct name_db name_set;
41405+static struct inodev_db inodev_set;
41406+
41407+/* for keeping track of userspace pointers used for subjects, so we
41408+ can share references in the kernel as well
41409+*/
41410+
41411+static struct path real_root;
41412+
41413+static struct acl_subj_map_db subj_map_set;
41414+
41415+static struct acl_role_label *default_role;
41416+
41417+static struct acl_role_label *role_list;
41418+
41419+static u16 acl_sp_role_value;
41420+
41421+extern char *gr_shared_page[4];
41422+static DEFINE_MUTEX(gr_dev_mutex);
41423+DEFINE_RWLOCK(gr_inode_lock);
41424+
41425+struct gr_arg *gr_usermode;
41426+
41427+static unsigned int gr_status __read_only = GR_STATUS_INIT;
41428+
41429+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41430+extern void gr_clear_learn_entries(void);
41431+
41432+#ifdef CONFIG_GRKERNSEC_RESLOG
41433+extern void gr_log_resource(const struct task_struct *task,
41434+ const int res, const unsigned long wanted, const int gt);
41435+#endif
41436+
41437+unsigned char *gr_system_salt;
41438+unsigned char *gr_system_sum;
41439+
41440+static struct sprole_pw **acl_special_roles = NULL;
41441+static __u16 num_sprole_pws = 0;
41442+
41443+static struct acl_role_label *kernel_role = NULL;
41444+
41445+static unsigned int gr_auth_attempts = 0;
41446+static unsigned long gr_auth_expires = 0UL;
41447+
41448+#ifdef CONFIG_NET
41449+extern struct vfsmount *sock_mnt;
41450+#endif
41451+
41452+extern struct vfsmount *pipe_mnt;
41453+extern struct vfsmount *shm_mnt;
41454+#ifdef CONFIG_HUGETLBFS
41455+extern struct vfsmount *hugetlbfs_vfsmount;
41456+#endif
41457+
41458+static struct acl_object_label *fakefs_obj_rw;
41459+static struct acl_object_label *fakefs_obj_rwx;
41460+
41461+extern int gr_init_uidset(void);
41462+extern void gr_free_uidset(void);
41463+extern void gr_remove_uid(uid_t uid);
41464+extern int gr_find_uid(uid_t uid);
41465+
41466+DECLARE_BRLOCK(vfsmount_lock);
41467+
41468+__inline__ int
41469+gr_acl_is_enabled(void)
41470+{
41471+ return (gr_status & GR_READY);
41472+}
41473+
41474+#ifdef CONFIG_BTRFS_FS
41475+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41476+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41477+#endif
41478+
41479+static inline dev_t __get_dev(const struct dentry *dentry)
41480+{
41481+#ifdef CONFIG_BTRFS_FS
41482+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41483+ return get_btrfs_dev_from_inode(dentry->d_inode);
41484+ else
41485+#endif
41486+ return dentry->d_inode->i_sb->s_dev;
41487+}
41488+
41489+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41490+{
41491+ return __get_dev(dentry);
41492+}
41493+
41494+static char gr_task_roletype_to_char(struct task_struct *task)
41495+{
41496+ switch (task->role->roletype &
41497+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41498+ GR_ROLE_SPECIAL)) {
41499+ case GR_ROLE_DEFAULT:
41500+ return 'D';
41501+ case GR_ROLE_USER:
41502+ return 'U';
41503+ case GR_ROLE_GROUP:
41504+ return 'G';
41505+ case GR_ROLE_SPECIAL:
41506+ return 'S';
41507+ }
41508+
41509+ return 'X';
41510+}
41511+
41512+char gr_roletype_to_char(void)
41513+{
41514+ return gr_task_roletype_to_char(current);
41515+}
41516+
41517+__inline__ int
41518+gr_acl_tpe_check(void)
41519+{
41520+ if (unlikely(!(gr_status & GR_READY)))
41521+ return 0;
41522+ if (current->role->roletype & GR_ROLE_TPE)
41523+ return 1;
41524+ else
41525+ return 0;
41526+}
41527+
41528+int
41529+gr_handle_rawio(const struct inode *inode)
41530+{
41531+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41532+ if (inode && S_ISBLK(inode->i_mode) &&
41533+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41534+ !capable(CAP_SYS_RAWIO))
41535+ return 1;
41536+#endif
41537+ return 0;
41538+}
41539+
41540+static int
41541+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41542+{
41543+ if (likely(lena != lenb))
41544+ return 0;
41545+
41546+ return !memcmp(a, b, lena);
41547+}
41548+
41549+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41550+{
41551+ *buflen -= namelen;
41552+ if (*buflen < 0)
41553+ return -ENAMETOOLONG;
41554+ *buffer -= namelen;
41555+ memcpy(*buffer, str, namelen);
41556+ return 0;
41557+}
41558+
41559+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41560+{
41561+ return prepend(buffer, buflen, name->name, name->len);
41562+}
41563+
41564+static int prepend_path(const struct path *path, struct path *root,
41565+ char **buffer, int *buflen)
41566+{
41567+ struct dentry *dentry = path->dentry;
41568+ struct vfsmount *vfsmnt = path->mnt;
41569+ bool slash = false;
41570+ int error = 0;
41571+
41572+ while (dentry != root->dentry || vfsmnt != root->mnt) {
41573+ struct dentry * parent;
41574+
41575+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41576+ /* Global root? */
41577+ if (vfsmnt->mnt_parent == vfsmnt) {
41578+ goto out;
41579+ }
41580+ dentry = vfsmnt->mnt_mountpoint;
41581+ vfsmnt = vfsmnt->mnt_parent;
41582+ continue;
41583+ }
41584+ parent = dentry->d_parent;
41585+ prefetch(parent);
41586+ spin_lock(&dentry->d_lock);
41587+ error = prepend_name(buffer, buflen, &dentry->d_name);
41588+ spin_unlock(&dentry->d_lock);
41589+ if (!error)
41590+ error = prepend(buffer, buflen, "/", 1);
41591+ if (error)
41592+ break;
41593+
41594+ slash = true;
41595+ dentry = parent;
41596+ }
41597+
41598+out:
41599+ if (!error && !slash)
41600+ error = prepend(buffer, buflen, "/", 1);
41601+
41602+ return error;
41603+}
41604+
41605+/* this must be called with vfsmount_lock and rename_lock held */
41606+
41607+static char *__our_d_path(const struct path *path, struct path *root,
41608+ char *buf, int buflen)
41609+{
41610+ char *res = buf + buflen;
41611+ int error;
41612+
41613+ prepend(&res, &buflen, "\0", 1);
41614+ error = prepend_path(path, root, &res, &buflen);
41615+ if (error)
41616+ return ERR_PTR(error);
41617+
41618+ return res;
41619+}
41620+
41621+static char *
41622+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41623+{
41624+ char *retval;
41625+
41626+ retval = __our_d_path(path, root, buf, buflen);
41627+ if (unlikely(IS_ERR(retval)))
41628+ retval = strcpy(buf, "<path too long>");
41629+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41630+ retval[1] = '\0';
41631+
41632+ return retval;
41633+}
41634+
41635+static char *
41636+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41637+ char *buf, int buflen)
41638+{
41639+ struct path path;
41640+ char *res;
41641+
41642+ path.dentry = (struct dentry *)dentry;
41643+ path.mnt = (struct vfsmount *)vfsmnt;
41644+
41645+ /* we can use real_root.dentry, real_root.mnt, because this is only called
41646+ by the RBAC system */
41647+ res = gen_full_path(&path, &real_root, buf, buflen);
41648+
41649+ return res;
41650+}
41651+
41652+static char *
41653+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41654+ char *buf, int buflen)
41655+{
41656+ char *res;
41657+ struct path path;
41658+ struct path root;
41659+ struct task_struct *reaper = &init_task;
41660+
41661+ path.dentry = (struct dentry *)dentry;
41662+ path.mnt = (struct vfsmount *)vfsmnt;
41663+
41664+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41665+ get_fs_root(reaper->fs, &root);
41666+
41667+ write_seqlock(&rename_lock);
41668+ br_read_lock(vfsmount_lock);
41669+ res = gen_full_path(&path, &root, buf, buflen);
41670+ br_read_unlock(vfsmount_lock);
41671+ write_sequnlock(&rename_lock);
41672+
41673+ path_put(&root);
41674+ return res;
41675+}
41676+
41677+static char *
41678+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41679+{
41680+ char *ret;
41681+ write_seqlock(&rename_lock);
41682+ br_read_lock(vfsmount_lock);
41683+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41684+ PAGE_SIZE);
41685+ br_read_unlock(vfsmount_lock);
41686+ write_sequnlock(&rename_lock);
41687+ return ret;
41688+}
41689+
41690+char *
41691+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41692+{
41693+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41694+ PAGE_SIZE);
41695+}
41696+
41697+char *
41698+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41699+{
41700+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41701+ PAGE_SIZE);
41702+}
41703+
41704+char *
41705+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41706+{
41707+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41708+ PAGE_SIZE);
41709+}
41710+
41711+char *
41712+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41713+{
41714+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41715+ PAGE_SIZE);
41716+}
41717+
41718+char *
41719+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41720+{
41721+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41722+ PAGE_SIZE);
41723+}
41724+
41725+__inline__ __u32
41726+to_gr_audit(const __u32 reqmode)
41727+{
41728+ /* masks off auditable permission flags, then shifts them to create
41729+ auditing flags, and adds the special case of append auditing if
41730+ we're requesting write */
41731+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41732+}
41733+
41734+struct acl_subject_label *
41735+lookup_subject_map(const struct acl_subject_label *userp)
41736+{
41737+ unsigned int index = shash(userp, subj_map_set.s_size);
41738+ struct subject_map *match;
41739+
41740+ match = subj_map_set.s_hash[index];
41741+
41742+ while (match && match->user != userp)
41743+ match = match->next;
41744+
41745+ if (match != NULL)
41746+ return match->kernel;
41747+ else
41748+ return NULL;
41749+}
41750+
41751+static void
41752+insert_subj_map_entry(struct subject_map *subjmap)
41753+{
41754+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41755+ struct subject_map **curr;
41756+
41757+ subjmap->prev = NULL;
41758+
41759+ curr = &subj_map_set.s_hash[index];
41760+ if (*curr != NULL)
41761+ (*curr)->prev = subjmap;
41762+
41763+ subjmap->next = *curr;
41764+ *curr = subjmap;
41765+
41766+ return;
41767+}
41768+
41769+static struct acl_role_label *
41770+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41771+ const gid_t gid)
41772+{
41773+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41774+ struct acl_role_label *match;
41775+ struct role_allowed_ip *ipp;
41776+ unsigned int x;
41777+ u32 curr_ip = task->signal->curr_ip;
41778+
41779+ task->signal->saved_ip = curr_ip;
41780+
41781+ match = acl_role_set.r_hash[index];
41782+
41783+ while (match) {
41784+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41785+ for (x = 0; x < match->domain_child_num; x++) {
41786+ if (match->domain_children[x] == uid)
41787+ goto found;
41788+ }
41789+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41790+ break;
41791+ match = match->next;
41792+ }
41793+found:
41794+ if (match == NULL) {
41795+ try_group:
41796+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41797+ match = acl_role_set.r_hash[index];
41798+
41799+ while (match) {
41800+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41801+ for (x = 0; x < match->domain_child_num; x++) {
41802+ if (match->domain_children[x] == gid)
41803+ goto found2;
41804+ }
41805+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41806+ break;
41807+ match = match->next;
41808+ }
41809+found2:
41810+ if (match == NULL)
41811+ match = default_role;
41812+ if (match->allowed_ips == NULL)
41813+ return match;
41814+ else {
41815+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41816+ if (likely
41817+ ((ntohl(curr_ip) & ipp->netmask) ==
41818+ (ntohl(ipp->addr) & ipp->netmask)))
41819+ return match;
41820+ }
41821+ match = default_role;
41822+ }
41823+ } else if (match->allowed_ips == NULL) {
41824+ return match;
41825+ } else {
41826+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41827+ if (likely
41828+ ((ntohl(curr_ip) & ipp->netmask) ==
41829+ (ntohl(ipp->addr) & ipp->netmask)))
41830+ return match;
41831+ }
41832+ goto try_group;
41833+ }
41834+
41835+ return match;
41836+}
41837+
41838+struct acl_subject_label *
41839+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41840+ const struct acl_role_label *role)
41841+{
41842+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41843+ struct acl_subject_label *match;
41844+
41845+ match = role->subj_hash[index];
41846+
41847+ while (match && (match->inode != ino || match->device != dev ||
41848+ (match->mode & GR_DELETED))) {
41849+ match = match->next;
41850+ }
41851+
41852+ if (match && !(match->mode & GR_DELETED))
41853+ return match;
41854+ else
41855+ return NULL;
41856+}
41857+
41858+struct acl_subject_label *
41859+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41860+ const struct acl_role_label *role)
41861+{
41862+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41863+ struct acl_subject_label *match;
41864+
41865+ match = role->subj_hash[index];
41866+
41867+ while (match && (match->inode != ino || match->device != dev ||
41868+ !(match->mode & GR_DELETED))) {
41869+ match = match->next;
41870+ }
41871+
41872+ if (match && (match->mode & GR_DELETED))
41873+ return match;
41874+ else
41875+ return NULL;
41876+}
41877+
41878+static struct acl_object_label *
41879+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41880+ const struct acl_subject_label *subj)
41881+{
41882+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41883+ struct acl_object_label *match;
41884+
41885+ match = subj->obj_hash[index];
41886+
41887+ while (match && (match->inode != ino || match->device != dev ||
41888+ (match->mode & GR_DELETED))) {
41889+ match = match->next;
41890+ }
41891+
41892+ if (match && !(match->mode & GR_DELETED))
41893+ return match;
41894+ else
41895+ return NULL;
41896+}
41897+
41898+static struct acl_object_label *
41899+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41900+ const struct acl_subject_label *subj)
41901+{
41902+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41903+ struct acl_object_label *match;
41904+
41905+ match = subj->obj_hash[index];
41906+
41907+ while (match && (match->inode != ino || match->device != dev ||
41908+ !(match->mode & GR_DELETED))) {
41909+ match = match->next;
41910+ }
41911+
41912+ if (match && (match->mode & GR_DELETED))
41913+ return match;
41914+
41915+ match = subj->obj_hash[index];
41916+
41917+ while (match && (match->inode != ino || match->device != dev ||
41918+ (match->mode & GR_DELETED))) {
41919+ match = match->next;
41920+ }
41921+
41922+ if (match && !(match->mode & GR_DELETED))
41923+ return match;
41924+ else
41925+ return NULL;
41926+}
41927+
41928+static struct name_entry *
41929+lookup_name_entry(const char *name)
41930+{
41931+ unsigned int len = strlen(name);
41932+ unsigned int key = full_name_hash(name, len);
41933+ unsigned int index = key % name_set.n_size;
41934+ struct name_entry *match;
41935+
41936+ match = name_set.n_hash[index];
41937+
41938+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41939+ match = match->next;
41940+
41941+ return match;
41942+}
41943+
41944+static struct name_entry *
41945+lookup_name_entry_create(const char *name)
41946+{
41947+ unsigned int len = strlen(name);
41948+ unsigned int key = full_name_hash(name, len);
41949+ unsigned int index = key % name_set.n_size;
41950+ struct name_entry *match;
41951+
41952+ match = name_set.n_hash[index];
41953+
41954+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41955+ !match->deleted))
41956+ match = match->next;
41957+
41958+ if (match && match->deleted)
41959+ return match;
41960+
41961+ match = name_set.n_hash[index];
41962+
41963+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41964+ match->deleted))
41965+ match = match->next;
41966+
41967+ if (match && !match->deleted)
41968+ return match;
41969+ else
41970+ return NULL;
41971+}
41972+
41973+static struct inodev_entry *
41974+lookup_inodev_entry(const ino_t ino, const dev_t dev)
41975+{
41976+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
41977+ struct inodev_entry *match;
41978+
41979+ match = inodev_set.i_hash[index];
41980+
41981+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
41982+ match = match->next;
41983+
41984+ return match;
41985+}
41986+
41987+static void
41988+insert_inodev_entry(struct inodev_entry *entry)
41989+{
41990+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
41991+ inodev_set.i_size);
41992+ struct inodev_entry **curr;
41993+
41994+ entry->prev = NULL;
41995+
41996+ curr = &inodev_set.i_hash[index];
41997+ if (*curr != NULL)
41998+ (*curr)->prev = entry;
41999+
42000+ entry->next = *curr;
42001+ *curr = entry;
42002+
42003+ return;
42004+}
42005+
42006+static void
42007+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42008+{
42009+ unsigned int index =
42010+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42011+ struct acl_role_label **curr;
42012+ struct acl_role_label *tmp;
42013+
42014+ curr = &acl_role_set.r_hash[index];
42015+
42016+ /* if role was already inserted due to domains and already has
42017+ a role in the same bucket as it attached, then we need to
42018+ combine these two buckets
42019+ */
42020+ if (role->next) {
42021+ tmp = role->next;
42022+ while (tmp->next)
42023+ tmp = tmp->next;
42024+ tmp->next = *curr;
42025+ } else
42026+ role->next = *curr;
42027+ *curr = role;
42028+
42029+ return;
42030+}
42031+
42032+static void
42033+insert_acl_role_label(struct acl_role_label *role)
42034+{
42035+ int i;
42036+
42037+ if (role_list == NULL) {
42038+ role_list = role;
42039+ role->prev = NULL;
42040+ } else {
42041+ role->prev = role_list;
42042+ role_list = role;
42043+ }
42044+
42045+ /* used for hash chains */
42046+ role->next = NULL;
42047+
42048+ if (role->roletype & GR_ROLE_DOMAIN) {
42049+ for (i = 0; i < role->domain_child_num; i++)
42050+ __insert_acl_role_label(role, role->domain_children[i]);
42051+ } else
42052+ __insert_acl_role_label(role, role->uidgid);
42053+}
42054+
42055+static int
42056+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42057+{
42058+ struct name_entry **curr, *nentry;
42059+ struct inodev_entry *ientry;
42060+ unsigned int len = strlen(name);
42061+ unsigned int key = full_name_hash(name, len);
42062+ unsigned int index = key % name_set.n_size;
42063+
42064+ curr = &name_set.n_hash[index];
42065+
42066+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42067+ curr = &((*curr)->next);
42068+
42069+ if (*curr != NULL)
42070+ return 1;
42071+
42072+ nentry = acl_alloc(sizeof (struct name_entry));
42073+ if (nentry == NULL)
42074+ return 0;
42075+ ientry = acl_alloc(sizeof (struct inodev_entry));
42076+ if (ientry == NULL)
42077+ return 0;
42078+ ientry->nentry = nentry;
42079+
42080+ nentry->key = key;
42081+ nentry->name = name;
42082+ nentry->inode = inode;
42083+ nentry->device = device;
42084+ nentry->len = len;
42085+ nentry->deleted = deleted;
42086+
42087+ nentry->prev = NULL;
42088+ curr = &name_set.n_hash[index];
42089+ if (*curr != NULL)
42090+ (*curr)->prev = nentry;
42091+ nentry->next = *curr;
42092+ *curr = nentry;
42093+
42094+ /* insert us into the table searchable by inode/dev */
42095+ insert_inodev_entry(ientry);
42096+
42097+ return 1;
42098+}
42099+
42100+static void
42101+insert_acl_obj_label(struct acl_object_label *obj,
42102+ struct acl_subject_label *subj)
42103+{
42104+ unsigned int index =
42105+ fhash(obj->inode, obj->device, subj->obj_hash_size);
42106+ struct acl_object_label **curr;
42107+
42108+
42109+ obj->prev = NULL;
42110+
42111+ curr = &subj->obj_hash[index];
42112+ if (*curr != NULL)
42113+ (*curr)->prev = obj;
42114+
42115+ obj->next = *curr;
42116+ *curr = obj;
42117+
42118+ return;
42119+}
42120+
42121+static void
42122+insert_acl_subj_label(struct acl_subject_label *obj,
42123+ struct acl_role_label *role)
42124+{
42125+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42126+ struct acl_subject_label **curr;
42127+
42128+ obj->prev = NULL;
42129+
42130+ curr = &role->subj_hash[index];
42131+ if (*curr != NULL)
42132+ (*curr)->prev = obj;
42133+
42134+ obj->next = *curr;
42135+ *curr = obj;
42136+
42137+ return;
42138+}
42139+
42140+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42141+
42142+static void *
42143+create_table(__u32 * len, int elementsize)
42144+{
42145+ unsigned int table_sizes[] = {
42146+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42147+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42148+ 4194301, 8388593, 16777213, 33554393, 67108859
42149+ };
42150+ void *newtable = NULL;
42151+ unsigned int pwr = 0;
42152+
42153+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42154+ table_sizes[pwr] <= *len)
42155+ pwr++;
42156+
42157+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42158+ return newtable;
42159+
42160+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42161+ newtable =
42162+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42163+ else
42164+ newtable = vmalloc(table_sizes[pwr] * elementsize);
42165+
42166+ *len = table_sizes[pwr];
42167+
42168+ return newtable;
42169+}
42170+
42171+static int
42172+init_variables(const struct gr_arg *arg)
42173+{
42174+ struct task_struct *reaper = &init_task;
42175+ unsigned int stacksize;
42176+
42177+ subj_map_set.s_size = arg->role_db.num_subjects;
42178+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42179+ name_set.n_size = arg->role_db.num_objects;
42180+ inodev_set.i_size = arg->role_db.num_objects;
42181+
42182+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
42183+ !name_set.n_size || !inodev_set.i_size)
42184+ return 1;
42185+
42186+ if (!gr_init_uidset())
42187+ return 1;
42188+
42189+ /* set up the stack that holds allocation info */
42190+
42191+ stacksize = arg->role_db.num_pointers + 5;
42192+
42193+ if (!acl_alloc_stack_init(stacksize))
42194+ return 1;
42195+
42196+ /* grab reference for the real root dentry and vfsmount */
42197+ get_fs_root(reaper->fs, &real_root);
42198+
42199+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42200+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42201+#endif
42202+
42203+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42204+ if (fakefs_obj_rw == NULL)
42205+ return 1;
42206+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42207+
42208+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42209+ if (fakefs_obj_rwx == NULL)
42210+ return 1;
42211+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42212+
42213+ subj_map_set.s_hash =
42214+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42215+ acl_role_set.r_hash =
42216+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42217+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42218+ inodev_set.i_hash =
42219+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42220+
42221+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42222+ !name_set.n_hash || !inodev_set.i_hash)
42223+ return 1;
42224+
42225+ memset(subj_map_set.s_hash, 0,
42226+ sizeof(struct subject_map *) * subj_map_set.s_size);
42227+ memset(acl_role_set.r_hash, 0,
42228+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
42229+ memset(name_set.n_hash, 0,
42230+ sizeof (struct name_entry *) * name_set.n_size);
42231+ memset(inodev_set.i_hash, 0,
42232+ sizeof (struct inodev_entry *) * inodev_set.i_size);
42233+
42234+ return 0;
42235+}
42236+
42237+/* free information not needed after startup
42238+ currently contains user->kernel pointer mappings for subjects
42239+*/
42240+
42241+static void
42242+free_init_variables(void)
42243+{
42244+ __u32 i;
42245+
42246+ if (subj_map_set.s_hash) {
42247+ for (i = 0; i < subj_map_set.s_size; i++) {
42248+ if (subj_map_set.s_hash[i]) {
42249+ kfree(subj_map_set.s_hash[i]);
42250+ subj_map_set.s_hash[i] = NULL;
42251+ }
42252+ }
42253+
42254+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42255+ PAGE_SIZE)
42256+ kfree(subj_map_set.s_hash);
42257+ else
42258+ vfree(subj_map_set.s_hash);
42259+ }
42260+
42261+ return;
42262+}
42263+
42264+static void
42265+free_variables(void)
42266+{
42267+ struct acl_subject_label *s;
42268+ struct acl_role_label *r;
42269+ struct task_struct *task, *task2;
42270+ unsigned int x;
42271+
42272+ gr_clear_learn_entries();
42273+
42274+ read_lock(&tasklist_lock);
42275+ do_each_thread(task2, task) {
42276+ task->acl_sp_role = 0;
42277+ task->acl_role_id = 0;
42278+ task->acl = NULL;
42279+ task->role = NULL;
42280+ } while_each_thread(task2, task);
42281+ read_unlock(&tasklist_lock);
42282+
42283+ /* release the reference to the real root dentry and vfsmount */
42284+ path_put(&real_root);
42285+
42286+ /* free all object hash tables */
42287+
42288+ FOR_EACH_ROLE_START(r)
42289+ if (r->subj_hash == NULL)
42290+ goto next_role;
42291+ FOR_EACH_SUBJECT_START(r, s, x)
42292+ if (s->obj_hash == NULL)
42293+ break;
42294+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42295+ kfree(s->obj_hash);
42296+ else
42297+ vfree(s->obj_hash);
42298+ FOR_EACH_SUBJECT_END(s, x)
42299+ FOR_EACH_NESTED_SUBJECT_START(r, s)
42300+ if (s->obj_hash == NULL)
42301+ break;
42302+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42303+ kfree(s->obj_hash);
42304+ else
42305+ vfree(s->obj_hash);
42306+ FOR_EACH_NESTED_SUBJECT_END(s)
42307+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42308+ kfree(r->subj_hash);
42309+ else
42310+ vfree(r->subj_hash);
42311+ r->subj_hash = NULL;
42312+next_role:
42313+ FOR_EACH_ROLE_END(r)
42314+
42315+ acl_free_all();
42316+
42317+ if (acl_role_set.r_hash) {
42318+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42319+ PAGE_SIZE)
42320+ kfree(acl_role_set.r_hash);
42321+ else
42322+ vfree(acl_role_set.r_hash);
42323+ }
42324+ if (name_set.n_hash) {
42325+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
42326+ PAGE_SIZE)
42327+ kfree(name_set.n_hash);
42328+ else
42329+ vfree(name_set.n_hash);
42330+ }
42331+
42332+ if (inodev_set.i_hash) {
42333+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42334+ PAGE_SIZE)
42335+ kfree(inodev_set.i_hash);
42336+ else
42337+ vfree(inodev_set.i_hash);
42338+ }
42339+
42340+ gr_free_uidset();
42341+
42342+ memset(&name_set, 0, sizeof (struct name_db));
42343+ memset(&inodev_set, 0, sizeof (struct inodev_db));
42344+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42345+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42346+
42347+ default_role = NULL;
42348+ role_list = NULL;
42349+
42350+ return;
42351+}
42352+
42353+static __u32
42354+count_user_objs(struct acl_object_label *userp)
42355+{
42356+ struct acl_object_label o_tmp;
42357+ __u32 num = 0;
42358+
42359+ while (userp) {
42360+ if (copy_from_user(&o_tmp, userp,
42361+ sizeof (struct acl_object_label)))
42362+ break;
42363+
42364+ userp = o_tmp.prev;
42365+ num++;
42366+ }
42367+
42368+ return num;
42369+}
42370+
42371+static struct acl_subject_label *
42372+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42373+
42374+static int
42375+copy_user_glob(struct acl_object_label *obj)
42376+{
42377+ struct acl_object_label *g_tmp, **guser;
42378+ unsigned int len;
42379+ char *tmp;
42380+
42381+ if (obj->globbed == NULL)
42382+ return 0;
42383+
42384+ guser = &obj->globbed;
42385+ while (*guser) {
42386+ g_tmp = (struct acl_object_label *)
42387+ acl_alloc(sizeof (struct acl_object_label));
42388+ if (g_tmp == NULL)
42389+ return -ENOMEM;
42390+
42391+ if (copy_from_user(g_tmp, *guser,
42392+ sizeof (struct acl_object_label)))
42393+ return -EFAULT;
42394+
42395+ len = strnlen_user(g_tmp->filename, PATH_MAX);
42396+
42397+ if (!len || len >= PATH_MAX)
42398+ return -EINVAL;
42399+
42400+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42401+ return -ENOMEM;
42402+
42403+ if (copy_from_user(tmp, g_tmp->filename, len))
42404+ return -EFAULT;
42405+ tmp[len-1] = '\0';
42406+ g_tmp->filename = tmp;
42407+
42408+ *guser = g_tmp;
42409+ guser = &(g_tmp->next);
42410+ }
42411+
42412+ return 0;
42413+}
42414+
42415+static int
42416+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42417+ struct acl_role_label *role)
42418+{
42419+ struct acl_object_label *o_tmp;
42420+ unsigned int len;
42421+ int ret;
42422+ char *tmp;
42423+
42424+ while (userp) {
42425+ if ((o_tmp = (struct acl_object_label *)
42426+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
42427+ return -ENOMEM;
42428+
42429+ if (copy_from_user(o_tmp, userp,
42430+ sizeof (struct acl_object_label)))
42431+ return -EFAULT;
42432+
42433+ userp = o_tmp->prev;
42434+
42435+ len = strnlen_user(o_tmp->filename, PATH_MAX);
42436+
42437+ if (!len || len >= PATH_MAX)
42438+ return -EINVAL;
42439+
42440+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42441+ return -ENOMEM;
42442+
42443+ if (copy_from_user(tmp, o_tmp->filename, len))
42444+ return -EFAULT;
42445+ tmp[len-1] = '\0';
42446+ o_tmp->filename = tmp;
42447+
42448+ insert_acl_obj_label(o_tmp, subj);
42449+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42450+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42451+ return -ENOMEM;
42452+
42453+ ret = copy_user_glob(o_tmp);
42454+ if (ret)
42455+ return ret;
42456+
42457+ if (o_tmp->nested) {
42458+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42459+ if (IS_ERR(o_tmp->nested))
42460+ return PTR_ERR(o_tmp->nested);
42461+
42462+ /* insert into nested subject list */
42463+ o_tmp->nested->next = role->hash->first;
42464+ role->hash->first = o_tmp->nested;
42465+ }
42466+ }
42467+
42468+ return 0;
42469+}
42470+
42471+static __u32
42472+count_user_subjs(struct acl_subject_label *userp)
42473+{
42474+ struct acl_subject_label s_tmp;
42475+ __u32 num = 0;
42476+
42477+ while (userp) {
42478+ if (copy_from_user(&s_tmp, userp,
42479+ sizeof (struct acl_subject_label)))
42480+ break;
42481+
42482+ userp = s_tmp.prev;
42483+ /* do not count nested subjects against this count, since
42484+ they are not included in the hash table, but are
42485+ attached to objects. We have already counted
42486+ the subjects in userspace for the allocation
42487+ stack
42488+ */
42489+ if (!(s_tmp.mode & GR_NESTED))
42490+ num++;
42491+ }
42492+
42493+ return num;
42494+}
42495+
42496+static int
42497+copy_user_allowedips(struct acl_role_label *rolep)
42498+{
42499+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42500+
42501+ ruserip = rolep->allowed_ips;
42502+
42503+ while (ruserip) {
42504+ rlast = rtmp;
42505+
42506+ if ((rtmp = (struct role_allowed_ip *)
42507+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42508+ return -ENOMEM;
42509+
42510+ if (copy_from_user(rtmp, ruserip,
42511+ sizeof (struct role_allowed_ip)))
42512+ return -EFAULT;
42513+
42514+ ruserip = rtmp->prev;
42515+
42516+ if (!rlast) {
42517+ rtmp->prev = NULL;
42518+ rolep->allowed_ips = rtmp;
42519+ } else {
42520+ rlast->next = rtmp;
42521+ rtmp->prev = rlast;
42522+ }
42523+
42524+ if (!ruserip)
42525+ rtmp->next = NULL;
42526+ }
42527+
42528+ return 0;
42529+}
42530+
42531+static int
42532+copy_user_transitions(struct acl_role_label *rolep)
42533+{
42534+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
42535+
42536+ unsigned int len;
42537+ char *tmp;
42538+
42539+ rusertp = rolep->transitions;
42540+
42541+ while (rusertp) {
42542+ rlast = rtmp;
42543+
42544+ if ((rtmp = (struct role_transition *)
42545+ acl_alloc(sizeof (struct role_transition))) == NULL)
42546+ return -ENOMEM;
42547+
42548+ if (copy_from_user(rtmp, rusertp,
42549+ sizeof (struct role_transition)))
42550+ return -EFAULT;
42551+
42552+ rusertp = rtmp->prev;
42553+
42554+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42555+
42556+ if (!len || len >= GR_SPROLE_LEN)
42557+ return -EINVAL;
42558+
42559+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42560+ return -ENOMEM;
42561+
42562+ if (copy_from_user(tmp, rtmp->rolename, len))
42563+ return -EFAULT;
42564+ tmp[len-1] = '\0';
42565+ rtmp->rolename = tmp;
42566+
42567+ if (!rlast) {
42568+ rtmp->prev = NULL;
42569+ rolep->transitions = rtmp;
42570+ } else {
42571+ rlast->next = rtmp;
42572+ rtmp->prev = rlast;
42573+ }
42574+
42575+ if (!rusertp)
42576+ rtmp->next = NULL;
42577+ }
42578+
42579+ return 0;
42580+}
42581+
42582+static struct acl_subject_label *
42583+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42584+{
42585+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42586+ unsigned int len;
42587+ char *tmp;
42588+ __u32 num_objs;
42589+ struct acl_ip_label **i_tmp, *i_utmp2;
42590+ struct gr_hash_struct ghash;
42591+ struct subject_map *subjmap;
42592+ unsigned int i_num;
42593+ int err;
42594+
42595+ s_tmp = lookup_subject_map(userp);
42596+
42597+ /* we've already copied this subject into the kernel, just return
42598+ the reference to it, and don't copy it over again
42599+ */
42600+ if (s_tmp)
42601+ return(s_tmp);
42602+
42603+ if ((s_tmp = (struct acl_subject_label *)
42604+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42605+ return ERR_PTR(-ENOMEM);
42606+
42607+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42608+ if (subjmap == NULL)
42609+ return ERR_PTR(-ENOMEM);
42610+
42611+ subjmap->user = userp;
42612+ subjmap->kernel = s_tmp;
42613+ insert_subj_map_entry(subjmap);
42614+
42615+ if (copy_from_user(s_tmp, userp,
42616+ sizeof (struct acl_subject_label)))
42617+ return ERR_PTR(-EFAULT);
42618+
42619+ len = strnlen_user(s_tmp->filename, PATH_MAX);
42620+
42621+ if (!len || len >= PATH_MAX)
42622+ return ERR_PTR(-EINVAL);
42623+
42624+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42625+ return ERR_PTR(-ENOMEM);
42626+
42627+ if (copy_from_user(tmp, s_tmp->filename, len))
42628+ return ERR_PTR(-EFAULT);
42629+ tmp[len-1] = '\0';
42630+ s_tmp->filename = tmp;
42631+
42632+ if (!strcmp(s_tmp->filename, "/"))
42633+ role->root_label = s_tmp;
42634+
42635+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42636+ return ERR_PTR(-EFAULT);
42637+
42638+ /* copy user and group transition tables */
42639+
42640+ if (s_tmp->user_trans_num) {
42641+ uid_t *uidlist;
42642+
42643+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42644+ if (uidlist == NULL)
42645+ return ERR_PTR(-ENOMEM);
42646+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42647+ return ERR_PTR(-EFAULT);
42648+
42649+ s_tmp->user_transitions = uidlist;
42650+ }
42651+
42652+ if (s_tmp->group_trans_num) {
42653+ gid_t *gidlist;
42654+
42655+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42656+ if (gidlist == NULL)
42657+ return ERR_PTR(-ENOMEM);
42658+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42659+ return ERR_PTR(-EFAULT);
42660+
42661+ s_tmp->group_transitions = gidlist;
42662+ }
42663+
42664+ /* set up object hash table */
42665+ num_objs = count_user_objs(ghash.first);
42666+
42667+ s_tmp->obj_hash_size = num_objs;
42668+ s_tmp->obj_hash =
42669+ (struct acl_object_label **)
42670+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42671+
42672+ if (!s_tmp->obj_hash)
42673+ return ERR_PTR(-ENOMEM);
42674+
42675+ memset(s_tmp->obj_hash, 0,
42676+ s_tmp->obj_hash_size *
42677+ sizeof (struct acl_object_label *));
42678+
42679+ /* add in objects */
42680+ err = copy_user_objs(ghash.first, s_tmp, role);
42681+
42682+ if (err)
42683+ return ERR_PTR(err);
42684+
42685+ /* set pointer for parent subject */
42686+ if (s_tmp->parent_subject) {
42687+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42688+
42689+ if (IS_ERR(s_tmp2))
42690+ return s_tmp2;
42691+
42692+ s_tmp->parent_subject = s_tmp2;
42693+ }
42694+
42695+ /* add in ip acls */
42696+
42697+ if (!s_tmp->ip_num) {
42698+ s_tmp->ips = NULL;
42699+ goto insert;
42700+ }
42701+
42702+ i_tmp =
42703+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42704+ sizeof (struct acl_ip_label *));
42705+
42706+ if (!i_tmp)
42707+ return ERR_PTR(-ENOMEM);
42708+
42709+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42710+ *(i_tmp + i_num) =
42711+ (struct acl_ip_label *)
42712+ acl_alloc(sizeof (struct acl_ip_label));
42713+ if (!*(i_tmp + i_num))
42714+ return ERR_PTR(-ENOMEM);
42715+
42716+ if (copy_from_user
42717+ (&i_utmp2, s_tmp->ips + i_num,
42718+ sizeof (struct acl_ip_label *)))
42719+ return ERR_PTR(-EFAULT);
42720+
42721+ if (copy_from_user
42722+ (*(i_tmp + i_num), i_utmp2,
42723+ sizeof (struct acl_ip_label)))
42724+ return ERR_PTR(-EFAULT);
42725+
42726+ if ((*(i_tmp + i_num))->iface == NULL)
42727+ continue;
42728+
42729+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42730+ if (!len || len >= IFNAMSIZ)
42731+ return ERR_PTR(-EINVAL);
42732+ tmp = acl_alloc(len);
42733+ if (tmp == NULL)
42734+ return ERR_PTR(-ENOMEM);
42735+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42736+ return ERR_PTR(-EFAULT);
42737+ (*(i_tmp + i_num))->iface = tmp;
42738+ }
42739+
42740+ s_tmp->ips = i_tmp;
42741+
42742+insert:
42743+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42744+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42745+ return ERR_PTR(-ENOMEM);
42746+
42747+ return s_tmp;
42748+}
42749+
42750+static int
42751+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42752+{
42753+ struct acl_subject_label s_pre;
42754+ struct acl_subject_label * ret;
42755+ int err;
42756+
42757+ while (userp) {
42758+ if (copy_from_user(&s_pre, userp,
42759+ sizeof (struct acl_subject_label)))
42760+ return -EFAULT;
42761+
42762+ /* do not add nested subjects here, add
42763+ while parsing objects
42764+ */
42765+
42766+ if (s_pre.mode & GR_NESTED) {
42767+ userp = s_pre.prev;
42768+ continue;
42769+ }
42770+
42771+ ret = do_copy_user_subj(userp, role);
42772+
42773+ err = PTR_ERR(ret);
42774+ if (IS_ERR(ret))
42775+ return err;
42776+
42777+ insert_acl_subj_label(ret, role);
42778+
42779+ userp = s_pre.prev;
42780+ }
42781+
42782+ return 0;
42783+}
42784+
42785+static int
42786+copy_user_acl(struct gr_arg *arg)
42787+{
42788+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42789+ struct sprole_pw *sptmp;
42790+ struct gr_hash_struct *ghash;
42791+ uid_t *domainlist;
42792+ unsigned int r_num;
42793+ unsigned int len;
42794+ char *tmp;
42795+ int err = 0;
42796+ __u16 i;
42797+ __u32 num_subjs;
42798+
42799+ /* we need a default and kernel role */
42800+ if (arg->role_db.num_roles < 2)
42801+ return -EINVAL;
42802+
42803+ /* copy special role authentication info from userspace */
42804+
42805+ num_sprole_pws = arg->num_sprole_pws;
42806+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42807+
42808+ if (!acl_special_roles) {
42809+ err = -ENOMEM;
42810+ goto cleanup;
42811+ }
42812+
42813+ for (i = 0; i < num_sprole_pws; i++) {
42814+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42815+ if (!sptmp) {
42816+ err = -ENOMEM;
42817+ goto cleanup;
42818+ }
42819+ if (copy_from_user(sptmp, arg->sprole_pws + i,
42820+ sizeof (struct sprole_pw))) {
42821+ err = -EFAULT;
42822+ goto cleanup;
42823+ }
42824+
42825+ len =
42826+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42827+
42828+ if (!len || len >= GR_SPROLE_LEN) {
42829+ err = -EINVAL;
42830+ goto cleanup;
42831+ }
42832+
42833+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42834+ err = -ENOMEM;
42835+ goto cleanup;
42836+ }
42837+
42838+ if (copy_from_user(tmp, sptmp->rolename, len)) {
42839+ err = -EFAULT;
42840+ goto cleanup;
42841+ }
42842+ tmp[len-1] = '\0';
42843+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42844+ printk(KERN_ALERT "Copying special role %s\n", tmp);
42845+#endif
42846+ sptmp->rolename = tmp;
42847+ acl_special_roles[i] = sptmp;
42848+ }
42849+
42850+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42851+
42852+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42853+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
42854+
42855+ if (!r_tmp) {
42856+ err = -ENOMEM;
42857+ goto cleanup;
42858+ }
42859+
42860+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
42861+ sizeof (struct acl_role_label *))) {
42862+ err = -EFAULT;
42863+ goto cleanup;
42864+ }
42865+
42866+ if (copy_from_user(r_tmp, r_utmp2,
42867+ sizeof (struct acl_role_label))) {
42868+ err = -EFAULT;
42869+ goto cleanup;
42870+ }
42871+
42872+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42873+
42874+ if (!len || len >= PATH_MAX) {
42875+ err = -EINVAL;
42876+ goto cleanup;
42877+ }
42878+
42879+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42880+ err = -ENOMEM;
42881+ goto cleanup;
42882+ }
42883+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
42884+ err = -EFAULT;
42885+ goto cleanup;
42886+ }
42887+ tmp[len-1] = '\0';
42888+ r_tmp->rolename = tmp;
42889+
42890+ if (!strcmp(r_tmp->rolename, "default")
42891+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42892+ default_role = r_tmp;
42893+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42894+ kernel_role = r_tmp;
42895+ }
42896+
42897+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42898+ err = -ENOMEM;
42899+ goto cleanup;
42900+ }
42901+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42902+ err = -EFAULT;
42903+ goto cleanup;
42904+ }
42905+
42906+ r_tmp->hash = ghash;
42907+
42908+ num_subjs = count_user_subjs(r_tmp->hash->first);
42909+
42910+ r_tmp->subj_hash_size = num_subjs;
42911+ r_tmp->subj_hash =
42912+ (struct acl_subject_label **)
42913+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42914+
42915+ if (!r_tmp->subj_hash) {
42916+ err = -ENOMEM;
42917+ goto cleanup;
42918+ }
42919+
42920+ err = copy_user_allowedips(r_tmp);
42921+ if (err)
42922+ goto cleanup;
42923+
42924+ /* copy domain info */
42925+ if (r_tmp->domain_children != NULL) {
42926+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42927+ if (domainlist == NULL) {
42928+ err = -ENOMEM;
42929+ goto cleanup;
42930+ }
42931+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42932+ err = -EFAULT;
42933+ goto cleanup;
42934+ }
42935+ r_tmp->domain_children = domainlist;
42936+ }
42937+
42938+ err = copy_user_transitions(r_tmp);
42939+ if (err)
42940+ goto cleanup;
42941+
42942+ memset(r_tmp->subj_hash, 0,
42943+ r_tmp->subj_hash_size *
42944+ sizeof (struct acl_subject_label *));
42945+
42946+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42947+
42948+ if (err)
42949+ goto cleanup;
42950+
42951+ /* set nested subject list to null */
42952+ r_tmp->hash->first = NULL;
42953+
42954+ insert_acl_role_label(r_tmp);
42955+ }
42956+
42957+ goto return_err;
42958+ cleanup:
42959+ free_variables();
42960+ return_err:
42961+ return err;
42962+
42963+}
42964+
42965+static int
42966+gracl_init(struct gr_arg *args)
42967+{
42968+ int error = 0;
42969+
42970+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42971+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42972+
42973+ if (init_variables(args)) {
42974+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42975+ error = -ENOMEM;
42976+ free_variables();
42977+ goto out;
42978+ }
42979+
42980+ error = copy_user_acl(args);
42981+ free_init_variables();
42982+ if (error) {
42983+ free_variables();
42984+ goto out;
42985+ }
42986+
42987+ if ((error = gr_set_acls(0))) {
42988+ free_variables();
42989+ goto out;
42990+ }
42991+
42992+ pax_open_kernel();
42993+ gr_status |= GR_READY;
42994+ pax_close_kernel();
42995+
42996+ out:
42997+ return error;
42998+}
42999+
43000+/* derived from glibc fnmatch() 0: match, 1: no match*/
43001+
43002+static int
43003+glob_match(const char *p, const char *n)
43004+{
43005+ char c;
43006+
43007+ while ((c = *p++) != '\0') {
43008+ switch (c) {
43009+ case '?':
43010+ if (*n == '\0')
43011+ return 1;
43012+ else if (*n == '/')
43013+ return 1;
43014+ break;
43015+ case '\\':
43016+ if (*n != c)
43017+ return 1;
43018+ break;
43019+ case '*':
43020+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
43021+ if (*n == '/')
43022+ return 1;
43023+ else if (c == '?') {
43024+ if (*n == '\0')
43025+ return 1;
43026+ else
43027+ ++n;
43028+ }
43029+ }
43030+ if (c == '\0') {
43031+ return 0;
43032+ } else {
43033+ const char *endp;
43034+
43035+ if ((endp = strchr(n, '/')) == NULL)
43036+ endp = n + strlen(n);
43037+
43038+ if (c == '[') {
43039+ for (--p; n < endp; ++n)
43040+ if (!glob_match(p, n))
43041+ return 0;
43042+ } else if (c == '/') {
43043+ while (*n != '\0' && *n != '/')
43044+ ++n;
43045+ if (*n == '/' && !glob_match(p, n + 1))
43046+ return 0;
43047+ } else {
43048+ for (--p; n < endp; ++n)
43049+ if (*n == c && !glob_match(p, n))
43050+ return 0;
43051+ }
43052+
43053+ return 1;
43054+ }
43055+ case '[':
43056+ {
43057+ int not;
43058+ char cold;
43059+
43060+ if (*n == '\0' || *n == '/')
43061+ return 1;
43062+
43063+ not = (*p == '!' || *p == '^');
43064+ if (not)
43065+ ++p;
43066+
43067+ c = *p++;
43068+ for (;;) {
43069+ unsigned char fn = (unsigned char)*n;
43070+
43071+ if (c == '\0')
43072+ return 1;
43073+ else {
43074+ if (c == fn)
43075+ goto matched;
43076+ cold = c;
43077+ c = *p++;
43078+
43079+ if (c == '-' && *p != ']') {
43080+ unsigned char cend = *p++;
43081+
43082+ if (cend == '\0')
43083+ return 1;
43084+
43085+ if (cold <= fn && fn <= cend)
43086+ goto matched;
43087+
43088+ c = *p++;
43089+ }
43090+ }
43091+
43092+ if (c == ']')
43093+ break;
43094+ }
43095+ if (!not)
43096+ return 1;
43097+ break;
43098+ matched:
43099+ while (c != ']') {
43100+ if (c == '\0')
43101+ return 1;
43102+
43103+ c = *p++;
43104+ }
43105+ if (not)
43106+ return 1;
43107+ }
43108+ break;
43109+ default:
43110+ if (c != *n)
43111+ return 1;
43112+ }
43113+
43114+ ++n;
43115+ }
43116+
43117+ if (*n == '\0')
43118+ return 0;
43119+
43120+ if (*n == '/')
43121+ return 0;
43122+
43123+ return 1;
43124+}
43125+
43126+static struct acl_object_label *
43127+chk_glob_label(struct acl_object_label *globbed,
43128+ struct dentry *dentry, struct vfsmount *mnt, char **path)
43129+{
43130+ struct acl_object_label *tmp;
43131+
43132+ if (*path == NULL)
43133+ *path = gr_to_filename_nolock(dentry, mnt);
43134+
43135+ tmp = globbed;
43136+
43137+ while (tmp) {
43138+ if (!glob_match(tmp->filename, *path))
43139+ return tmp;
43140+ tmp = tmp->next;
43141+ }
43142+
43143+ return NULL;
43144+}
43145+
43146+static struct acl_object_label *
43147+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43148+ const ino_t curr_ino, const dev_t curr_dev,
43149+ const struct acl_subject_label *subj, char **path, const int checkglob)
43150+{
43151+ struct acl_subject_label *tmpsubj;
43152+ struct acl_object_label *retval;
43153+ struct acl_object_label *retval2;
43154+
43155+ tmpsubj = (struct acl_subject_label *) subj;
43156+ read_lock(&gr_inode_lock);
43157+ do {
43158+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43159+ if (retval) {
43160+ if (checkglob && retval->globbed) {
43161+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43162+ (struct vfsmount *)orig_mnt, path);
43163+ if (retval2)
43164+ retval = retval2;
43165+ }
43166+ break;
43167+ }
43168+ } while ((tmpsubj = tmpsubj->parent_subject));
43169+ read_unlock(&gr_inode_lock);
43170+
43171+ return retval;
43172+}
43173+
43174+static __inline__ struct acl_object_label *
43175+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43176+ struct dentry *curr_dentry,
43177+ const struct acl_subject_label *subj, char **path, const int checkglob)
43178+{
43179+ int newglob = checkglob;
43180+ ino_t inode;
43181+ dev_t device;
43182+
43183+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43184+ as we don't want a / * rule to match instead of the / object
43185+ don't do this for create lookups that call this function though, since they're looking up
43186+ on the parent and thus need globbing checks on all paths
43187+ */
43188+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43189+ newglob = GR_NO_GLOB;
43190+
43191+ spin_lock(&curr_dentry->d_lock);
43192+ inode = curr_dentry->d_inode->i_ino;
43193+ device = __get_dev(curr_dentry);
43194+ spin_unlock(&curr_dentry->d_lock);
43195+
43196+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43197+}
43198+
43199+static struct acl_object_label *
43200+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43201+ const struct acl_subject_label *subj, char *path, const int checkglob)
43202+{
43203+ struct dentry *dentry = (struct dentry *) l_dentry;
43204+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43205+ struct acl_object_label *retval;
43206+ struct dentry *parent;
43207+
43208+ write_seqlock(&rename_lock);
43209+ br_read_lock(vfsmount_lock);
43210+
43211+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43212+#ifdef CONFIG_NET
43213+ mnt == sock_mnt ||
43214+#endif
43215+#ifdef CONFIG_HUGETLBFS
43216+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43217+#endif
43218+ /* ignore Eric Biederman */
43219+ IS_PRIVATE(l_dentry->d_inode))) {
43220+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43221+ goto out;
43222+ }
43223+
43224+ for (;;) {
43225+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43226+ break;
43227+
43228+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43229+ if (mnt->mnt_parent == mnt)
43230+ break;
43231+
43232+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43233+ if (retval != NULL)
43234+ goto out;
43235+
43236+ dentry = mnt->mnt_mountpoint;
43237+ mnt = mnt->mnt_parent;
43238+ continue;
43239+ }
43240+
43241+ parent = dentry->d_parent;
43242+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43243+ if (retval != NULL)
43244+ goto out;
43245+
43246+ dentry = parent;
43247+ }
43248+
43249+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43250+
43251+ /* real_root is pinned so we don't have to hold a reference */
43252+ if (retval == NULL)
43253+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43254+out:
43255+ br_read_unlock(vfsmount_lock);
43256+ write_sequnlock(&rename_lock);
43257+
43258+ BUG_ON(retval == NULL);
43259+
43260+ return retval;
43261+}
43262+
43263+static __inline__ struct acl_object_label *
43264+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43265+ const struct acl_subject_label *subj)
43266+{
43267+ char *path = NULL;
43268+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43269+}
43270+
43271+static __inline__ struct acl_object_label *
43272+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43273+ const struct acl_subject_label *subj)
43274+{
43275+ char *path = NULL;
43276+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43277+}
43278+
43279+static __inline__ struct acl_object_label *
43280+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43281+ const struct acl_subject_label *subj, char *path)
43282+{
43283+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43284+}
43285+
43286+static struct acl_subject_label *
43287+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43288+ const struct acl_role_label *role)
43289+{
43290+ struct dentry *dentry = (struct dentry *) l_dentry;
43291+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43292+ struct acl_subject_label *retval;
43293+ struct dentry *parent;
43294+
43295+ write_seqlock(&rename_lock);
43296+ br_read_lock(vfsmount_lock);
43297+
43298+ for (;;) {
43299+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43300+ break;
43301+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43302+ if (mnt->mnt_parent == mnt)
43303+ break;
43304+
43305+ spin_lock(&dentry->d_lock);
43306+ read_lock(&gr_inode_lock);
43307+ retval =
43308+ lookup_acl_subj_label(dentry->d_inode->i_ino,
43309+ __get_dev(dentry), role);
43310+ read_unlock(&gr_inode_lock);
43311+ spin_unlock(&dentry->d_lock);
43312+ if (retval != NULL)
43313+ goto out;
43314+
43315+ dentry = mnt->mnt_mountpoint;
43316+ mnt = mnt->mnt_parent;
43317+ continue;
43318+ }
43319+
43320+ spin_lock(&dentry->d_lock);
43321+ read_lock(&gr_inode_lock);
43322+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43323+ __get_dev(dentry), role);
43324+ read_unlock(&gr_inode_lock);
43325+ parent = dentry->d_parent;
43326+ spin_unlock(&dentry->d_lock);
43327+
43328+ if (retval != NULL)
43329+ goto out;
43330+
43331+ dentry = parent;
43332+ }
43333+
43334+ spin_lock(&dentry->d_lock);
43335+ read_lock(&gr_inode_lock);
43336+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43337+ __get_dev(dentry), role);
43338+ read_unlock(&gr_inode_lock);
43339+ spin_unlock(&dentry->d_lock);
43340+
43341+ if (unlikely(retval == NULL)) {
43342+ /* real_root is pinned, we don't need to hold a reference */
43343+ read_lock(&gr_inode_lock);
43344+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43345+ __get_dev(real_root.dentry), role);
43346+ read_unlock(&gr_inode_lock);
43347+ }
43348+out:
43349+ br_read_unlock(vfsmount_lock);
43350+ write_sequnlock(&rename_lock);
43351+
43352+ BUG_ON(retval == NULL);
43353+
43354+ return retval;
43355+}
43356+
43357+static void
43358+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43359+{
43360+ struct task_struct *task = current;
43361+ const struct cred *cred = current_cred();
43362+
43363+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43364+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43365+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43366+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43367+
43368+ return;
43369+}
43370+
43371+static void
43372+gr_log_learn_sysctl(const char *path, const __u32 mode)
43373+{
43374+ struct task_struct *task = current;
43375+ const struct cred *cred = current_cred();
43376+
43377+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43378+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43379+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43380+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43381+
43382+ return;
43383+}
43384+
43385+static void
43386+gr_log_learn_id_change(const char type, const unsigned int real,
43387+ const unsigned int effective, const unsigned int fs)
43388+{
43389+ struct task_struct *task = current;
43390+ const struct cred *cred = current_cred();
43391+
43392+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43393+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43394+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43395+ type, real, effective, fs, &task->signal->saved_ip);
43396+
43397+ return;
43398+}
43399+
43400+__u32
43401+gr_check_link(const struct dentry * new_dentry,
43402+ const struct dentry * parent_dentry,
43403+ const struct vfsmount * parent_mnt,
43404+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43405+{
43406+ struct acl_object_label *obj;
43407+ __u32 oldmode, newmode;
43408+ __u32 needmode;
43409+
43410+ if (unlikely(!(gr_status & GR_READY)))
43411+ return (GR_CREATE | GR_LINK);
43412+
43413+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43414+ oldmode = obj->mode;
43415+
43416+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43417+ oldmode |= (GR_CREATE | GR_LINK);
43418+
43419+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43420+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43421+ needmode |= GR_SETID | GR_AUDIT_SETID;
43422+
43423+ newmode =
43424+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
43425+ oldmode | needmode);
43426+
43427+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43428+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43429+ GR_INHERIT | GR_AUDIT_INHERIT);
43430+
43431+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43432+ goto bad;
43433+
43434+ if ((oldmode & needmode) != needmode)
43435+ goto bad;
43436+
43437+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43438+ if ((newmode & needmode) != needmode)
43439+ goto bad;
43440+
43441+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43442+ return newmode;
43443+bad:
43444+ needmode = oldmode;
43445+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43446+ needmode |= GR_SETID;
43447+
43448+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43449+ gr_log_learn(old_dentry, old_mnt, needmode);
43450+ return (GR_CREATE | GR_LINK);
43451+ } else if (newmode & GR_SUPPRESS)
43452+ return GR_SUPPRESS;
43453+ else
43454+ return 0;
43455+}
43456+
43457+__u32
43458+gr_search_file(const struct dentry * dentry, const __u32 mode,
43459+ const struct vfsmount * mnt)
43460+{
43461+ __u32 retval = mode;
43462+ struct acl_subject_label *curracl;
43463+ struct acl_object_label *currobj;
43464+
43465+ if (unlikely(!(gr_status & GR_READY)))
43466+ return (mode & ~GR_AUDITS);
43467+
43468+ curracl = current->acl;
43469+
43470+ currobj = chk_obj_label(dentry, mnt, curracl);
43471+ retval = currobj->mode & mode;
43472+
43473+ /* if we're opening a specified transfer file for writing
43474+ (e.g. /dev/initctl), then transfer our role to init
43475+ */
43476+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43477+ current->role->roletype & GR_ROLE_PERSIST)) {
43478+ struct task_struct *task = init_pid_ns.child_reaper;
43479+
43480+ if (task->role != current->role) {
43481+ task->acl_sp_role = 0;
43482+ task->acl_role_id = current->acl_role_id;
43483+ task->role = current->role;
43484+ rcu_read_lock();
43485+ read_lock(&grsec_exec_file_lock);
43486+ gr_apply_subject_to_task(task);
43487+ read_unlock(&grsec_exec_file_lock);
43488+ rcu_read_unlock();
43489+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43490+ }
43491+ }
43492+
43493+ if (unlikely
43494+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43495+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43496+ __u32 new_mode = mode;
43497+
43498+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43499+
43500+ retval = new_mode;
43501+
43502+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43503+ new_mode |= GR_INHERIT;
43504+
43505+ if (!(mode & GR_NOLEARN))
43506+ gr_log_learn(dentry, mnt, new_mode);
43507+ }
43508+
43509+ return retval;
43510+}
43511+
43512+__u32
43513+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43514+ const struct vfsmount * mnt, const __u32 mode)
43515+{
43516+ struct name_entry *match;
43517+ struct acl_object_label *matchpo;
43518+ struct acl_subject_label *curracl;
43519+ char *path;
43520+ __u32 retval;
43521+
43522+ if (unlikely(!(gr_status & GR_READY)))
43523+ return (mode & ~GR_AUDITS);
43524+
43525+ preempt_disable();
43526+ path = gr_to_filename_rbac(new_dentry, mnt);
43527+ match = lookup_name_entry_create(path);
43528+
43529+ if (!match)
43530+ goto check_parent;
43531+
43532+ curracl = current->acl;
43533+
43534+ read_lock(&gr_inode_lock);
43535+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43536+ read_unlock(&gr_inode_lock);
43537+
43538+ if (matchpo) {
43539+ if ((matchpo->mode & mode) !=
43540+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
43541+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43542+ __u32 new_mode = mode;
43543+
43544+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43545+
43546+ gr_log_learn(new_dentry, mnt, new_mode);
43547+
43548+ preempt_enable();
43549+ return new_mode;
43550+ }
43551+ preempt_enable();
43552+ return (matchpo->mode & mode);
43553+ }
43554+
43555+ check_parent:
43556+ curracl = current->acl;
43557+
43558+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43559+ retval = matchpo->mode & mode;
43560+
43561+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43562+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43563+ __u32 new_mode = mode;
43564+
43565+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43566+
43567+ gr_log_learn(new_dentry, mnt, new_mode);
43568+ preempt_enable();
43569+ return new_mode;
43570+ }
43571+
43572+ preempt_enable();
43573+ return retval;
43574+}
43575+
43576+int
43577+gr_check_hidden_task(const struct task_struct *task)
43578+{
43579+ if (unlikely(!(gr_status & GR_READY)))
43580+ return 0;
43581+
43582+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43583+ return 1;
43584+
43585+ return 0;
43586+}
43587+
43588+int
43589+gr_check_protected_task(const struct task_struct *task)
43590+{
43591+ if (unlikely(!(gr_status & GR_READY) || !task))
43592+ return 0;
43593+
43594+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43595+ task->acl != current->acl)
43596+ return 1;
43597+
43598+ return 0;
43599+}
43600+
43601+int
43602+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43603+{
43604+ struct task_struct *p;
43605+ int ret = 0;
43606+
43607+ if (unlikely(!(gr_status & GR_READY) || !pid))
43608+ return ret;
43609+
43610+ read_lock(&tasklist_lock);
43611+ do_each_pid_task(pid, type, p) {
43612+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43613+ p->acl != current->acl) {
43614+ ret = 1;
43615+ goto out;
43616+ }
43617+ } while_each_pid_task(pid, type, p);
43618+out:
43619+ read_unlock(&tasklist_lock);
43620+
43621+ return ret;
43622+}
43623+
43624+void
43625+gr_copy_label(struct task_struct *tsk)
43626+{
43627+ tsk->signal->used_accept = 0;
43628+ tsk->acl_sp_role = 0;
43629+ tsk->acl_role_id = current->acl_role_id;
43630+ tsk->acl = current->acl;
43631+ tsk->role = current->role;
43632+ tsk->signal->curr_ip = current->signal->curr_ip;
43633+ tsk->signal->saved_ip = current->signal->saved_ip;
43634+ if (current->exec_file)
43635+ get_file(current->exec_file);
43636+ tsk->exec_file = current->exec_file;
43637+ tsk->is_writable = current->is_writable;
43638+ if (unlikely(current->signal->used_accept)) {
43639+ current->signal->curr_ip = 0;
43640+ current->signal->saved_ip = 0;
43641+ }
43642+
43643+ return;
43644+}
43645+
43646+static void
43647+gr_set_proc_res(struct task_struct *task)
43648+{
43649+ struct acl_subject_label *proc;
43650+ unsigned short i;
43651+
43652+ proc = task->acl;
43653+
43654+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43655+ return;
43656+
43657+ for (i = 0; i < RLIM_NLIMITS; i++) {
43658+ if (!(proc->resmask & (1 << i)))
43659+ continue;
43660+
43661+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43662+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43663+ }
43664+
43665+ return;
43666+}
43667+
43668+extern int __gr_process_user_ban(struct user_struct *user);
43669+
43670+int
43671+gr_check_user_change(int real, int effective, int fs)
43672+{
43673+ unsigned int i;
43674+ __u16 num;
43675+ uid_t *uidlist;
43676+ int curuid;
43677+ int realok = 0;
43678+ int effectiveok = 0;
43679+ int fsok = 0;
43680+
43681+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43682+ struct user_struct *user;
43683+
43684+ if (real == -1)
43685+ goto skipit;
43686+
43687+ user = find_user(real);
43688+ if (user == NULL)
43689+ goto skipit;
43690+
43691+ if (__gr_process_user_ban(user)) {
43692+ /* for find_user */
43693+ free_uid(user);
43694+ return 1;
43695+ }
43696+
43697+ /* for find_user */
43698+ free_uid(user);
43699+
43700+skipit:
43701+#endif
43702+
43703+ if (unlikely(!(gr_status & GR_READY)))
43704+ return 0;
43705+
43706+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43707+ gr_log_learn_id_change('u', real, effective, fs);
43708+
43709+ num = current->acl->user_trans_num;
43710+ uidlist = current->acl->user_transitions;
43711+
43712+ if (uidlist == NULL)
43713+ return 0;
43714+
43715+ if (real == -1)
43716+ realok = 1;
43717+ if (effective == -1)
43718+ effectiveok = 1;
43719+ if (fs == -1)
43720+ fsok = 1;
43721+
43722+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
43723+ for (i = 0; i < num; i++) {
43724+ curuid = (int)uidlist[i];
43725+ if (real == curuid)
43726+ realok = 1;
43727+ if (effective == curuid)
43728+ effectiveok = 1;
43729+ if (fs == curuid)
43730+ fsok = 1;
43731+ }
43732+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
43733+ for (i = 0; i < num; i++) {
43734+ curuid = (int)uidlist[i];
43735+ if (real == curuid)
43736+ break;
43737+ if (effective == curuid)
43738+ break;
43739+ if (fs == curuid)
43740+ break;
43741+ }
43742+ /* not in deny list */
43743+ if (i == num) {
43744+ realok = 1;
43745+ effectiveok = 1;
43746+ fsok = 1;
43747+ }
43748+ }
43749+
43750+ if (realok && effectiveok && fsok)
43751+ return 0;
43752+ else {
43753+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43754+ return 1;
43755+ }
43756+}
43757+
43758+int
43759+gr_check_group_change(int real, int effective, int fs)
43760+{
43761+ unsigned int i;
43762+ __u16 num;
43763+ gid_t *gidlist;
43764+ int curgid;
43765+ int realok = 0;
43766+ int effectiveok = 0;
43767+ int fsok = 0;
43768+
43769+ if (unlikely(!(gr_status & GR_READY)))
43770+ return 0;
43771+
43772+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43773+ gr_log_learn_id_change('g', real, effective, fs);
43774+
43775+ num = current->acl->group_trans_num;
43776+ gidlist = current->acl->group_transitions;
43777+
43778+ if (gidlist == NULL)
43779+ return 0;
43780+
43781+ if (real == -1)
43782+ realok = 1;
43783+ if (effective == -1)
43784+ effectiveok = 1;
43785+ if (fs == -1)
43786+ fsok = 1;
43787+
43788+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
43789+ for (i = 0; i < num; i++) {
43790+ curgid = (int)gidlist[i];
43791+ if (real == curgid)
43792+ realok = 1;
43793+ if (effective == curgid)
43794+ effectiveok = 1;
43795+ if (fs == curgid)
43796+ fsok = 1;
43797+ }
43798+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
43799+ for (i = 0; i < num; i++) {
43800+ curgid = (int)gidlist[i];
43801+ if (real == curgid)
43802+ break;
43803+ if (effective == curgid)
43804+ break;
43805+ if (fs == curgid)
43806+ break;
43807+ }
43808+ /* not in deny list */
43809+ if (i == num) {
43810+ realok = 1;
43811+ effectiveok = 1;
43812+ fsok = 1;
43813+ }
43814+ }
43815+
43816+ if (realok && effectiveok && fsok)
43817+ return 0;
43818+ else {
43819+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43820+ return 1;
43821+ }
43822+}
43823+
43824+void
43825+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43826+{
43827+ struct acl_role_label *role = task->role;
43828+ struct acl_subject_label *subj = NULL;
43829+ struct acl_object_label *obj;
43830+ struct file *filp;
43831+
43832+ if (unlikely(!(gr_status & GR_READY)))
43833+ return;
43834+
43835+ filp = task->exec_file;
43836+
43837+ /* kernel process, we'll give them the kernel role */
43838+ if (unlikely(!filp)) {
43839+ task->role = kernel_role;
43840+ task->acl = kernel_role->root_label;
43841+ return;
43842+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43843+ role = lookup_acl_role_label(task, uid, gid);
43844+
43845+ /* perform subject lookup in possibly new role
43846+ we can use this result below in the case where role == task->role
43847+ */
43848+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43849+
43850+ /* if we changed uid/gid, but result in the same role
43851+ and are using inheritance, don't lose the inherited subject
43852+ if current subject is other than what normal lookup
43853+ would result in, we arrived via inheritance, don't
43854+ lose subject
43855+ */
43856+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43857+ (subj == task->acl)))
43858+ task->acl = subj;
43859+
43860+ task->role = role;
43861+
43862+ task->is_writable = 0;
43863+
43864+ /* ignore additional mmap checks for processes that are writable
43865+ by the default ACL */
43866+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43867+ if (unlikely(obj->mode & GR_WRITE))
43868+ task->is_writable = 1;
43869+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43870+ if (unlikely(obj->mode & GR_WRITE))
43871+ task->is_writable = 1;
43872+
43873+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43874+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43875+#endif
43876+
43877+ gr_set_proc_res(task);
43878+
43879+ return;
43880+}
43881+
43882+int
43883+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43884+ const int unsafe_share)
43885+{
43886+ struct task_struct *task = current;
43887+ struct acl_subject_label *newacl;
43888+ struct acl_object_label *obj;
43889+ __u32 retmode;
43890+
43891+ if (unlikely(!(gr_status & GR_READY)))
43892+ return 0;
43893+
43894+ newacl = chk_subj_label(dentry, mnt, task->role);
43895+
43896+ task_lock(task);
43897+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43898+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43899+ !(task->role->roletype & GR_ROLE_GOD) &&
43900+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43901+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43902+ task_unlock(task);
43903+ if (unsafe_share)
43904+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43905+ else
43906+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43907+ return -EACCES;
43908+ }
43909+ task_unlock(task);
43910+
43911+ obj = chk_obj_label(dentry, mnt, task->acl);
43912+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43913+
43914+ if (!(task->acl->mode & GR_INHERITLEARN) &&
43915+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43916+ if (obj->nested)
43917+ task->acl = obj->nested;
43918+ else
43919+ task->acl = newacl;
43920+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43921+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43922+
43923+ task->is_writable = 0;
43924+
43925+ /* ignore additional mmap checks for processes that are writable
43926+ by the default ACL */
43927+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
43928+ if (unlikely(obj->mode & GR_WRITE))
43929+ task->is_writable = 1;
43930+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
43931+ if (unlikely(obj->mode & GR_WRITE))
43932+ task->is_writable = 1;
43933+
43934+ gr_set_proc_res(task);
43935+
43936+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43937+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43938+#endif
43939+ return 0;
43940+}
43941+
43942+/* always called with valid inodev ptr */
43943+static void
43944+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43945+{
43946+ struct acl_object_label *matchpo;
43947+ struct acl_subject_label *matchps;
43948+ struct acl_subject_label *subj;
43949+ struct acl_role_label *role;
43950+ unsigned int x;
43951+
43952+ FOR_EACH_ROLE_START(role)
43953+ FOR_EACH_SUBJECT_START(role, subj, x)
43954+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43955+ matchpo->mode |= GR_DELETED;
43956+ FOR_EACH_SUBJECT_END(subj,x)
43957+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
43958+ if (subj->inode == ino && subj->device == dev)
43959+ subj->mode |= GR_DELETED;
43960+ FOR_EACH_NESTED_SUBJECT_END(subj)
43961+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43962+ matchps->mode |= GR_DELETED;
43963+ FOR_EACH_ROLE_END(role)
43964+
43965+ inodev->nentry->deleted = 1;
43966+
43967+ return;
43968+}
43969+
43970+void
43971+gr_handle_delete(const ino_t ino, const dev_t dev)
43972+{
43973+ struct inodev_entry *inodev;
43974+
43975+ if (unlikely(!(gr_status & GR_READY)))
43976+ return;
43977+
43978+ write_lock(&gr_inode_lock);
43979+ inodev = lookup_inodev_entry(ino, dev);
43980+ if (inodev != NULL)
43981+ do_handle_delete(inodev, ino, dev);
43982+ write_unlock(&gr_inode_lock);
43983+
43984+ return;
43985+}
43986+
43987+static void
43988+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
43989+ const ino_t newinode, const dev_t newdevice,
43990+ struct acl_subject_label *subj)
43991+{
43992+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
43993+ struct acl_object_label *match;
43994+
43995+ match = subj->obj_hash[index];
43996+
43997+ while (match && (match->inode != oldinode ||
43998+ match->device != olddevice ||
43999+ !(match->mode & GR_DELETED)))
44000+ match = match->next;
44001+
44002+ if (match && (match->inode == oldinode)
44003+ && (match->device == olddevice)
44004+ && (match->mode & GR_DELETED)) {
44005+ if (match->prev == NULL) {
44006+ subj->obj_hash[index] = match->next;
44007+ if (match->next != NULL)
44008+ match->next->prev = NULL;
44009+ } else {
44010+ match->prev->next = match->next;
44011+ if (match->next != NULL)
44012+ match->next->prev = match->prev;
44013+ }
44014+ match->prev = NULL;
44015+ match->next = NULL;
44016+ match->inode = newinode;
44017+ match->device = newdevice;
44018+ match->mode &= ~GR_DELETED;
44019+
44020+ insert_acl_obj_label(match, subj);
44021+ }
44022+
44023+ return;
44024+}
44025+
44026+static void
44027+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44028+ const ino_t newinode, const dev_t newdevice,
44029+ struct acl_role_label *role)
44030+{
44031+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44032+ struct acl_subject_label *match;
44033+
44034+ match = role->subj_hash[index];
44035+
44036+ while (match && (match->inode != oldinode ||
44037+ match->device != olddevice ||
44038+ !(match->mode & GR_DELETED)))
44039+ match = match->next;
44040+
44041+ if (match && (match->inode == oldinode)
44042+ && (match->device == olddevice)
44043+ && (match->mode & GR_DELETED)) {
44044+ if (match->prev == NULL) {
44045+ role->subj_hash[index] = match->next;
44046+ if (match->next != NULL)
44047+ match->next->prev = NULL;
44048+ } else {
44049+ match->prev->next = match->next;
44050+ if (match->next != NULL)
44051+ match->next->prev = match->prev;
44052+ }
44053+ match->prev = NULL;
44054+ match->next = NULL;
44055+ match->inode = newinode;
44056+ match->device = newdevice;
44057+ match->mode &= ~GR_DELETED;
44058+
44059+ insert_acl_subj_label(match, role);
44060+ }
44061+
44062+ return;
44063+}
44064+
44065+static void
44066+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44067+ const ino_t newinode, const dev_t newdevice)
44068+{
44069+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44070+ struct inodev_entry *match;
44071+
44072+ match = inodev_set.i_hash[index];
44073+
44074+ while (match && (match->nentry->inode != oldinode ||
44075+ match->nentry->device != olddevice || !match->nentry->deleted))
44076+ match = match->next;
44077+
44078+ if (match && (match->nentry->inode == oldinode)
44079+ && (match->nentry->device == olddevice) &&
44080+ match->nentry->deleted) {
44081+ if (match->prev == NULL) {
44082+ inodev_set.i_hash[index] = match->next;
44083+ if (match->next != NULL)
44084+ match->next->prev = NULL;
44085+ } else {
44086+ match->prev->next = match->next;
44087+ if (match->next != NULL)
44088+ match->next->prev = match->prev;
44089+ }
44090+ match->prev = NULL;
44091+ match->next = NULL;
44092+ match->nentry->inode = newinode;
44093+ match->nentry->device = newdevice;
44094+ match->nentry->deleted = 0;
44095+
44096+ insert_inodev_entry(match);
44097+ }
44098+
44099+ return;
44100+}
44101+
44102+static void
44103+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44104+ const struct vfsmount *mnt)
44105+{
44106+ struct acl_subject_label *subj;
44107+ struct acl_role_label *role;
44108+ unsigned int x;
44109+ ino_t ino = dentry->d_inode->i_ino;
44110+ dev_t dev = __get_dev(dentry);
44111+
44112+ FOR_EACH_ROLE_START(role)
44113+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44114+
44115+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
44116+ if ((subj->inode == ino) && (subj->device == dev)) {
44117+ subj->inode = ino;
44118+ subj->device = dev;
44119+ }
44120+ FOR_EACH_NESTED_SUBJECT_END(subj)
44121+ FOR_EACH_SUBJECT_START(role, subj, x)
44122+ update_acl_obj_label(matchn->inode, matchn->device,
44123+ ino, dev, subj);
44124+ FOR_EACH_SUBJECT_END(subj,x)
44125+ FOR_EACH_ROLE_END(role)
44126+
44127+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44128+
44129+ return;
44130+}
44131+
44132+void
44133+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44134+{
44135+ struct name_entry *matchn;
44136+
44137+ if (unlikely(!(gr_status & GR_READY)))
44138+ return;
44139+
44140+ preempt_disable();
44141+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44142+
44143+ if (unlikely((unsigned long)matchn)) {
44144+ write_lock(&gr_inode_lock);
44145+ do_handle_create(matchn, dentry, mnt);
44146+ write_unlock(&gr_inode_lock);
44147+ }
44148+ preempt_enable();
44149+
44150+ return;
44151+}
44152+
44153+void
44154+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44155+ struct dentry *old_dentry,
44156+ struct dentry *new_dentry,
44157+ struct vfsmount *mnt, const __u8 replace)
44158+{
44159+ struct name_entry *matchn;
44160+ struct inodev_entry *inodev;
44161+ ino_t old_ino = old_dentry->d_inode->i_ino;
44162+ dev_t old_dev = __get_dev(old_dentry);
44163+
44164+ /* vfs_rename swaps the name and parent link for old_dentry and
44165+ new_dentry
44166+ at this point, old_dentry has the new name, parent link, and inode
44167+ for the renamed file
44168+ if a file is being replaced by a rename, new_dentry has the inode
44169+ and name for the replaced file
44170+ */
44171+
44172+ if (unlikely(!(gr_status & GR_READY)))
44173+ return;
44174+
44175+ preempt_disable();
44176+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44177+
44178+ /* we wouldn't have to check d_inode if it weren't for
44179+ NFS silly-renaming
44180+ */
44181+
44182+ write_lock(&gr_inode_lock);
44183+ if (unlikely(replace && new_dentry->d_inode)) {
44184+ ino_t new_ino = new_dentry->d_inode->i_ino;
44185+ dev_t new_dev = __get_dev(new_dentry);
44186+
44187+ inodev = lookup_inodev_entry(new_ino, new_dev);
44188+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44189+ do_handle_delete(inodev, new_ino, new_dev);
44190+ }
44191+
44192+ inodev = lookup_inodev_entry(old_ino, old_dev);
44193+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44194+ do_handle_delete(inodev, old_ino, old_dev);
44195+
44196+ if (unlikely((unsigned long)matchn))
44197+ do_handle_create(matchn, old_dentry, mnt);
44198+
44199+ write_unlock(&gr_inode_lock);
44200+ preempt_enable();
44201+
44202+ return;
44203+}
44204+
44205+static int
44206+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44207+ unsigned char **sum)
44208+{
44209+ struct acl_role_label *r;
44210+ struct role_allowed_ip *ipp;
44211+ struct role_transition *trans;
44212+ unsigned int i;
44213+ int found = 0;
44214+ u32 curr_ip = current->signal->curr_ip;
44215+
44216+ current->signal->saved_ip = curr_ip;
44217+
44218+ /* check transition table */
44219+
44220+ for (trans = current->role->transitions; trans; trans = trans->next) {
44221+ if (!strcmp(rolename, trans->rolename)) {
44222+ found = 1;
44223+ break;
44224+ }
44225+ }
44226+
44227+ if (!found)
44228+ return 0;
44229+
44230+ /* handle special roles that do not require authentication
44231+ and check ip */
44232+
44233+ FOR_EACH_ROLE_START(r)
44234+ if (!strcmp(rolename, r->rolename) &&
44235+ (r->roletype & GR_ROLE_SPECIAL)) {
44236+ found = 0;
44237+ if (r->allowed_ips != NULL) {
44238+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44239+ if ((ntohl(curr_ip) & ipp->netmask) ==
44240+ (ntohl(ipp->addr) & ipp->netmask))
44241+ found = 1;
44242+ }
44243+ } else
44244+ found = 2;
44245+ if (!found)
44246+ return 0;
44247+
44248+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44249+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44250+ *salt = NULL;
44251+ *sum = NULL;
44252+ return 1;
44253+ }
44254+ }
44255+ FOR_EACH_ROLE_END(r)
44256+
44257+ for (i = 0; i < num_sprole_pws; i++) {
44258+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44259+ *salt = acl_special_roles[i]->salt;
44260+ *sum = acl_special_roles[i]->sum;
44261+ return 1;
44262+ }
44263+ }
44264+
44265+ return 0;
44266+}
44267+
44268+static void
44269+assign_special_role(char *rolename)
44270+{
44271+ struct acl_object_label *obj;
44272+ struct acl_role_label *r;
44273+ struct acl_role_label *assigned = NULL;
44274+ struct task_struct *tsk;
44275+ struct file *filp;
44276+
44277+ FOR_EACH_ROLE_START(r)
44278+ if (!strcmp(rolename, r->rolename) &&
44279+ (r->roletype & GR_ROLE_SPECIAL)) {
44280+ assigned = r;
44281+ break;
44282+ }
44283+ FOR_EACH_ROLE_END(r)
44284+
44285+ if (!assigned)
44286+ return;
44287+
44288+ read_lock(&tasklist_lock);
44289+ read_lock(&grsec_exec_file_lock);
44290+
44291+ tsk = current->real_parent;
44292+ if (tsk == NULL)
44293+ goto out_unlock;
44294+
44295+ filp = tsk->exec_file;
44296+ if (filp == NULL)
44297+ goto out_unlock;
44298+
44299+ tsk->is_writable = 0;
44300+
44301+ tsk->acl_sp_role = 1;
44302+ tsk->acl_role_id = ++acl_sp_role_value;
44303+ tsk->role = assigned;
44304+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44305+
44306+ /* ignore additional mmap checks for processes that are writable
44307+ by the default ACL */
44308+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44309+ if (unlikely(obj->mode & GR_WRITE))
44310+ tsk->is_writable = 1;
44311+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44312+ if (unlikely(obj->mode & GR_WRITE))
44313+ tsk->is_writable = 1;
44314+
44315+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44316+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44317+#endif
44318+
44319+out_unlock:
44320+ read_unlock(&grsec_exec_file_lock);
44321+ read_unlock(&tasklist_lock);
44322+ return;
44323+}
44324+
44325+int gr_check_secure_terminal(struct task_struct *task)
44326+{
44327+ struct task_struct *p, *p2, *p3;
44328+ struct files_struct *files;
44329+ struct fdtable *fdt;
44330+ struct file *our_file = NULL, *file;
44331+ int i;
44332+
44333+ if (task->signal->tty == NULL)
44334+ return 1;
44335+
44336+ files = get_files_struct(task);
44337+ if (files != NULL) {
44338+ rcu_read_lock();
44339+ fdt = files_fdtable(files);
44340+ for (i=0; i < fdt->max_fds; i++) {
44341+ file = fcheck_files(files, i);
44342+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44343+ get_file(file);
44344+ our_file = file;
44345+ }
44346+ }
44347+ rcu_read_unlock();
44348+ put_files_struct(files);
44349+ }
44350+
44351+ if (our_file == NULL)
44352+ return 1;
44353+
44354+ read_lock(&tasklist_lock);
44355+ do_each_thread(p2, p) {
44356+ files = get_files_struct(p);
44357+ if (files == NULL ||
44358+ (p->signal && p->signal->tty == task->signal->tty)) {
44359+ if (files != NULL)
44360+ put_files_struct(files);
44361+ continue;
44362+ }
44363+ rcu_read_lock();
44364+ fdt = files_fdtable(files);
44365+ for (i=0; i < fdt->max_fds; i++) {
44366+ file = fcheck_files(files, i);
44367+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44368+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44369+ p3 = task;
44370+ while (p3->pid > 0) {
44371+ if (p3 == p)
44372+ break;
44373+ p3 = p3->real_parent;
44374+ }
44375+ if (p3 == p)
44376+ break;
44377+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44378+ gr_handle_alertkill(p);
44379+ rcu_read_unlock();
44380+ put_files_struct(files);
44381+ read_unlock(&tasklist_lock);
44382+ fput(our_file);
44383+ return 0;
44384+ }
44385+ }
44386+ rcu_read_unlock();
44387+ put_files_struct(files);
44388+ } while_each_thread(p2, p);
44389+ read_unlock(&tasklist_lock);
44390+
44391+ fput(our_file);
44392+ return 1;
44393+}
44394+
44395+ssize_t
44396+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44397+{
44398+ struct gr_arg_wrapper uwrap;
44399+ unsigned char *sprole_salt = NULL;
44400+ unsigned char *sprole_sum = NULL;
44401+ int error = sizeof (struct gr_arg_wrapper);
44402+ int error2 = 0;
44403+
44404+ mutex_lock(&gr_dev_mutex);
44405+
44406+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44407+ error = -EPERM;
44408+ goto out;
44409+ }
44410+
44411+ if (count != sizeof (struct gr_arg_wrapper)) {
44412+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44413+ error = -EINVAL;
44414+ goto out;
44415+ }
44416+
44417+
44418+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44419+ gr_auth_expires = 0;
44420+ gr_auth_attempts = 0;
44421+ }
44422+
44423+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44424+ error = -EFAULT;
44425+ goto out;
44426+ }
44427+
44428+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44429+ error = -EINVAL;
44430+ goto out;
44431+ }
44432+
44433+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44434+ error = -EFAULT;
44435+ goto out;
44436+ }
44437+
44438+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44439+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44440+ time_after(gr_auth_expires, get_seconds())) {
44441+ error = -EBUSY;
44442+ goto out;
44443+ }
44444+
44445+ /* if non-root trying to do anything other than use a special role,
44446+ do not attempt authentication, do not count towards authentication
44447+ locking
44448+ */
44449+
44450+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44451+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44452+ current_uid()) {
44453+ error = -EPERM;
44454+ goto out;
44455+ }
44456+
44457+ /* ensure pw and special role name are null terminated */
44458+
44459+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44460+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44461+
44462+ /* Okay.
44463+ * We have our enough of the argument structure..(we have yet
44464+ * to copy_from_user the tables themselves) . Copy the tables
44465+ * only if we need them, i.e. for loading operations. */
44466+
44467+ switch (gr_usermode->mode) {
44468+ case GR_STATUS:
44469+ if (gr_status & GR_READY) {
44470+ error = 1;
44471+ if (!gr_check_secure_terminal(current))
44472+ error = 3;
44473+ } else
44474+ error = 2;
44475+ goto out;
44476+ case GR_SHUTDOWN:
44477+ if ((gr_status & GR_READY)
44478+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44479+ pax_open_kernel();
44480+ gr_status &= ~GR_READY;
44481+ pax_close_kernel();
44482+
44483+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44484+ free_variables();
44485+ memset(gr_usermode, 0, sizeof (struct gr_arg));
44486+ memset(gr_system_salt, 0, GR_SALT_LEN);
44487+ memset(gr_system_sum, 0, GR_SHA_LEN);
44488+ } else if (gr_status & GR_READY) {
44489+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44490+ error = -EPERM;
44491+ } else {
44492+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44493+ error = -EAGAIN;
44494+ }
44495+ break;
44496+ case GR_ENABLE:
44497+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44498+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44499+ else {
44500+ if (gr_status & GR_READY)
44501+ error = -EAGAIN;
44502+ else
44503+ error = error2;
44504+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44505+ }
44506+ break;
44507+ case GR_RELOAD:
44508+ if (!(gr_status & GR_READY)) {
44509+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44510+ error = -EAGAIN;
44511+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44512+ preempt_disable();
44513+
44514+ pax_open_kernel();
44515+ gr_status &= ~GR_READY;
44516+ pax_close_kernel();
44517+
44518+ free_variables();
44519+ if (!(error2 = gracl_init(gr_usermode))) {
44520+ preempt_enable();
44521+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44522+ } else {
44523+ preempt_enable();
44524+ error = error2;
44525+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44526+ }
44527+ } else {
44528+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44529+ error = -EPERM;
44530+ }
44531+ break;
44532+ case GR_SEGVMOD:
44533+ if (unlikely(!(gr_status & GR_READY))) {
44534+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44535+ error = -EAGAIN;
44536+ break;
44537+ }
44538+
44539+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44540+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44541+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44542+ struct acl_subject_label *segvacl;
44543+ segvacl =
44544+ lookup_acl_subj_label(gr_usermode->segv_inode,
44545+ gr_usermode->segv_device,
44546+ current->role);
44547+ if (segvacl) {
44548+ segvacl->crashes = 0;
44549+ segvacl->expires = 0;
44550+ }
44551+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44552+ gr_remove_uid(gr_usermode->segv_uid);
44553+ }
44554+ } else {
44555+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44556+ error = -EPERM;
44557+ }
44558+ break;
44559+ case GR_SPROLE:
44560+ case GR_SPROLEPAM:
44561+ if (unlikely(!(gr_status & GR_READY))) {
44562+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44563+ error = -EAGAIN;
44564+ break;
44565+ }
44566+
44567+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44568+ current->role->expires = 0;
44569+ current->role->auth_attempts = 0;
44570+ }
44571+
44572+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44573+ time_after(current->role->expires, get_seconds())) {
44574+ error = -EBUSY;
44575+ goto out;
44576+ }
44577+
44578+ if (lookup_special_role_auth
44579+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44580+ && ((!sprole_salt && !sprole_sum)
44581+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44582+ char *p = "";
44583+ assign_special_role(gr_usermode->sp_role);
44584+ read_lock(&tasklist_lock);
44585+ if (current->real_parent)
44586+ p = current->real_parent->role->rolename;
44587+ read_unlock(&tasklist_lock);
44588+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44589+ p, acl_sp_role_value);
44590+ } else {
44591+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44592+ error = -EPERM;
44593+ if(!(current->role->auth_attempts++))
44594+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44595+
44596+ goto out;
44597+ }
44598+ break;
44599+ case GR_UNSPROLE:
44600+ if (unlikely(!(gr_status & GR_READY))) {
44601+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44602+ error = -EAGAIN;
44603+ break;
44604+ }
44605+
44606+ if (current->role->roletype & GR_ROLE_SPECIAL) {
44607+ char *p = "";
44608+ int i = 0;
44609+
44610+ read_lock(&tasklist_lock);
44611+ if (current->real_parent) {
44612+ p = current->real_parent->role->rolename;
44613+ i = current->real_parent->acl_role_id;
44614+ }
44615+ read_unlock(&tasklist_lock);
44616+
44617+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44618+ gr_set_acls(1);
44619+ } else {
44620+ error = -EPERM;
44621+ goto out;
44622+ }
44623+ break;
44624+ default:
44625+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44626+ error = -EINVAL;
44627+ break;
44628+ }
44629+
44630+ if (error != -EPERM)
44631+ goto out;
44632+
44633+ if(!(gr_auth_attempts++))
44634+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44635+
44636+ out:
44637+ mutex_unlock(&gr_dev_mutex);
44638+ return error;
44639+}
44640+
44641+/* must be called with
44642+ rcu_read_lock();
44643+ read_lock(&tasklist_lock);
44644+ read_lock(&grsec_exec_file_lock);
44645+*/
44646+int gr_apply_subject_to_task(struct task_struct *task)
44647+{
44648+ struct acl_object_label *obj;
44649+ char *tmpname;
44650+ struct acl_subject_label *tmpsubj;
44651+ struct file *filp;
44652+ struct name_entry *nmatch;
44653+
44654+ filp = task->exec_file;
44655+ if (filp == NULL)
44656+ return 0;
44657+
44658+ /* the following is to apply the correct subject
44659+ on binaries running when the RBAC system
44660+ is enabled, when the binaries have been
44661+ replaced or deleted since their execution
44662+ -----
44663+ when the RBAC system starts, the inode/dev
44664+ from exec_file will be one the RBAC system
44665+ is unaware of. It only knows the inode/dev
44666+ of the present file on disk, or the absence
44667+ of it.
44668+ */
44669+ preempt_disable();
44670+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44671+
44672+ nmatch = lookup_name_entry(tmpname);
44673+ preempt_enable();
44674+ tmpsubj = NULL;
44675+ if (nmatch) {
44676+ if (nmatch->deleted)
44677+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44678+ else
44679+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44680+ if (tmpsubj != NULL)
44681+ task->acl = tmpsubj;
44682+ }
44683+ if (tmpsubj == NULL)
44684+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44685+ task->role);
44686+ if (task->acl) {
44687+ task->is_writable = 0;
44688+ /* ignore additional mmap checks for processes that are writable
44689+ by the default ACL */
44690+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44691+ if (unlikely(obj->mode & GR_WRITE))
44692+ task->is_writable = 1;
44693+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44694+ if (unlikely(obj->mode & GR_WRITE))
44695+ task->is_writable = 1;
44696+
44697+ gr_set_proc_res(task);
44698+
44699+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44700+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44701+#endif
44702+ } else {
44703+ return 1;
44704+ }
44705+
44706+ return 0;
44707+}
44708+
44709+int
44710+gr_set_acls(const int type)
44711+{
44712+ struct task_struct *task, *task2;
44713+ struct acl_role_label *role = current->role;
44714+ __u16 acl_role_id = current->acl_role_id;
44715+ const struct cred *cred;
44716+ int ret;
44717+
44718+ rcu_read_lock();
44719+ read_lock(&tasklist_lock);
44720+ read_lock(&grsec_exec_file_lock);
44721+ do_each_thread(task2, task) {
44722+ /* check to see if we're called from the exit handler,
44723+ if so, only replace ACLs that have inherited the admin
44724+ ACL */
44725+
44726+ if (type && (task->role != role ||
44727+ task->acl_role_id != acl_role_id))
44728+ continue;
44729+
44730+ task->acl_role_id = 0;
44731+ task->acl_sp_role = 0;
44732+
44733+ if (task->exec_file) {
44734+ cred = __task_cred(task);
44735+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44736+ ret = gr_apply_subject_to_task(task);
44737+ if (ret) {
44738+ read_unlock(&grsec_exec_file_lock);
44739+ read_unlock(&tasklist_lock);
44740+ rcu_read_unlock();
44741+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44742+ return ret;
44743+ }
44744+ } else {
44745+ // it's a kernel process
44746+ task->role = kernel_role;
44747+ task->acl = kernel_role->root_label;
44748+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44749+ task->acl->mode &= ~GR_PROCFIND;
44750+#endif
44751+ }
44752+ } while_each_thread(task2, task);
44753+ read_unlock(&grsec_exec_file_lock);
44754+ read_unlock(&tasklist_lock);
44755+ rcu_read_unlock();
44756+
44757+ return 0;
44758+}
44759+
44760+void
44761+gr_learn_resource(const struct task_struct *task,
44762+ const int res, const unsigned long wanted, const int gt)
44763+{
44764+ struct acl_subject_label *acl;
44765+ const struct cred *cred;
44766+
44767+ if (unlikely((gr_status & GR_READY) &&
44768+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44769+ goto skip_reslog;
44770+
44771+#ifdef CONFIG_GRKERNSEC_RESLOG
44772+ gr_log_resource(task, res, wanted, gt);
44773+#endif
44774+ skip_reslog:
44775+
44776+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44777+ return;
44778+
44779+ acl = task->acl;
44780+
44781+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44782+ !(acl->resmask & (1 << (unsigned short) res))))
44783+ return;
44784+
44785+ if (wanted >= acl->res[res].rlim_cur) {
44786+ unsigned long res_add;
44787+
44788+ res_add = wanted;
44789+ switch (res) {
44790+ case RLIMIT_CPU:
44791+ res_add += GR_RLIM_CPU_BUMP;
44792+ break;
44793+ case RLIMIT_FSIZE:
44794+ res_add += GR_RLIM_FSIZE_BUMP;
44795+ break;
44796+ case RLIMIT_DATA:
44797+ res_add += GR_RLIM_DATA_BUMP;
44798+ break;
44799+ case RLIMIT_STACK:
44800+ res_add += GR_RLIM_STACK_BUMP;
44801+ break;
44802+ case RLIMIT_CORE:
44803+ res_add += GR_RLIM_CORE_BUMP;
44804+ break;
44805+ case RLIMIT_RSS:
44806+ res_add += GR_RLIM_RSS_BUMP;
44807+ break;
44808+ case RLIMIT_NPROC:
44809+ res_add += GR_RLIM_NPROC_BUMP;
44810+ break;
44811+ case RLIMIT_NOFILE:
44812+ res_add += GR_RLIM_NOFILE_BUMP;
44813+ break;
44814+ case RLIMIT_MEMLOCK:
44815+ res_add += GR_RLIM_MEMLOCK_BUMP;
44816+ break;
44817+ case RLIMIT_AS:
44818+ res_add += GR_RLIM_AS_BUMP;
44819+ break;
44820+ case RLIMIT_LOCKS:
44821+ res_add += GR_RLIM_LOCKS_BUMP;
44822+ break;
44823+ case RLIMIT_SIGPENDING:
44824+ res_add += GR_RLIM_SIGPENDING_BUMP;
44825+ break;
44826+ case RLIMIT_MSGQUEUE:
44827+ res_add += GR_RLIM_MSGQUEUE_BUMP;
44828+ break;
44829+ case RLIMIT_NICE:
44830+ res_add += GR_RLIM_NICE_BUMP;
44831+ break;
44832+ case RLIMIT_RTPRIO:
44833+ res_add += GR_RLIM_RTPRIO_BUMP;
44834+ break;
44835+ case RLIMIT_RTTIME:
44836+ res_add += GR_RLIM_RTTIME_BUMP;
44837+ break;
44838+ }
44839+
44840+ acl->res[res].rlim_cur = res_add;
44841+
44842+ if (wanted > acl->res[res].rlim_max)
44843+ acl->res[res].rlim_max = res_add;
44844+
44845+ /* only log the subject filename, since resource logging is supported for
44846+ single-subject learning only */
44847+ rcu_read_lock();
44848+ cred = __task_cred(task);
44849+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44850+ task->role->roletype, cred->uid, cred->gid, acl->filename,
44851+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44852+ "", (unsigned long) res, &task->signal->saved_ip);
44853+ rcu_read_unlock();
44854+ }
44855+
44856+ return;
44857+}
44858+
44859+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44860+void
44861+pax_set_initial_flags(struct linux_binprm *bprm)
44862+{
44863+ struct task_struct *task = current;
44864+ struct acl_subject_label *proc;
44865+ unsigned long flags;
44866+
44867+ if (unlikely(!(gr_status & GR_READY)))
44868+ return;
44869+
44870+ flags = pax_get_flags(task);
44871+
44872+ proc = task->acl;
44873+
44874+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44875+ flags &= ~MF_PAX_PAGEEXEC;
44876+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44877+ flags &= ~MF_PAX_SEGMEXEC;
44878+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44879+ flags &= ~MF_PAX_RANDMMAP;
44880+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44881+ flags &= ~MF_PAX_EMUTRAMP;
44882+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44883+ flags &= ~MF_PAX_MPROTECT;
44884+
44885+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44886+ flags |= MF_PAX_PAGEEXEC;
44887+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44888+ flags |= MF_PAX_SEGMEXEC;
44889+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44890+ flags |= MF_PAX_RANDMMAP;
44891+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44892+ flags |= MF_PAX_EMUTRAMP;
44893+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44894+ flags |= MF_PAX_MPROTECT;
44895+
44896+ pax_set_flags(task, flags);
44897+
44898+ return;
44899+}
44900+#endif
44901+
44902+#ifdef CONFIG_SYSCTL
44903+/* Eric Biederman likes breaking userland ABI and every inode-based security
44904+ system to save 35kb of memory */
44905+
44906+/* we modify the passed in filename, but adjust it back before returning */
44907+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44908+{
44909+ struct name_entry *nmatch;
44910+ char *p, *lastp = NULL;
44911+ struct acl_object_label *obj = NULL, *tmp;
44912+ struct acl_subject_label *tmpsubj;
44913+ char c = '\0';
44914+
44915+ read_lock(&gr_inode_lock);
44916+
44917+ p = name + len - 1;
44918+ do {
44919+ nmatch = lookup_name_entry(name);
44920+ if (lastp != NULL)
44921+ *lastp = c;
44922+
44923+ if (nmatch == NULL)
44924+ goto next_component;
44925+ tmpsubj = current->acl;
44926+ do {
44927+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44928+ if (obj != NULL) {
44929+ tmp = obj->globbed;
44930+ while (tmp) {
44931+ if (!glob_match(tmp->filename, name)) {
44932+ obj = tmp;
44933+ goto found_obj;
44934+ }
44935+ tmp = tmp->next;
44936+ }
44937+ goto found_obj;
44938+ }
44939+ } while ((tmpsubj = tmpsubj->parent_subject));
44940+next_component:
44941+ /* end case */
44942+ if (p == name)
44943+ break;
44944+
44945+ while (*p != '/')
44946+ p--;
44947+ if (p == name)
44948+ lastp = p + 1;
44949+ else {
44950+ lastp = p;
44951+ p--;
44952+ }
44953+ c = *lastp;
44954+ *lastp = '\0';
44955+ } while (1);
44956+found_obj:
44957+ read_unlock(&gr_inode_lock);
44958+ /* obj returned will always be non-null */
44959+ return obj;
44960+}
44961+
44962+/* returns 0 when allowing, non-zero on error
44963+ op of 0 is used for readdir, so we don't log the names of hidden files
44964+*/
44965+__u32
44966+gr_handle_sysctl(const struct ctl_table *table, const int op)
44967+{
44968+ struct ctl_table *tmp;
44969+ const char *proc_sys = "/proc/sys";
44970+ char *path;
44971+ struct acl_object_label *obj;
44972+ unsigned short len = 0, pos = 0, depth = 0, i;
44973+ __u32 err = 0;
44974+ __u32 mode = 0;
44975+
44976+ if (unlikely(!(gr_status & GR_READY)))
44977+ return 0;
44978+
44979+ /* for now, ignore operations on non-sysctl entries if it's not a
44980+ readdir*/
44981+ if (table->child != NULL && op != 0)
44982+ return 0;
44983+
44984+ mode |= GR_FIND;
44985+ /* it's only a read if it's an entry, read on dirs is for readdir */
44986+ if (op & MAY_READ)
44987+ mode |= GR_READ;
44988+ if (op & MAY_WRITE)
44989+ mode |= GR_WRITE;
44990+
44991+ preempt_disable();
44992+
44993+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
44994+
44995+ /* it's only a read/write if it's an actual entry, not a dir
44996+ (which are opened for readdir)
44997+ */
44998+
44999+ /* convert the requested sysctl entry into a pathname */
45000+
45001+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45002+ len += strlen(tmp->procname);
45003+ len++;
45004+ depth++;
45005+ }
45006+
45007+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45008+ /* deny */
45009+ goto out;
45010+ }
45011+
45012+ memset(path, 0, PAGE_SIZE);
45013+
45014+ memcpy(path, proc_sys, strlen(proc_sys));
45015+
45016+ pos += strlen(proc_sys);
45017+
45018+ for (; depth > 0; depth--) {
45019+ path[pos] = '/';
45020+ pos++;
45021+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45022+ if (depth == i) {
45023+ memcpy(path + pos, tmp->procname,
45024+ strlen(tmp->procname));
45025+ pos += strlen(tmp->procname);
45026+ }
45027+ i++;
45028+ }
45029+ }
45030+
45031+ obj = gr_lookup_by_name(path, pos);
45032+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45033+
45034+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45035+ ((err & mode) != mode))) {
45036+ __u32 new_mode = mode;
45037+
45038+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45039+
45040+ err = 0;
45041+ gr_log_learn_sysctl(path, new_mode);
45042+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45043+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45044+ err = -ENOENT;
45045+ } else if (!(err & GR_FIND)) {
45046+ err = -ENOENT;
45047+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45048+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45049+ path, (mode & GR_READ) ? " reading" : "",
45050+ (mode & GR_WRITE) ? " writing" : "");
45051+ err = -EACCES;
45052+ } else if ((err & mode) != mode) {
45053+ err = -EACCES;
45054+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45055+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45056+ path, (mode & GR_READ) ? " reading" : "",
45057+ (mode & GR_WRITE) ? " writing" : "");
45058+ err = 0;
45059+ } else
45060+ err = 0;
45061+
45062+ out:
45063+ preempt_enable();
45064+
45065+ return err;
45066+}
45067+#endif
45068+
45069+int
45070+gr_handle_proc_ptrace(struct task_struct *task)
45071+{
45072+ struct file *filp;
45073+ struct task_struct *tmp = task;
45074+ struct task_struct *curtemp = current;
45075+ __u32 retmode;
45076+
45077+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45078+ if (unlikely(!(gr_status & GR_READY)))
45079+ return 0;
45080+#endif
45081+
45082+ read_lock(&tasklist_lock);
45083+ read_lock(&grsec_exec_file_lock);
45084+ filp = task->exec_file;
45085+
45086+ while (tmp->pid > 0) {
45087+ if (tmp == curtemp)
45088+ break;
45089+ tmp = tmp->real_parent;
45090+ }
45091+
45092+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45093+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45094+ read_unlock(&grsec_exec_file_lock);
45095+ read_unlock(&tasklist_lock);
45096+ return 1;
45097+ }
45098+
45099+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45100+ if (!(gr_status & GR_READY)) {
45101+ read_unlock(&grsec_exec_file_lock);
45102+ read_unlock(&tasklist_lock);
45103+ return 0;
45104+ }
45105+#endif
45106+
45107+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45108+ read_unlock(&grsec_exec_file_lock);
45109+ read_unlock(&tasklist_lock);
45110+
45111+ if (retmode & GR_NOPTRACE)
45112+ return 1;
45113+
45114+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45115+ && (current->acl != task->acl || (current->acl != current->role->root_label
45116+ && current->pid != task->pid)))
45117+ return 1;
45118+
45119+ return 0;
45120+}
45121+
45122+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45123+{
45124+ if (unlikely(!(gr_status & GR_READY)))
45125+ return;
45126+
45127+ if (!(current->role->roletype & GR_ROLE_GOD))
45128+ return;
45129+
45130+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45131+ p->role->rolename, gr_task_roletype_to_char(p),
45132+ p->acl->filename);
45133+}
45134+
45135+int
45136+gr_handle_ptrace(struct task_struct *task, const long request)
45137+{
45138+ struct task_struct *tmp = task;
45139+ struct task_struct *curtemp = current;
45140+ __u32 retmode;
45141+
45142+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45143+ if (unlikely(!(gr_status & GR_READY)))
45144+ return 0;
45145+#endif
45146+
45147+ read_lock(&tasklist_lock);
45148+ while (tmp->pid > 0) {
45149+ if (tmp == curtemp)
45150+ break;
45151+ tmp = tmp->real_parent;
45152+ }
45153+
45154+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45155+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45156+ read_unlock(&tasklist_lock);
45157+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45158+ return 1;
45159+ }
45160+ read_unlock(&tasklist_lock);
45161+
45162+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45163+ if (!(gr_status & GR_READY))
45164+ return 0;
45165+#endif
45166+
45167+ read_lock(&grsec_exec_file_lock);
45168+ if (unlikely(!task->exec_file)) {
45169+ read_unlock(&grsec_exec_file_lock);
45170+ return 0;
45171+ }
45172+
45173+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45174+ read_unlock(&grsec_exec_file_lock);
45175+
45176+ if (retmode & GR_NOPTRACE) {
45177+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45178+ return 1;
45179+ }
45180+
45181+ if (retmode & GR_PTRACERD) {
45182+ switch (request) {
45183+ case PTRACE_POKETEXT:
45184+ case PTRACE_POKEDATA:
45185+ case PTRACE_POKEUSR:
45186+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45187+ case PTRACE_SETREGS:
45188+ case PTRACE_SETFPREGS:
45189+#endif
45190+#ifdef CONFIG_X86
45191+ case PTRACE_SETFPXREGS:
45192+#endif
45193+#ifdef CONFIG_ALTIVEC
45194+ case PTRACE_SETVRREGS:
45195+#endif
45196+ return 1;
45197+ default:
45198+ return 0;
45199+ }
45200+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
45201+ !(current->role->roletype & GR_ROLE_GOD) &&
45202+ (current->acl != task->acl)) {
45203+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45204+ return 1;
45205+ }
45206+
45207+ return 0;
45208+}
45209+
45210+static int is_writable_mmap(const struct file *filp)
45211+{
45212+ struct task_struct *task = current;
45213+ struct acl_object_label *obj, *obj2;
45214+
45215+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45216+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45217+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45218+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45219+ task->role->root_label);
45220+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45221+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45222+ return 1;
45223+ }
45224+ }
45225+ return 0;
45226+}
45227+
45228+int
45229+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45230+{
45231+ __u32 mode;
45232+
45233+ if (unlikely(!file || !(prot & PROT_EXEC)))
45234+ return 1;
45235+
45236+ if (is_writable_mmap(file))
45237+ return 0;
45238+
45239+ mode =
45240+ gr_search_file(file->f_path.dentry,
45241+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45242+ file->f_path.mnt);
45243+
45244+ if (!gr_tpe_allow(file))
45245+ return 0;
45246+
45247+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45248+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45249+ return 0;
45250+ } else if (unlikely(!(mode & GR_EXEC))) {
45251+ return 0;
45252+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45253+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45254+ return 1;
45255+ }
45256+
45257+ return 1;
45258+}
45259+
45260+int
45261+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45262+{
45263+ __u32 mode;
45264+
45265+ if (unlikely(!file || !(prot & PROT_EXEC)))
45266+ return 1;
45267+
45268+ if (is_writable_mmap(file))
45269+ return 0;
45270+
45271+ mode =
45272+ gr_search_file(file->f_path.dentry,
45273+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45274+ file->f_path.mnt);
45275+
45276+ if (!gr_tpe_allow(file))
45277+ return 0;
45278+
45279+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45280+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45281+ return 0;
45282+ } else if (unlikely(!(mode & GR_EXEC))) {
45283+ return 0;
45284+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45285+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45286+ return 1;
45287+ }
45288+
45289+ return 1;
45290+}
45291+
45292+void
45293+gr_acl_handle_psacct(struct task_struct *task, const long code)
45294+{
45295+ unsigned long runtime;
45296+ unsigned long cputime;
45297+ unsigned int wday, cday;
45298+ __u8 whr, chr;
45299+ __u8 wmin, cmin;
45300+ __u8 wsec, csec;
45301+ struct timespec timeval;
45302+
45303+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45304+ !(task->acl->mode & GR_PROCACCT)))
45305+ return;
45306+
45307+ do_posix_clock_monotonic_gettime(&timeval);
45308+ runtime = timeval.tv_sec - task->start_time.tv_sec;
45309+ wday = runtime / (3600 * 24);
45310+ runtime -= wday * (3600 * 24);
45311+ whr = runtime / 3600;
45312+ runtime -= whr * 3600;
45313+ wmin = runtime / 60;
45314+ runtime -= wmin * 60;
45315+ wsec = runtime;
45316+
45317+ cputime = (task->utime + task->stime) / HZ;
45318+ cday = cputime / (3600 * 24);
45319+ cputime -= cday * (3600 * 24);
45320+ chr = cputime / 3600;
45321+ cputime -= chr * 3600;
45322+ cmin = cputime / 60;
45323+ cputime -= cmin * 60;
45324+ csec = cputime;
45325+
45326+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45327+
45328+ return;
45329+}
45330+
45331+void gr_set_kernel_label(struct task_struct *task)
45332+{
45333+ if (gr_status & GR_READY) {
45334+ task->role = kernel_role;
45335+ task->acl = kernel_role->root_label;
45336+ }
45337+ return;
45338+}
45339+
45340+#ifdef CONFIG_TASKSTATS
45341+int gr_is_taskstats_denied(int pid)
45342+{
45343+ struct task_struct *task;
45344+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45345+ const struct cred *cred;
45346+#endif
45347+ int ret = 0;
45348+
45349+ /* restrict taskstats viewing to un-chrooted root users
45350+ who have the 'view' subject flag if the RBAC system is enabled
45351+ */
45352+
45353+ rcu_read_lock();
45354+ read_lock(&tasklist_lock);
45355+ task = find_task_by_vpid(pid);
45356+ if (task) {
45357+#ifdef CONFIG_GRKERNSEC_CHROOT
45358+ if (proc_is_chrooted(task))
45359+ ret = -EACCES;
45360+#endif
45361+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45362+ cred = __task_cred(task);
45363+#ifdef CONFIG_GRKERNSEC_PROC_USER
45364+ if (cred->uid != 0)
45365+ ret = -EACCES;
45366+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45367+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45368+ ret = -EACCES;
45369+#endif
45370+#endif
45371+ if (gr_status & GR_READY) {
45372+ if (!(task->acl->mode & GR_VIEW))
45373+ ret = -EACCES;
45374+ }
45375+ } else
45376+ ret = -ENOENT;
45377+
45378+ read_unlock(&tasklist_lock);
45379+ rcu_read_unlock();
45380+
45381+ return ret;
45382+}
45383+#endif
45384+
45385+/* AUXV entries are filled via a descendant of search_binary_handler
45386+ after we've already applied the subject for the target
45387+*/
45388+int gr_acl_enable_at_secure(void)
45389+{
45390+ if (unlikely(!(gr_status & GR_READY)))
45391+ return 0;
45392+
45393+ if (current->acl->mode & GR_ATSECURE)
45394+ return 1;
45395+
45396+ return 0;
45397+}
45398+
45399+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45400+{
45401+ struct task_struct *task = current;
45402+ struct dentry *dentry = file->f_path.dentry;
45403+ struct vfsmount *mnt = file->f_path.mnt;
45404+ struct acl_object_label *obj, *tmp;
45405+ struct acl_subject_label *subj;
45406+ unsigned int bufsize;
45407+ int is_not_root;
45408+ char *path;
45409+ dev_t dev = __get_dev(dentry);
45410+
45411+ if (unlikely(!(gr_status & GR_READY)))
45412+ return 1;
45413+
45414+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45415+ return 1;
45416+
45417+ /* ignore Eric Biederman */
45418+ if (IS_PRIVATE(dentry->d_inode))
45419+ return 1;
45420+
45421+ subj = task->acl;
45422+ do {
45423+ obj = lookup_acl_obj_label(ino, dev, subj);
45424+ if (obj != NULL)
45425+ return (obj->mode & GR_FIND) ? 1 : 0;
45426+ } while ((subj = subj->parent_subject));
45427+
45428+ /* this is purely an optimization since we're looking for an object
45429+ for the directory we're doing a readdir on
45430+ if it's possible for any globbed object to match the entry we're
45431+ filling into the directory, then the object we find here will be
45432+ an anchor point with attached globbed objects
45433+ */
45434+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45435+ if (obj->globbed == NULL)
45436+ return (obj->mode & GR_FIND) ? 1 : 0;
45437+
45438+ is_not_root = ((obj->filename[0] == '/') &&
45439+ (obj->filename[1] == '\0')) ? 0 : 1;
45440+ bufsize = PAGE_SIZE - namelen - is_not_root;
45441+
45442+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
45443+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45444+ return 1;
45445+
45446+ preempt_disable();
45447+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45448+ bufsize);
45449+
45450+ bufsize = strlen(path);
45451+
45452+ /* if base is "/", don't append an additional slash */
45453+ if (is_not_root)
45454+ *(path + bufsize) = '/';
45455+ memcpy(path + bufsize + is_not_root, name, namelen);
45456+ *(path + bufsize + namelen + is_not_root) = '\0';
45457+
45458+ tmp = obj->globbed;
45459+ while (tmp) {
45460+ if (!glob_match(tmp->filename, path)) {
45461+ preempt_enable();
45462+ return (tmp->mode & GR_FIND) ? 1 : 0;
45463+ }
45464+ tmp = tmp->next;
45465+ }
45466+ preempt_enable();
45467+ return (obj->mode & GR_FIND) ? 1 : 0;
45468+}
45469+
45470+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45471+EXPORT_SYMBOL(gr_acl_is_enabled);
45472+#endif
45473+EXPORT_SYMBOL(gr_learn_resource);
45474+EXPORT_SYMBOL(gr_set_kernel_label);
45475+#ifdef CONFIG_SECURITY
45476+EXPORT_SYMBOL(gr_check_user_change);
45477+EXPORT_SYMBOL(gr_check_group_change);
45478+#endif
45479+
45480diff -urNp linux-3.0.3/grsecurity/gracl_cap.c linux-3.0.3/grsecurity/gracl_cap.c
45481--- linux-3.0.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45482+++ linux-3.0.3/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45483@@ -0,0 +1,139 @@
45484+#include <linux/kernel.h>
45485+#include <linux/module.h>
45486+#include <linux/sched.h>
45487+#include <linux/gracl.h>
45488+#include <linux/grsecurity.h>
45489+#include <linux/grinternal.h>
45490+
45491+static const char *captab_log[] = {
45492+ "CAP_CHOWN",
45493+ "CAP_DAC_OVERRIDE",
45494+ "CAP_DAC_READ_SEARCH",
45495+ "CAP_FOWNER",
45496+ "CAP_FSETID",
45497+ "CAP_KILL",
45498+ "CAP_SETGID",
45499+ "CAP_SETUID",
45500+ "CAP_SETPCAP",
45501+ "CAP_LINUX_IMMUTABLE",
45502+ "CAP_NET_BIND_SERVICE",
45503+ "CAP_NET_BROADCAST",
45504+ "CAP_NET_ADMIN",
45505+ "CAP_NET_RAW",
45506+ "CAP_IPC_LOCK",
45507+ "CAP_IPC_OWNER",
45508+ "CAP_SYS_MODULE",
45509+ "CAP_SYS_RAWIO",
45510+ "CAP_SYS_CHROOT",
45511+ "CAP_SYS_PTRACE",
45512+ "CAP_SYS_PACCT",
45513+ "CAP_SYS_ADMIN",
45514+ "CAP_SYS_BOOT",
45515+ "CAP_SYS_NICE",
45516+ "CAP_SYS_RESOURCE",
45517+ "CAP_SYS_TIME",
45518+ "CAP_SYS_TTY_CONFIG",
45519+ "CAP_MKNOD",
45520+ "CAP_LEASE",
45521+ "CAP_AUDIT_WRITE",
45522+ "CAP_AUDIT_CONTROL",
45523+ "CAP_SETFCAP",
45524+ "CAP_MAC_OVERRIDE",
45525+ "CAP_MAC_ADMIN",
45526+ "CAP_SYSLOG"
45527+};
45528+
45529+EXPORT_SYMBOL(gr_is_capable);
45530+EXPORT_SYMBOL(gr_is_capable_nolog);
45531+
45532+int
45533+gr_is_capable(const int cap)
45534+{
45535+ struct task_struct *task = current;
45536+ const struct cred *cred = current_cred();
45537+ struct acl_subject_label *curracl;
45538+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45539+ kernel_cap_t cap_audit = __cap_empty_set;
45540+
45541+ if (!gr_acl_is_enabled())
45542+ return 1;
45543+
45544+ curracl = task->acl;
45545+
45546+ cap_drop = curracl->cap_lower;
45547+ cap_mask = curracl->cap_mask;
45548+ cap_audit = curracl->cap_invert_audit;
45549+
45550+ while ((curracl = curracl->parent_subject)) {
45551+ /* if the cap isn't specified in the current computed mask but is specified in the
45552+ current level subject, and is lowered in the current level subject, then add
45553+ it to the set of dropped capabilities
45554+ otherwise, add the current level subject's mask to the current computed mask
45555+ */
45556+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45557+ cap_raise(cap_mask, cap);
45558+ if (cap_raised(curracl->cap_lower, cap))
45559+ cap_raise(cap_drop, cap);
45560+ if (cap_raised(curracl->cap_invert_audit, cap))
45561+ cap_raise(cap_audit, cap);
45562+ }
45563+ }
45564+
45565+ if (!cap_raised(cap_drop, cap)) {
45566+ if (cap_raised(cap_audit, cap))
45567+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45568+ return 1;
45569+ }
45570+
45571+ curracl = task->acl;
45572+
45573+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45574+ && cap_raised(cred->cap_effective, cap)) {
45575+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45576+ task->role->roletype, cred->uid,
45577+ cred->gid, task->exec_file ?
45578+ gr_to_filename(task->exec_file->f_path.dentry,
45579+ task->exec_file->f_path.mnt) : curracl->filename,
45580+ curracl->filename, 0UL,
45581+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45582+ return 1;
45583+ }
45584+
45585+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45586+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45587+ return 0;
45588+}
45589+
45590+int
45591+gr_is_capable_nolog(const int cap)
45592+{
45593+ struct acl_subject_label *curracl;
45594+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45595+
45596+ if (!gr_acl_is_enabled())
45597+ return 1;
45598+
45599+ curracl = current->acl;
45600+
45601+ cap_drop = curracl->cap_lower;
45602+ cap_mask = curracl->cap_mask;
45603+
45604+ while ((curracl = curracl->parent_subject)) {
45605+ /* if the cap isn't specified in the current computed mask but is specified in the
45606+ current level subject, and is lowered in the current level subject, then add
45607+ it to the set of dropped capabilities
45608+ otherwise, add the current level subject's mask to the current computed mask
45609+ */
45610+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45611+ cap_raise(cap_mask, cap);
45612+ if (cap_raised(curracl->cap_lower, cap))
45613+ cap_raise(cap_drop, cap);
45614+ }
45615+ }
45616+
45617+ if (!cap_raised(cap_drop, cap))
45618+ return 1;
45619+
45620+ return 0;
45621+}
45622+
45623diff -urNp linux-3.0.3/grsecurity/gracl_fs.c linux-3.0.3/grsecurity/gracl_fs.c
45624--- linux-3.0.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45625+++ linux-3.0.3/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45626@@ -0,0 +1,431 @@
45627+#include <linux/kernel.h>
45628+#include <linux/sched.h>
45629+#include <linux/types.h>
45630+#include <linux/fs.h>
45631+#include <linux/file.h>
45632+#include <linux/stat.h>
45633+#include <linux/grsecurity.h>
45634+#include <linux/grinternal.h>
45635+#include <linux/gracl.h>
45636+
45637+__u32
45638+gr_acl_handle_hidden_file(const struct dentry * dentry,
45639+ const struct vfsmount * mnt)
45640+{
45641+ __u32 mode;
45642+
45643+ if (unlikely(!dentry->d_inode))
45644+ return GR_FIND;
45645+
45646+ mode =
45647+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45648+
45649+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45650+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45651+ return mode;
45652+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45653+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45654+ return 0;
45655+ } else if (unlikely(!(mode & GR_FIND)))
45656+ return 0;
45657+
45658+ return GR_FIND;
45659+}
45660+
45661+__u32
45662+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45663+ const int fmode)
45664+{
45665+ __u32 reqmode = GR_FIND;
45666+ __u32 mode;
45667+
45668+ if (unlikely(!dentry->d_inode))
45669+ return reqmode;
45670+
45671+ if (unlikely(fmode & O_APPEND))
45672+ reqmode |= GR_APPEND;
45673+ else if (unlikely(fmode & FMODE_WRITE))
45674+ reqmode |= GR_WRITE;
45675+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45676+ reqmode |= GR_READ;
45677+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45678+ reqmode &= ~GR_READ;
45679+ mode =
45680+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45681+ mnt);
45682+
45683+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45684+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45685+ reqmode & GR_READ ? " reading" : "",
45686+ reqmode & GR_WRITE ? " writing" : reqmode &
45687+ GR_APPEND ? " appending" : "");
45688+ return reqmode;
45689+ } else
45690+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45691+ {
45692+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45693+ reqmode & GR_READ ? " reading" : "",
45694+ reqmode & GR_WRITE ? " writing" : reqmode &
45695+ GR_APPEND ? " appending" : "");
45696+ return 0;
45697+ } else if (unlikely((mode & reqmode) != reqmode))
45698+ return 0;
45699+
45700+ return reqmode;
45701+}
45702+
45703+__u32
45704+gr_acl_handle_creat(const struct dentry * dentry,
45705+ const struct dentry * p_dentry,
45706+ const struct vfsmount * p_mnt, const int fmode,
45707+ const int imode)
45708+{
45709+ __u32 reqmode = GR_WRITE | GR_CREATE;
45710+ __u32 mode;
45711+
45712+ if (unlikely(fmode & O_APPEND))
45713+ reqmode |= GR_APPEND;
45714+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45715+ reqmode |= GR_READ;
45716+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45717+ reqmode |= GR_SETID;
45718+
45719+ mode =
45720+ gr_check_create(dentry, p_dentry, p_mnt,
45721+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45722+
45723+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45724+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45725+ reqmode & GR_READ ? " reading" : "",
45726+ reqmode & GR_WRITE ? " writing" : reqmode &
45727+ GR_APPEND ? " appending" : "");
45728+ return reqmode;
45729+ } else
45730+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45731+ {
45732+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45733+ reqmode & GR_READ ? " reading" : "",
45734+ reqmode & GR_WRITE ? " writing" : reqmode &
45735+ GR_APPEND ? " appending" : "");
45736+ return 0;
45737+ } else if (unlikely((mode & reqmode) != reqmode))
45738+ return 0;
45739+
45740+ return reqmode;
45741+}
45742+
45743+__u32
45744+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45745+ const int fmode)
45746+{
45747+ __u32 mode, reqmode = GR_FIND;
45748+
45749+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45750+ reqmode |= GR_EXEC;
45751+ if (fmode & S_IWOTH)
45752+ reqmode |= GR_WRITE;
45753+ if (fmode & S_IROTH)
45754+ reqmode |= GR_READ;
45755+
45756+ mode =
45757+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45758+ mnt);
45759+
45760+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45761+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45762+ reqmode & GR_READ ? " reading" : "",
45763+ reqmode & GR_WRITE ? " writing" : "",
45764+ reqmode & GR_EXEC ? " executing" : "");
45765+ return reqmode;
45766+ } else
45767+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45768+ {
45769+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45770+ reqmode & GR_READ ? " reading" : "",
45771+ reqmode & GR_WRITE ? " writing" : "",
45772+ reqmode & GR_EXEC ? " executing" : "");
45773+ return 0;
45774+ } else if (unlikely((mode & reqmode) != reqmode))
45775+ return 0;
45776+
45777+ return reqmode;
45778+}
45779+
45780+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45781+{
45782+ __u32 mode;
45783+
45784+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45785+
45786+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45787+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45788+ return mode;
45789+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45790+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45791+ return 0;
45792+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45793+ return 0;
45794+
45795+ return (reqmode);
45796+}
45797+
45798+__u32
45799+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45800+{
45801+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45802+}
45803+
45804+__u32
45805+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45806+{
45807+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45808+}
45809+
45810+__u32
45811+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45812+{
45813+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45814+}
45815+
45816+__u32
45817+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45818+{
45819+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45820+}
45821+
45822+__u32
45823+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45824+ mode_t mode)
45825+{
45826+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45827+ return 1;
45828+
45829+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45830+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45831+ GR_FCHMOD_ACL_MSG);
45832+ } else {
45833+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45834+ }
45835+}
45836+
45837+__u32
45838+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45839+ mode_t mode)
45840+{
45841+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45842+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45843+ GR_CHMOD_ACL_MSG);
45844+ } else {
45845+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45846+ }
45847+}
45848+
45849+__u32
45850+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45851+{
45852+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45853+}
45854+
45855+__u32
45856+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45857+{
45858+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45859+}
45860+
45861+__u32
45862+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45863+{
45864+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45865+}
45866+
45867+__u32
45868+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45869+{
45870+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45871+ GR_UNIXCONNECT_ACL_MSG);
45872+}
45873+
45874+/* hardlinks require at minimum create permission,
45875+ any additional privilege required is based on the
45876+ privilege of the file being linked to
45877+*/
45878+__u32
45879+gr_acl_handle_link(const struct dentry * new_dentry,
45880+ const struct dentry * parent_dentry,
45881+ const struct vfsmount * parent_mnt,
45882+ const struct dentry * old_dentry,
45883+ const struct vfsmount * old_mnt, const char *to)
45884+{
45885+ __u32 mode;
45886+ __u32 needmode = GR_CREATE | GR_LINK;
45887+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45888+
45889+ mode =
45890+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45891+ old_mnt);
45892+
45893+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45894+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45895+ return mode;
45896+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45897+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45898+ return 0;
45899+ } else if (unlikely((mode & needmode) != needmode))
45900+ return 0;
45901+
45902+ return 1;
45903+}
45904+
45905+__u32
45906+gr_acl_handle_symlink(const struct dentry * new_dentry,
45907+ const struct dentry * parent_dentry,
45908+ const struct vfsmount * parent_mnt, const char *from)
45909+{
45910+ __u32 needmode = GR_WRITE | GR_CREATE;
45911+ __u32 mode;
45912+
45913+ mode =
45914+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
45915+ GR_CREATE | GR_AUDIT_CREATE |
45916+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45917+
45918+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45919+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45920+ return mode;
45921+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45922+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45923+ return 0;
45924+ } else if (unlikely((mode & needmode) != needmode))
45925+ return 0;
45926+
45927+ return (GR_WRITE | GR_CREATE);
45928+}
45929+
45930+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45931+{
45932+ __u32 mode;
45933+
45934+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45935+
45936+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45937+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45938+ return mode;
45939+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45940+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45941+ return 0;
45942+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45943+ return 0;
45944+
45945+ return (reqmode);
45946+}
45947+
45948+__u32
45949+gr_acl_handle_mknod(const struct dentry * new_dentry,
45950+ const struct dentry * parent_dentry,
45951+ const struct vfsmount * parent_mnt,
45952+ const int mode)
45953+{
45954+ __u32 reqmode = GR_WRITE | GR_CREATE;
45955+ if (unlikely(mode & (S_ISUID | S_ISGID)))
45956+ reqmode |= GR_SETID;
45957+
45958+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45959+ reqmode, GR_MKNOD_ACL_MSG);
45960+}
45961+
45962+__u32
45963+gr_acl_handle_mkdir(const struct dentry *new_dentry,
45964+ const struct dentry *parent_dentry,
45965+ const struct vfsmount *parent_mnt)
45966+{
45967+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45968+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45969+}
45970+
45971+#define RENAME_CHECK_SUCCESS(old, new) \
45972+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45973+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45974+
45975+int
45976+gr_acl_handle_rename(struct dentry *new_dentry,
45977+ struct dentry *parent_dentry,
45978+ const struct vfsmount *parent_mnt,
45979+ struct dentry *old_dentry,
45980+ struct inode *old_parent_inode,
45981+ struct vfsmount *old_mnt, const char *newname)
45982+{
45983+ __u32 comp1, comp2;
45984+ int error = 0;
45985+
45986+ if (unlikely(!gr_acl_is_enabled()))
45987+ return 0;
45988+
45989+ if (!new_dentry->d_inode) {
45990+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
45991+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
45992+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
45993+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
45994+ GR_DELETE | GR_AUDIT_DELETE |
45995+ GR_AUDIT_READ | GR_AUDIT_WRITE |
45996+ GR_SUPPRESS, old_mnt);
45997+ } else {
45998+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
45999+ GR_CREATE | GR_DELETE |
46000+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46001+ GR_AUDIT_READ | GR_AUDIT_WRITE |
46002+ GR_SUPPRESS, parent_mnt);
46003+ comp2 =
46004+ gr_search_file(old_dentry,
46005+ GR_READ | GR_WRITE | GR_AUDIT_READ |
46006+ GR_DELETE | GR_AUDIT_DELETE |
46007+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46008+ }
46009+
46010+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46011+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46012+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46013+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46014+ && !(comp2 & GR_SUPPRESS)) {
46015+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46016+ error = -EACCES;
46017+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46018+ error = -EACCES;
46019+
46020+ return error;
46021+}
46022+
46023+void
46024+gr_acl_handle_exit(void)
46025+{
46026+ u16 id;
46027+ char *rolename;
46028+ struct file *exec_file;
46029+
46030+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46031+ !(current->role->roletype & GR_ROLE_PERSIST))) {
46032+ id = current->acl_role_id;
46033+ rolename = current->role->rolename;
46034+ gr_set_acls(1);
46035+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46036+ }
46037+
46038+ write_lock(&grsec_exec_file_lock);
46039+ exec_file = current->exec_file;
46040+ current->exec_file = NULL;
46041+ write_unlock(&grsec_exec_file_lock);
46042+
46043+ if (exec_file)
46044+ fput(exec_file);
46045+}
46046+
46047+int
46048+gr_acl_handle_procpidmem(const struct task_struct *task)
46049+{
46050+ if (unlikely(!gr_acl_is_enabled()))
46051+ return 0;
46052+
46053+ if (task != current && task->acl->mode & GR_PROTPROCFD)
46054+ return -EACCES;
46055+
46056+ return 0;
46057+}
46058diff -urNp linux-3.0.3/grsecurity/gracl_ip.c linux-3.0.3/grsecurity/gracl_ip.c
46059--- linux-3.0.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46060+++ linux-3.0.3/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
46061@@ -0,0 +1,381 @@
46062+#include <linux/kernel.h>
46063+#include <asm/uaccess.h>
46064+#include <asm/errno.h>
46065+#include <net/sock.h>
46066+#include <linux/file.h>
46067+#include <linux/fs.h>
46068+#include <linux/net.h>
46069+#include <linux/in.h>
46070+#include <linux/skbuff.h>
46071+#include <linux/ip.h>
46072+#include <linux/udp.h>
46073+#include <linux/types.h>
46074+#include <linux/sched.h>
46075+#include <linux/netdevice.h>
46076+#include <linux/inetdevice.h>
46077+#include <linux/gracl.h>
46078+#include <linux/grsecurity.h>
46079+#include <linux/grinternal.h>
46080+
46081+#define GR_BIND 0x01
46082+#define GR_CONNECT 0x02
46083+#define GR_INVERT 0x04
46084+#define GR_BINDOVERRIDE 0x08
46085+#define GR_CONNECTOVERRIDE 0x10
46086+#define GR_SOCK_FAMILY 0x20
46087+
46088+static const char * gr_protocols[IPPROTO_MAX] = {
46089+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46090+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46091+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46092+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46093+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46094+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46095+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46096+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46097+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46098+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46099+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46100+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46101+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46102+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46103+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46104+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46105+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46106+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46107+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46108+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46109+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46110+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46111+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46112+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46113+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46114+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46115+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46116+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46117+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46118+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46119+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46120+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46121+ };
46122+
46123+static const char * gr_socktypes[SOCK_MAX] = {
46124+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46125+ "unknown:7", "unknown:8", "unknown:9", "packet"
46126+ };
46127+
46128+static const char * gr_sockfamilies[AF_MAX+1] = {
46129+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46130+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46131+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46132+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46133+ };
46134+
46135+const char *
46136+gr_proto_to_name(unsigned char proto)
46137+{
46138+ return gr_protocols[proto];
46139+}
46140+
46141+const char *
46142+gr_socktype_to_name(unsigned char type)
46143+{
46144+ return gr_socktypes[type];
46145+}
46146+
46147+const char *
46148+gr_sockfamily_to_name(unsigned char family)
46149+{
46150+ return gr_sockfamilies[family];
46151+}
46152+
46153+int
46154+gr_search_socket(const int domain, const int type, const int protocol)
46155+{
46156+ struct acl_subject_label *curr;
46157+ const struct cred *cred = current_cred();
46158+
46159+ if (unlikely(!gr_acl_is_enabled()))
46160+ goto exit;
46161+
46162+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
46163+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46164+ goto exit; // let the kernel handle it
46165+
46166+ curr = current->acl;
46167+
46168+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46169+ /* the family is allowed, if this is PF_INET allow it only if
46170+ the extra sock type/protocol checks pass */
46171+ if (domain == PF_INET)
46172+ goto inet_check;
46173+ goto exit;
46174+ } else {
46175+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46176+ __u32 fakeip = 0;
46177+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46178+ current->role->roletype, cred->uid,
46179+ cred->gid, current->exec_file ?
46180+ gr_to_filename(current->exec_file->f_path.dentry,
46181+ current->exec_file->f_path.mnt) :
46182+ curr->filename, curr->filename,
46183+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46184+ &current->signal->saved_ip);
46185+ goto exit;
46186+ }
46187+ goto exit_fail;
46188+ }
46189+
46190+inet_check:
46191+ /* the rest of this checking is for IPv4 only */
46192+ if (!curr->ips)
46193+ goto exit;
46194+
46195+ if ((curr->ip_type & (1 << type)) &&
46196+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46197+ goto exit;
46198+
46199+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46200+ /* we don't place acls on raw sockets , and sometimes
46201+ dgram/ip sockets are opened for ioctl and not
46202+ bind/connect, so we'll fake a bind learn log */
46203+ if (type == SOCK_RAW || type == SOCK_PACKET) {
46204+ __u32 fakeip = 0;
46205+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46206+ current->role->roletype, cred->uid,
46207+ cred->gid, current->exec_file ?
46208+ gr_to_filename(current->exec_file->f_path.dentry,
46209+ current->exec_file->f_path.mnt) :
46210+ curr->filename, curr->filename,
46211+ &fakeip, 0, type,
46212+ protocol, GR_CONNECT, &current->signal->saved_ip);
46213+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46214+ __u32 fakeip = 0;
46215+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46216+ current->role->roletype, cred->uid,
46217+ cred->gid, current->exec_file ?
46218+ gr_to_filename(current->exec_file->f_path.dentry,
46219+ current->exec_file->f_path.mnt) :
46220+ curr->filename, curr->filename,
46221+ &fakeip, 0, type,
46222+ protocol, GR_BIND, &current->signal->saved_ip);
46223+ }
46224+ /* we'll log when they use connect or bind */
46225+ goto exit;
46226+ }
46227+
46228+exit_fail:
46229+ if (domain == PF_INET)
46230+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46231+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
46232+ else
46233+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46234+ gr_socktype_to_name(type), protocol);
46235+
46236+ return 0;
46237+exit:
46238+ return 1;
46239+}
46240+
46241+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46242+{
46243+ if ((ip->mode & mode) &&
46244+ (ip_port >= ip->low) &&
46245+ (ip_port <= ip->high) &&
46246+ ((ntohl(ip_addr) & our_netmask) ==
46247+ (ntohl(our_addr) & our_netmask))
46248+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46249+ && (ip->type & (1 << type))) {
46250+ if (ip->mode & GR_INVERT)
46251+ return 2; // specifically denied
46252+ else
46253+ return 1; // allowed
46254+ }
46255+
46256+ return 0; // not specifically allowed, may continue parsing
46257+}
46258+
46259+static int
46260+gr_search_connectbind(const int full_mode, struct sock *sk,
46261+ struct sockaddr_in *addr, const int type)
46262+{
46263+ char iface[IFNAMSIZ] = {0};
46264+ struct acl_subject_label *curr;
46265+ struct acl_ip_label *ip;
46266+ struct inet_sock *isk;
46267+ struct net_device *dev;
46268+ struct in_device *idev;
46269+ unsigned long i;
46270+ int ret;
46271+ int mode = full_mode & (GR_BIND | GR_CONNECT);
46272+ __u32 ip_addr = 0;
46273+ __u32 our_addr;
46274+ __u32 our_netmask;
46275+ char *p;
46276+ __u16 ip_port = 0;
46277+ const struct cred *cred = current_cred();
46278+
46279+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46280+ return 0;
46281+
46282+ curr = current->acl;
46283+ isk = inet_sk(sk);
46284+
46285+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46286+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46287+ addr->sin_addr.s_addr = curr->inaddr_any_override;
46288+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46289+ struct sockaddr_in saddr;
46290+ int err;
46291+
46292+ saddr.sin_family = AF_INET;
46293+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
46294+ saddr.sin_port = isk->inet_sport;
46295+
46296+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46297+ if (err)
46298+ return err;
46299+
46300+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46301+ if (err)
46302+ return err;
46303+ }
46304+
46305+ if (!curr->ips)
46306+ return 0;
46307+
46308+ ip_addr = addr->sin_addr.s_addr;
46309+ ip_port = ntohs(addr->sin_port);
46310+
46311+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46312+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46313+ current->role->roletype, cred->uid,
46314+ cred->gid, current->exec_file ?
46315+ gr_to_filename(current->exec_file->f_path.dentry,
46316+ current->exec_file->f_path.mnt) :
46317+ curr->filename, curr->filename,
46318+ &ip_addr, ip_port, type,
46319+ sk->sk_protocol, mode, &current->signal->saved_ip);
46320+ return 0;
46321+ }
46322+
46323+ for (i = 0; i < curr->ip_num; i++) {
46324+ ip = *(curr->ips + i);
46325+ if (ip->iface != NULL) {
46326+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
46327+ p = strchr(iface, ':');
46328+ if (p != NULL)
46329+ *p = '\0';
46330+ dev = dev_get_by_name(sock_net(sk), iface);
46331+ if (dev == NULL)
46332+ continue;
46333+ idev = in_dev_get(dev);
46334+ if (idev == NULL) {
46335+ dev_put(dev);
46336+ continue;
46337+ }
46338+ rcu_read_lock();
46339+ for_ifa(idev) {
46340+ if (!strcmp(ip->iface, ifa->ifa_label)) {
46341+ our_addr = ifa->ifa_address;
46342+ our_netmask = 0xffffffff;
46343+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46344+ if (ret == 1) {
46345+ rcu_read_unlock();
46346+ in_dev_put(idev);
46347+ dev_put(dev);
46348+ return 0;
46349+ } else if (ret == 2) {
46350+ rcu_read_unlock();
46351+ in_dev_put(idev);
46352+ dev_put(dev);
46353+ goto denied;
46354+ }
46355+ }
46356+ } endfor_ifa(idev);
46357+ rcu_read_unlock();
46358+ in_dev_put(idev);
46359+ dev_put(dev);
46360+ } else {
46361+ our_addr = ip->addr;
46362+ our_netmask = ip->netmask;
46363+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46364+ if (ret == 1)
46365+ return 0;
46366+ else if (ret == 2)
46367+ goto denied;
46368+ }
46369+ }
46370+
46371+denied:
46372+ if (mode == GR_BIND)
46373+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46374+ else if (mode == GR_CONNECT)
46375+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46376+
46377+ return -EACCES;
46378+}
46379+
46380+int
46381+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46382+{
46383+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46384+}
46385+
46386+int
46387+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46388+{
46389+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46390+}
46391+
46392+int gr_search_listen(struct socket *sock)
46393+{
46394+ struct sock *sk = sock->sk;
46395+ struct sockaddr_in addr;
46396+
46397+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46398+ addr.sin_port = inet_sk(sk)->inet_sport;
46399+
46400+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46401+}
46402+
46403+int gr_search_accept(struct socket *sock)
46404+{
46405+ struct sock *sk = sock->sk;
46406+ struct sockaddr_in addr;
46407+
46408+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46409+ addr.sin_port = inet_sk(sk)->inet_sport;
46410+
46411+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46412+}
46413+
46414+int
46415+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46416+{
46417+ if (addr)
46418+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46419+ else {
46420+ struct sockaddr_in sin;
46421+ const struct inet_sock *inet = inet_sk(sk);
46422+
46423+ sin.sin_addr.s_addr = inet->inet_daddr;
46424+ sin.sin_port = inet->inet_dport;
46425+
46426+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46427+ }
46428+}
46429+
46430+int
46431+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46432+{
46433+ struct sockaddr_in sin;
46434+
46435+ if (unlikely(skb->len < sizeof (struct udphdr)))
46436+ return 0; // skip this packet
46437+
46438+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46439+ sin.sin_port = udp_hdr(skb)->source;
46440+
46441+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46442+}
46443diff -urNp linux-3.0.3/grsecurity/gracl_learn.c linux-3.0.3/grsecurity/gracl_learn.c
46444--- linux-3.0.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46445+++ linux-3.0.3/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46446@@ -0,0 +1,207 @@
46447+#include <linux/kernel.h>
46448+#include <linux/mm.h>
46449+#include <linux/sched.h>
46450+#include <linux/poll.h>
46451+#include <linux/string.h>
46452+#include <linux/file.h>
46453+#include <linux/types.h>
46454+#include <linux/vmalloc.h>
46455+#include <linux/grinternal.h>
46456+
46457+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46458+ size_t count, loff_t *ppos);
46459+extern int gr_acl_is_enabled(void);
46460+
46461+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46462+static int gr_learn_attached;
46463+
46464+/* use a 512k buffer */
46465+#define LEARN_BUFFER_SIZE (512 * 1024)
46466+
46467+static DEFINE_SPINLOCK(gr_learn_lock);
46468+static DEFINE_MUTEX(gr_learn_user_mutex);
46469+
46470+/* we need to maintain two buffers, so that the kernel context of grlearn
46471+ uses a semaphore around the userspace copying, and the other kernel contexts
46472+ use a spinlock when copying into the buffer, since they cannot sleep
46473+*/
46474+static char *learn_buffer;
46475+static char *learn_buffer_user;
46476+static int learn_buffer_len;
46477+static int learn_buffer_user_len;
46478+
46479+static ssize_t
46480+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46481+{
46482+ DECLARE_WAITQUEUE(wait, current);
46483+ ssize_t retval = 0;
46484+
46485+ add_wait_queue(&learn_wait, &wait);
46486+ set_current_state(TASK_INTERRUPTIBLE);
46487+ do {
46488+ mutex_lock(&gr_learn_user_mutex);
46489+ spin_lock(&gr_learn_lock);
46490+ if (learn_buffer_len)
46491+ break;
46492+ spin_unlock(&gr_learn_lock);
46493+ mutex_unlock(&gr_learn_user_mutex);
46494+ if (file->f_flags & O_NONBLOCK) {
46495+ retval = -EAGAIN;
46496+ goto out;
46497+ }
46498+ if (signal_pending(current)) {
46499+ retval = -ERESTARTSYS;
46500+ goto out;
46501+ }
46502+
46503+ schedule();
46504+ } while (1);
46505+
46506+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46507+ learn_buffer_user_len = learn_buffer_len;
46508+ retval = learn_buffer_len;
46509+ learn_buffer_len = 0;
46510+
46511+ spin_unlock(&gr_learn_lock);
46512+
46513+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46514+ retval = -EFAULT;
46515+
46516+ mutex_unlock(&gr_learn_user_mutex);
46517+out:
46518+ set_current_state(TASK_RUNNING);
46519+ remove_wait_queue(&learn_wait, &wait);
46520+ return retval;
46521+}
46522+
46523+static unsigned int
46524+poll_learn(struct file * file, poll_table * wait)
46525+{
46526+ poll_wait(file, &learn_wait, wait);
46527+
46528+ if (learn_buffer_len)
46529+ return (POLLIN | POLLRDNORM);
46530+
46531+ return 0;
46532+}
46533+
46534+void
46535+gr_clear_learn_entries(void)
46536+{
46537+ char *tmp;
46538+
46539+ mutex_lock(&gr_learn_user_mutex);
46540+ spin_lock(&gr_learn_lock);
46541+ tmp = learn_buffer;
46542+ learn_buffer = NULL;
46543+ spin_unlock(&gr_learn_lock);
46544+ if (tmp)
46545+ vfree(tmp);
46546+ if (learn_buffer_user != NULL) {
46547+ vfree(learn_buffer_user);
46548+ learn_buffer_user = NULL;
46549+ }
46550+ learn_buffer_len = 0;
46551+ mutex_unlock(&gr_learn_user_mutex);
46552+
46553+ return;
46554+}
46555+
46556+void
46557+gr_add_learn_entry(const char *fmt, ...)
46558+{
46559+ va_list args;
46560+ unsigned int len;
46561+
46562+ if (!gr_learn_attached)
46563+ return;
46564+
46565+ spin_lock(&gr_learn_lock);
46566+
46567+ /* leave a gap at the end so we know when it's "full" but don't have to
46568+ compute the exact length of the string we're trying to append
46569+ */
46570+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46571+ spin_unlock(&gr_learn_lock);
46572+ wake_up_interruptible(&learn_wait);
46573+ return;
46574+ }
46575+ if (learn_buffer == NULL) {
46576+ spin_unlock(&gr_learn_lock);
46577+ return;
46578+ }
46579+
46580+ va_start(args, fmt);
46581+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46582+ va_end(args);
46583+
46584+ learn_buffer_len += len + 1;
46585+
46586+ spin_unlock(&gr_learn_lock);
46587+ wake_up_interruptible(&learn_wait);
46588+
46589+ return;
46590+}
46591+
46592+static int
46593+open_learn(struct inode *inode, struct file *file)
46594+{
46595+ if (file->f_mode & FMODE_READ && gr_learn_attached)
46596+ return -EBUSY;
46597+ if (file->f_mode & FMODE_READ) {
46598+ int retval = 0;
46599+ mutex_lock(&gr_learn_user_mutex);
46600+ if (learn_buffer == NULL)
46601+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46602+ if (learn_buffer_user == NULL)
46603+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46604+ if (learn_buffer == NULL) {
46605+ retval = -ENOMEM;
46606+ goto out_error;
46607+ }
46608+ if (learn_buffer_user == NULL) {
46609+ retval = -ENOMEM;
46610+ goto out_error;
46611+ }
46612+ learn_buffer_len = 0;
46613+ learn_buffer_user_len = 0;
46614+ gr_learn_attached = 1;
46615+out_error:
46616+ mutex_unlock(&gr_learn_user_mutex);
46617+ return retval;
46618+ }
46619+ return 0;
46620+}
46621+
46622+static int
46623+close_learn(struct inode *inode, struct file *file)
46624+{
46625+ if (file->f_mode & FMODE_READ) {
46626+ char *tmp = NULL;
46627+ mutex_lock(&gr_learn_user_mutex);
46628+ spin_lock(&gr_learn_lock);
46629+ tmp = learn_buffer;
46630+ learn_buffer = NULL;
46631+ spin_unlock(&gr_learn_lock);
46632+ if (tmp)
46633+ vfree(tmp);
46634+ if (learn_buffer_user != NULL) {
46635+ vfree(learn_buffer_user);
46636+ learn_buffer_user = NULL;
46637+ }
46638+ learn_buffer_len = 0;
46639+ learn_buffer_user_len = 0;
46640+ gr_learn_attached = 0;
46641+ mutex_unlock(&gr_learn_user_mutex);
46642+ }
46643+
46644+ return 0;
46645+}
46646+
46647+const struct file_operations grsec_fops = {
46648+ .read = read_learn,
46649+ .write = write_grsec_handler,
46650+ .open = open_learn,
46651+ .release = close_learn,
46652+ .poll = poll_learn,
46653+};
46654diff -urNp linux-3.0.3/grsecurity/gracl_res.c linux-3.0.3/grsecurity/gracl_res.c
46655--- linux-3.0.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46656+++ linux-3.0.3/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46657@@ -0,0 +1,68 @@
46658+#include <linux/kernel.h>
46659+#include <linux/sched.h>
46660+#include <linux/gracl.h>
46661+#include <linux/grinternal.h>
46662+
46663+static const char *restab_log[] = {
46664+ [RLIMIT_CPU] = "RLIMIT_CPU",
46665+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46666+ [RLIMIT_DATA] = "RLIMIT_DATA",
46667+ [RLIMIT_STACK] = "RLIMIT_STACK",
46668+ [RLIMIT_CORE] = "RLIMIT_CORE",
46669+ [RLIMIT_RSS] = "RLIMIT_RSS",
46670+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
46671+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46672+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46673+ [RLIMIT_AS] = "RLIMIT_AS",
46674+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46675+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46676+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46677+ [RLIMIT_NICE] = "RLIMIT_NICE",
46678+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46679+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46680+ [GR_CRASH_RES] = "RLIMIT_CRASH"
46681+};
46682+
46683+void
46684+gr_log_resource(const struct task_struct *task,
46685+ const int res, const unsigned long wanted, const int gt)
46686+{
46687+ const struct cred *cred;
46688+ unsigned long rlim;
46689+
46690+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
46691+ return;
46692+
46693+ // not yet supported resource
46694+ if (unlikely(!restab_log[res]))
46695+ return;
46696+
46697+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46698+ rlim = task_rlimit_max(task, res);
46699+ else
46700+ rlim = task_rlimit(task, res);
46701+
46702+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46703+ return;
46704+
46705+ rcu_read_lock();
46706+ cred = __task_cred(task);
46707+
46708+ if (res == RLIMIT_NPROC &&
46709+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46710+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46711+ goto out_rcu_unlock;
46712+ else if (res == RLIMIT_MEMLOCK &&
46713+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46714+ goto out_rcu_unlock;
46715+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46716+ goto out_rcu_unlock;
46717+ rcu_read_unlock();
46718+
46719+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46720+
46721+ return;
46722+out_rcu_unlock:
46723+ rcu_read_unlock();
46724+ return;
46725+}
46726diff -urNp linux-3.0.3/grsecurity/gracl_segv.c linux-3.0.3/grsecurity/gracl_segv.c
46727--- linux-3.0.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46728+++ linux-3.0.3/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46729@@ -0,0 +1,299 @@
46730+#include <linux/kernel.h>
46731+#include <linux/mm.h>
46732+#include <asm/uaccess.h>
46733+#include <asm/errno.h>
46734+#include <asm/mman.h>
46735+#include <net/sock.h>
46736+#include <linux/file.h>
46737+#include <linux/fs.h>
46738+#include <linux/net.h>
46739+#include <linux/in.h>
46740+#include <linux/slab.h>
46741+#include <linux/types.h>
46742+#include <linux/sched.h>
46743+#include <linux/timer.h>
46744+#include <linux/gracl.h>
46745+#include <linux/grsecurity.h>
46746+#include <linux/grinternal.h>
46747+
46748+static struct crash_uid *uid_set;
46749+static unsigned short uid_used;
46750+static DEFINE_SPINLOCK(gr_uid_lock);
46751+extern rwlock_t gr_inode_lock;
46752+extern struct acl_subject_label *
46753+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46754+ struct acl_role_label *role);
46755+
46756+#ifdef CONFIG_BTRFS_FS
46757+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46758+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46759+#endif
46760+
46761+static inline dev_t __get_dev(const struct dentry *dentry)
46762+{
46763+#ifdef CONFIG_BTRFS_FS
46764+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46765+ return get_btrfs_dev_from_inode(dentry->d_inode);
46766+ else
46767+#endif
46768+ return dentry->d_inode->i_sb->s_dev;
46769+}
46770+
46771+int
46772+gr_init_uidset(void)
46773+{
46774+ uid_set =
46775+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46776+ uid_used = 0;
46777+
46778+ return uid_set ? 1 : 0;
46779+}
46780+
46781+void
46782+gr_free_uidset(void)
46783+{
46784+ if (uid_set)
46785+ kfree(uid_set);
46786+
46787+ return;
46788+}
46789+
46790+int
46791+gr_find_uid(const uid_t uid)
46792+{
46793+ struct crash_uid *tmp = uid_set;
46794+ uid_t buid;
46795+ int low = 0, high = uid_used - 1, mid;
46796+
46797+ while (high >= low) {
46798+ mid = (low + high) >> 1;
46799+ buid = tmp[mid].uid;
46800+ if (buid == uid)
46801+ return mid;
46802+ if (buid > uid)
46803+ high = mid - 1;
46804+ if (buid < uid)
46805+ low = mid + 1;
46806+ }
46807+
46808+ return -1;
46809+}
46810+
46811+static __inline__ void
46812+gr_insertsort(void)
46813+{
46814+ unsigned short i, j;
46815+ struct crash_uid index;
46816+
46817+ for (i = 1; i < uid_used; i++) {
46818+ index = uid_set[i];
46819+ j = i;
46820+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46821+ uid_set[j] = uid_set[j - 1];
46822+ j--;
46823+ }
46824+ uid_set[j] = index;
46825+ }
46826+
46827+ return;
46828+}
46829+
46830+static __inline__ void
46831+gr_insert_uid(const uid_t uid, const unsigned long expires)
46832+{
46833+ int loc;
46834+
46835+ if (uid_used == GR_UIDTABLE_MAX)
46836+ return;
46837+
46838+ loc = gr_find_uid(uid);
46839+
46840+ if (loc >= 0) {
46841+ uid_set[loc].expires = expires;
46842+ return;
46843+ }
46844+
46845+ uid_set[uid_used].uid = uid;
46846+ uid_set[uid_used].expires = expires;
46847+ uid_used++;
46848+
46849+ gr_insertsort();
46850+
46851+ return;
46852+}
46853+
46854+void
46855+gr_remove_uid(const unsigned short loc)
46856+{
46857+ unsigned short i;
46858+
46859+ for (i = loc + 1; i < uid_used; i++)
46860+ uid_set[i - 1] = uid_set[i];
46861+
46862+ uid_used--;
46863+
46864+ return;
46865+}
46866+
46867+int
46868+gr_check_crash_uid(const uid_t uid)
46869+{
46870+ int loc;
46871+ int ret = 0;
46872+
46873+ if (unlikely(!gr_acl_is_enabled()))
46874+ return 0;
46875+
46876+ spin_lock(&gr_uid_lock);
46877+ loc = gr_find_uid(uid);
46878+
46879+ if (loc < 0)
46880+ goto out_unlock;
46881+
46882+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
46883+ gr_remove_uid(loc);
46884+ else
46885+ ret = 1;
46886+
46887+out_unlock:
46888+ spin_unlock(&gr_uid_lock);
46889+ return ret;
46890+}
46891+
46892+static __inline__ int
46893+proc_is_setxid(const struct cred *cred)
46894+{
46895+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
46896+ cred->uid != cred->fsuid)
46897+ return 1;
46898+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46899+ cred->gid != cred->fsgid)
46900+ return 1;
46901+
46902+ return 0;
46903+}
46904+
46905+extern int gr_fake_force_sig(int sig, struct task_struct *t);
46906+
46907+void
46908+gr_handle_crash(struct task_struct *task, const int sig)
46909+{
46910+ struct acl_subject_label *curr;
46911+ struct acl_subject_label *curr2;
46912+ struct task_struct *tsk, *tsk2;
46913+ const struct cred *cred;
46914+ const struct cred *cred2;
46915+
46916+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46917+ return;
46918+
46919+ if (unlikely(!gr_acl_is_enabled()))
46920+ return;
46921+
46922+ curr = task->acl;
46923+
46924+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
46925+ return;
46926+
46927+ if (time_before_eq(curr->expires, get_seconds())) {
46928+ curr->expires = 0;
46929+ curr->crashes = 0;
46930+ }
46931+
46932+ curr->crashes++;
46933+
46934+ if (!curr->expires)
46935+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46936+
46937+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46938+ time_after(curr->expires, get_seconds())) {
46939+ rcu_read_lock();
46940+ cred = __task_cred(task);
46941+ if (cred->uid && proc_is_setxid(cred)) {
46942+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46943+ spin_lock(&gr_uid_lock);
46944+ gr_insert_uid(cred->uid, curr->expires);
46945+ spin_unlock(&gr_uid_lock);
46946+ curr->expires = 0;
46947+ curr->crashes = 0;
46948+ read_lock(&tasklist_lock);
46949+ do_each_thread(tsk2, tsk) {
46950+ cred2 = __task_cred(tsk);
46951+ if (tsk != task && cred2->uid == cred->uid)
46952+ gr_fake_force_sig(SIGKILL, tsk);
46953+ } while_each_thread(tsk2, tsk);
46954+ read_unlock(&tasklist_lock);
46955+ } else {
46956+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46957+ read_lock(&tasklist_lock);
46958+ do_each_thread(tsk2, tsk) {
46959+ if (likely(tsk != task)) {
46960+ curr2 = tsk->acl;
46961+
46962+ if (curr2->device == curr->device &&
46963+ curr2->inode == curr->inode)
46964+ gr_fake_force_sig(SIGKILL, tsk);
46965+ }
46966+ } while_each_thread(tsk2, tsk);
46967+ read_unlock(&tasklist_lock);
46968+ }
46969+ rcu_read_unlock();
46970+ }
46971+
46972+ return;
46973+}
46974+
46975+int
46976+gr_check_crash_exec(const struct file *filp)
46977+{
46978+ struct acl_subject_label *curr;
46979+
46980+ if (unlikely(!gr_acl_is_enabled()))
46981+ return 0;
46982+
46983+ read_lock(&gr_inode_lock);
46984+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
46985+ __get_dev(filp->f_path.dentry),
46986+ current->role);
46987+ read_unlock(&gr_inode_lock);
46988+
46989+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
46990+ (!curr->crashes && !curr->expires))
46991+ return 0;
46992+
46993+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46994+ time_after(curr->expires, get_seconds()))
46995+ return 1;
46996+ else if (time_before_eq(curr->expires, get_seconds())) {
46997+ curr->crashes = 0;
46998+ curr->expires = 0;
46999+ }
47000+
47001+ return 0;
47002+}
47003+
47004+void
47005+gr_handle_alertkill(struct task_struct *task)
47006+{
47007+ struct acl_subject_label *curracl;
47008+ __u32 curr_ip;
47009+ struct task_struct *p, *p2;
47010+
47011+ if (unlikely(!gr_acl_is_enabled()))
47012+ return;
47013+
47014+ curracl = task->acl;
47015+ curr_ip = task->signal->curr_ip;
47016+
47017+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47018+ read_lock(&tasklist_lock);
47019+ do_each_thread(p2, p) {
47020+ if (p->signal->curr_ip == curr_ip)
47021+ gr_fake_force_sig(SIGKILL, p);
47022+ } while_each_thread(p2, p);
47023+ read_unlock(&tasklist_lock);
47024+ } else if (curracl->mode & GR_KILLPROC)
47025+ gr_fake_force_sig(SIGKILL, task);
47026+
47027+ return;
47028+}
47029diff -urNp linux-3.0.3/grsecurity/gracl_shm.c linux-3.0.3/grsecurity/gracl_shm.c
47030--- linux-3.0.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47031+++ linux-3.0.3/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
47032@@ -0,0 +1,40 @@
47033+#include <linux/kernel.h>
47034+#include <linux/mm.h>
47035+#include <linux/sched.h>
47036+#include <linux/file.h>
47037+#include <linux/ipc.h>
47038+#include <linux/gracl.h>
47039+#include <linux/grsecurity.h>
47040+#include <linux/grinternal.h>
47041+
47042+int
47043+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47044+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47045+{
47046+ struct task_struct *task;
47047+
47048+ if (!gr_acl_is_enabled())
47049+ return 1;
47050+
47051+ rcu_read_lock();
47052+ read_lock(&tasklist_lock);
47053+
47054+ task = find_task_by_vpid(shm_cprid);
47055+
47056+ if (unlikely(!task))
47057+ task = find_task_by_vpid(shm_lapid);
47058+
47059+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47060+ (task->pid == shm_lapid)) &&
47061+ (task->acl->mode & GR_PROTSHM) &&
47062+ (task->acl != current->acl))) {
47063+ read_unlock(&tasklist_lock);
47064+ rcu_read_unlock();
47065+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47066+ return 0;
47067+ }
47068+ read_unlock(&tasklist_lock);
47069+ rcu_read_unlock();
47070+
47071+ return 1;
47072+}
47073diff -urNp linux-3.0.3/grsecurity/grsec_chdir.c linux-3.0.3/grsecurity/grsec_chdir.c
47074--- linux-3.0.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47075+++ linux-3.0.3/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
47076@@ -0,0 +1,19 @@
47077+#include <linux/kernel.h>
47078+#include <linux/sched.h>
47079+#include <linux/fs.h>
47080+#include <linux/file.h>
47081+#include <linux/grsecurity.h>
47082+#include <linux/grinternal.h>
47083+
47084+void
47085+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47086+{
47087+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47088+ if ((grsec_enable_chdir && grsec_enable_group &&
47089+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47090+ !grsec_enable_group)) {
47091+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47092+ }
47093+#endif
47094+ return;
47095+}
47096diff -urNp linux-3.0.3/grsecurity/grsec_chroot.c linux-3.0.3/grsecurity/grsec_chroot.c
47097--- linux-3.0.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47098+++ linux-3.0.3/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
47099@@ -0,0 +1,349 @@
47100+#include <linux/kernel.h>
47101+#include <linux/module.h>
47102+#include <linux/sched.h>
47103+#include <linux/file.h>
47104+#include <linux/fs.h>
47105+#include <linux/mount.h>
47106+#include <linux/types.h>
47107+#include <linux/pid_namespace.h>
47108+#include <linux/grsecurity.h>
47109+#include <linux/grinternal.h>
47110+
47111+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47112+{
47113+#ifdef CONFIG_GRKERNSEC
47114+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47115+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47116+ task->gr_is_chrooted = 1;
47117+ else
47118+ task->gr_is_chrooted = 0;
47119+
47120+ task->gr_chroot_dentry = path->dentry;
47121+#endif
47122+ return;
47123+}
47124+
47125+void gr_clear_chroot_entries(struct task_struct *task)
47126+{
47127+#ifdef CONFIG_GRKERNSEC
47128+ task->gr_is_chrooted = 0;
47129+ task->gr_chroot_dentry = NULL;
47130+#endif
47131+ return;
47132+}
47133+
47134+int
47135+gr_handle_chroot_unix(const pid_t pid)
47136+{
47137+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47138+ struct task_struct *p;
47139+
47140+ if (unlikely(!grsec_enable_chroot_unix))
47141+ return 1;
47142+
47143+ if (likely(!proc_is_chrooted(current)))
47144+ return 1;
47145+
47146+ rcu_read_lock();
47147+ read_lock(&tasklist_lock);
47148+ p = find_task_by_vpid_unrestricted(pid);
47149+ if (unlikely(p && !have_same_root(current, p))) {
47150+ read_unlock(&tasklist_lock);
47151+ rcu_read_unlock();
47152+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47153+ return 0;
47154+ }
47155+ read_unlock(&tasklist_lock);
47156+ rcu_read_unlock();
47157+#endif
47158+ return 1;
47159+}
47160+
47161+int
47162+gr_handle_chroot_nice(void)
47163+{
47164+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47165+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47166+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47167+ return -EPERM;
47168+ }
47169+#endif
47170+ return 0;
47171+}
47172+
47173+int
47174+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47175+{
47176+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47177+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47178+ && proc_is_chrooted(current)) {
47179+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47180+ return -EACCES;
47181+ }
47182+#endif
47183+ return 0;
47184+}
47185+
47186+int
47187+gr_handle_chroot_rawio(const struct inode *inode)
47188+{
47189+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47190+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47191+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47192+ return 1;
47193+#endif
47194+ return 0;
47195+}
47196+
47197+int
47198+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47199+{
47200+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47201+ struct task_struct *p;
47202+ int ret = 0;
47203+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47204+ return ret;
47205+
47206+ read_lock(&tasklist_lock);
47207+ do_each_pid_task(pid, type, p) {
47208+ if (!have_same_root(current, p)) {
47209+ ret = 1;
47210+ goto out;
47211+ }
47212+ } while_each_pid_task(pid, type, p);
47213+out:
47214+ read_unlock(&tasklist_lock);
47215+ return ret;
47216+#endif
47217+ return 0;
47218+}
47219+
47220+int
47221+gr_pid_is_chrooted(struct task_struct *p)
47222+{
47223+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47224+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47225+ return 0;
47226+
47227+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47228+ !have_same_root(current, p)) {
47229+ return 1;
47230+ }
47231+#endif
47232+ return 0;
47233+}
47234+
47235+EXPORT_SYMBOL(gr_pid_is_chrooted);
47236+
47237+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47238+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47239+{
47240+ struct path path, currentroot;
47241+ int ret = 0;
47242+
47243+ path.dentry = (struct dentry *)u_dentry;
47244+ path.mnt = (struct vfsmount *)u_mnt;
47245+ get_fs_root(current->fs, &currentroot);
47246+ if (path_is_under(&path, &currentroot))
47247+ ret = 1;
47248+ path_put(&currentroot);
47249+
47250+ return ret;
47251+}
47252+#endif
47253+
47254+int
47255+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47256+{
47257+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47258+ if (!grsec_enable_chroot_fchdir)
47259+ return 1;
47260+
47261+ if (!proc_is_chrooted(current))
47262+ return 1;
47263+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47264+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47265+ return 0;
47266+ }
47267+#endif
47268+ return 1;
47269+}
47270+
47271+int
47272+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47273+ const time_t shm_createtime)
47274+{
47275+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47276+ struct task_struct *p;
47277+ time_t starttime;
47278+
47279+ if (unlikely(!grsec_enable_chroot_shmat))
47280+ return 1;
47281+
47282+ if (likely(!proc_is_chrooted(current)))
47283+ return 1;
47284+
47285+ rcu_read_lock();
47286+ read_lock(&tasklist_lock);
47287+
47288+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47289+ starttime = p->start_time.tv_sec;
47290+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47291+ if (have_same_root(current, p)) {
47292+ goto allow;
47293+ } else {
47294+ read_unlock(&tasklist_lock);
47295+ rcu_read_unlock();
47296+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47297+ return 0;
47298+ }
47299+ }
47300+ /* creator exited, pid reuse, fall through to next check */
47301+ }
47302+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47303+ if (unlikely(!have_same_root(current, p))) {
47304+ read_unlock(&tasklist_lock);
47305+ rcu_read_unlock();
47306+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47307+ return 0;
47308+ }
47309+ }
47310+
47311+allow:
47312+ read_unlock(&tasklist_lock);
47313+ rcu_read_unlock();
47314+#endif
47315+ return 1;
47316+}
47317+
47318+void
47319+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47320+{
47321+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47322+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47323+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47324+#endif
47325+ return;
47326+}
47327+
47328+int
47329+gr_handle_chroot_mknod(const struct dentry *dentry,
47330+ const struct vfsmount *mnt, const int mode)
47331+{
47332+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47333+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47334+ proc_is_chrooted(current)) {
47335+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47336+ return -EPERM;
47337+ }
47338+#endif
47339+ return 0;
47340+}
47341+
47342+int
47343+gr_handle_chroot_mount(const struct dentry *dentry,
47344+ const struct vfsmount *mnt, const char *dev_name)
47345+{
47346+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47347+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47348+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47349+ return -EPERM;
47350+ }
47351+#endif
47352+ return 0;
47353+}
47354+
47355+int
47356+gr_handle_chroot_pivot(void)
47357+{
47358+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47359+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47360+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47361+ return -EPERM;
47362+ }
47363+#endif
47364+ return 0;
47365+}
47366+
47367+int
47368+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47369+{
47370+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47371+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47372+ !gr_is_outside_chroot(dentry, mnt)) {
47373+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47374+ return -EPERM;
47375+ }
47376+#endif
47377+ return 0;
47378+}
47379+
47380+int
47381+gr_handle_chroot_caps(struct path *path)
47382+{
47383+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47384+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47385+ (init_task.fs->root.dentry != path->dentry) &&
47386+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47387+
47388+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47389+ const struct cred *old = current_cred();
47390+ struct cred *new = prepare_creds();
47391+ if (new == NULL)
47392+ return 1;
47393+
47394+ new->cap_permitted = cap_drop(old->cap_permitted,
47395+ chroot_caps);
47396+ new->cap_inheritable = cap_drop(old->cap_inheritable,
47397+ chroot_caps);
47398+ new->cap_effective = cap_drop(old->cap_effective,
47399+ chroot_caps);
47400+
47401+ commit_creds(new);
47402+
47403+ return 0;
47404+ }
47405+#endif
47406+ return 0;
47407+}
47408+
47409+int
47410+gr_handle_chroot_sysctl(const int op)
47411+{
47412+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47413+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47414+ proc_is_chrooted(current))
47415+ return -EACCES;
47416+#endif
47417+ return 0;
47418+}
47419+
47420+void
47421+gr_handle_chroot_chdir(struct path *path)
47422+{
47423+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47424+ if (grsec_enable_chroot_chdir)
47425+ set_fs_pwd(current->fs, path);
47426+#endif
47427+ return;
47428+}
47429+
47430+int
47431+gr_handle_chroot_chmod(const struct dentry *dentry,
47432+ const struct vfsmount *mnt, const int mode)
47433+{
47434+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47435+ /* allow chmod +s on directories, but not files */
47436+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47437+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47438+ proc_is_chrooted(current)) {
47439+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47440+ return -EPERM;
47441+ }
47442+#endif
47443+ return 0;
47444+}
47445+
47446+#ifdef CONFIG_SECURITY
47447+EXPORT_SYMBOL(gr_handle_chroot_caps);
47448+#endif
47449diff -urNp linux-3.0.3/grsecurity/grsec_disabled.c linux-3.0.3/grsecurity/grsec_disabled.c
47450--- linux-3.0.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47451+++ linux-3.0.3/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47452@@ -0,0 +1,447 @@
47453+#include <linux/kernel.h>
47454+#include <linux/module.h>
47455+#include <linux/sched.h>
47456+#include <linux/file.h>
47457+#include <linux/fs.h>
47458+#include <linux/kdev_t.h>
47459+#include <linux/net.h>
47460+#include <linux/in.h>
47461+#include <linux/ip.h>
47462+#include <linux/skbuff.h>
47463+#include <linux/sysctl.h>
47464+
47465+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47466+void
47467+pax_set_initial_flags(struct linux_binprm *bprm)
47468+{
47469+ return;
47470+}
47471+#endif
47472+
47473+#ifdef CONFIG_SYSCTL
47474+__u32
47475+gr_handle_sysctl(const struct ctl_table * table, const int op)
47476+{
47477+ return 0;
47478+}
47479+#endif
47480+
47481+#ifdef CONFIG_TASKSTATS
47482+int gr_is_taskstats_denied(int pid)
47483+{
47484+ return 0;
47485+}
47486+#endif
47487+
47488+int
47489+gr_acl_is_enabled(void)
47490+{
47491+ return 0;
47492+}
47493+
47494+int
47495+gr_handle_rawio(const struct inode *inode)
47496+{
47497+ return 0;
47498+}
47499+
47500+void
47501+gr_acl_handle_psacct(struct task_struct *task, const long code)
47502+{
47503+ return;
47504+}
47505+
47506+int
47507+gr_handle_ptrace(struct task_struct *task, const long request)
47508+{
47509+ return 0;
47510+}
47511+
47512+int
47513+gr_handle_proc_ptrace(struct task_struct *task)
47514+{
47515+ return 0;
47516+}
47517+
47518+void
47519+gr_learn_resource(const struct task_struct *task,
47520+ const int res, const unsigned long wanted, const int gt)
47521+{
47522+ return;
47523+}
47524+
47525+int
47526+gr_set_acls(const int type)
47527+{
47528+ return 0;
47529+}
47530+
47531+int
47532+gr_check_hidden_task(const struct task_struct *tsk)
47533+{
47534+ return 0;
47535+}
47536+
47537+int
47538+gr_check_protected_task(const struct task_struct *task)
47539+{
47540+ return 0;
47541+}
47542+
47543+int
47544+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47545+{
47546+ return 0;
47547+}
47548+
47549+void
47550+gr_copy_label(struct task_struct *tsk)
47551+{
47552+ return;
47553+}
47554+
47555+void
47556+gr_set_pax_flags(struct task_struct *task)
47557+{
47558+ return;
47559+}
47560+
47561+int
47562+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47563+ const int unsafe_share)
47564+{
47565+ return 0;
47566+}
47567+
47568+void
47569+gr_handle_delete(const ino_t ino, const dev_t dev)
47570+{
47571+ return;
47572+}
47573+
47574+void
47575+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47576+{
47577+ return;
47578+}
47579+
47580+void
47581+gr_handle_crash(struct task_struct *task, const int sig)
47582+{
47583+ return;
47584+}
47585+
47586+int
47587+gr_check_crash_exec(const struct file *filp)
47588+{
47589+ return 0;
47590+}
47591+
47592+int
47593+gr_check_crash_uid(const uid_t uid)
47594+{
47595+ return 0;
47596+}
47597+
47598+void
47599+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47600+ struct dentry *old_dentry,
47601+ struct dentry *new_dentry,
47602+ struct vfsmount *mnt, const __u8 replace)
47603+{
47604+ return;
47605+}
47606+
47607+int
47608+gr_search_socket(const int family, const int type, const int protocol)
47609+{
47610+ return 1;
47611+}
47612+
47613+int
47614+gr_search_connectbind(const int mode, const struct socket *sock,
47615+ const struct sockaddr_in *addr)
47616+{
47617+ return 0;
47618+}
47619+
47620+int
47621+gr_is_capable(const int cap)
47622+{
47623+ return 1;
47624+}
47625+
47626+int
47627+gr_is_capable_nolog(const int cap)
47628+{
47629+ return 1;
47630+}
47631+
47632+void
47633+gr_handle_alertkill(struct task_struct *task)
47634+{
47635+ return;
47636+}
47637+
47638+__u32
47639+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47640+{
47641+ return 1;
47642+}
47643+
47644+__u32
47645+gr_acl_handle_hidden_file(const struct dentry * dentry,
47646+ const struct vfsmount * mnt)
47647+{
47648+ return 1;
47649+}
47650+
47651+__u32
47652+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47653+ const int fmode)
47654+{
47655+ return 1;
47656+}
47657+
47658+__u32
47659+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47660+{
47661+ return 1;
47662+}
47663+
47664+__u32
47665+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47666+{
47667+ return 1;
47668+}
47669+
47670+int
47671+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47672+ unsigned int *vm_flags)
47673+{
47674+ return 1;
47675+}
47676+
47677+__u32
47678+gr_acl_handle_truncate(const struct dentry * dentry,
47679+ const struct vfsmount * mnt)
47680+{
47681+ return 1;
47682+}
47683+
47684+__u32
47685+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47686+{
47687+ return 1;
47688+}
47689+
47690+__u32
47691+gr_acl_handle_access(const struct dentry * dentry,
47692+ const struct vfsmount * mnt, const int fmode)
47693+{
47694+ return 1;
47695+}
47696+
47697+__u32
47698+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47699+ mode_t mode)
47700+{
47701+ return 1;
47702+}
47703+
47704+__u32
47705+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47706+ mode_t mode)
47707+{
47708+ return 1;
47709+}
47710+
47711+__u32
47712+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47713+{
47714+ return 1;
47715+}
47716+
47717+__u32
47718+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47719+{
47720+ return 1;
47721+}
47722+
47723+void
47724+grsecurity_init(void)
47725+{
47726+ return;
47727+}
47728+
47729+__u32
47730+gr_acl_handle_mknod(const struct dentry * new_dentry,
47731+ const struct dentry * parent_dentry,
47732+ const struct vfsmount * parent_mnt,
47733+ const int mode)
47734+{
47735+ return 1;
47736+}
47737+
47738+__u32
47739+gr_acl_handle_mkdir(const struct dentry * new_dentry,
47740+ const struct dentry * parent_dentry,
47741+ const struct vfsmount * parent_mnt)
47742+{
47743+ return 1;
47744+}
47745+
47746+__u32
47747+gr_acl_handle_symlink(const struct dentry * new_dentry,
47748+ const struct dentry * parent_dentry,
47749+ const struct vfsmount * parent_mnt, const char *from)
47750+{
47751+ return 1;
47752+}
47753+
47754+__u32
47755+gr_acl_handle_link(const struct dentry * new_dentry,
47756+ const struct dentry * parent_dentry,
47757+ const struct vfsmount * parent_mnt,
47758+ const struct dentry * old_dentry,
47759+ const struct vfsmount * old_mnt, const char *to)
47760+{
47761+ return 1;
47762+}
47763+
47764+int
47765+gr_acl_handle_rename(const struct dentry *new_dentry,
47766+ const struct dentry *parent_dentry,
47767+ const struct vfsmount *parent_mnt,
47768+ const struct dentry *old_dentry,
47769+ const struct inode *old_parent_inode,
47770+ const struct vfsmount *old_mnt, const char *newname)
47771+{
47772+ return 0;
47773+}
47774+
47775+int
47776+gr_acl_handle_filldir(const struct file *file, const char *name,
47777+ const int namelen, const ino_t ino)
47778+{
47779+ return 1;
47780+}
47781+
47782+int
47783+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47784+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47785+{
47786+ return 1;
47787+}
47788+
47789+int
47790+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47791+{
47792+ return 0;
47793+}
47794+
47795+int
47796+gr_search_accept(const struct socket *sock)
47797+{
47798+ return 0;
47799+}
47800+
47801+int
47802+gr_search_listen(const struct socket *sock)
47803+{
47804+ return 0;
47805+}
47806+
47807+int
47808+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47809+{
47810+ return 0;
47811+}
47812+
47813+__u32
47814+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47815+{
47816+ return 1;
47817+}
47818+
47819+__u32
47820+gr_acl_handle_creat(const struct dentry * dentry,
47821+ const struct dentry * p_dentry,
47822+ const struct vfsmount * p_mnt, const int fmode,
47823+ const int imode)
47824+{
47825+ return 1;
47826+}
47827+
47828+void
47829+gr_acl_handle_exit(void)
47830+{
47831+ return;
47832+}
47833+
47834+int
47835+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47836+{
47837+ return 1;
47838+}
47839+
47840+void
47841+gr_set_role_label(const uid_t uid, const gid_t gid)
47842+{
47843+ return;
47844+}
47845+
47846+int
47847+gr_acl_handle_procpidmem(const struct task_struct *task)
47848+{
47849+ return 0;
47850+}
47851+
47852+int
47853+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47854+{
47855+ return 0;
47856+}
47857+
47858+int
47859+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47860+{
47861+ return 0;
47862+}
47863+
47864+void
47865+gr_set_kernel_label(struct task_struct *task)
47866+{
47867+ return;
47868+}
47869+
47870+int
47871+gr_check_user_change(int real, int effective, int fs)
47872+{
47873+ return 0;
47874+}
47875+
47876+int
47877+gr_check_group_change(int real, int effective, int fs)
47878+{
47879+ return 0;
47880+}
47881+
47882+int gr_acl_enable_at_secure(void)
47883+{
47884+ return 0;
47885+}
47886+
47887+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47888+{
47889+ return dentry->d_inode->i_sb->s_dev;
47890+}
47891+
47892+EXPORT_SYMBOL(gr_is_capable);
47893+EXPORT_SYMBOL(gr_is_capable_nolog);
47894+EXPORT_SYMBOL(gr_learn_resource);
47895+EXPORT_SYMBOL(gr_set_kernel_label);
47896+#ifdef CONFIG_SECURITY
47897+EXPORT_SYMBOL(gr_check_user_change);
47898+EXPORT_SYMBOL(gr_check_group_change);
47899+#endif
47900diff -urNp linux-3.0.3/grsecurity/grsec_exec.c linux-3.0.3/grsecurity/grsec_exec.c
47901--- linux-3.0.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47902+++ linux-3.0.3/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
47903@@ -0,0 +1,72 @@
47904+#include <linux/kernel.h>
47905+#include <linux/sched.h>
47906+#include <linux/file.h>
47907+#include <linux/binfmts.h>
47908+#include <linux/fs.h>
47909+#include <linux/types.h>
47910+#include <linux/grdefs.h>
47911+#include <linux/grsecurity.h>
47912+#include <linux/grinternal.h>
47913+#include <linux/capability.h>
47914+
47915+#include <asm/uaccess.h>
47916+
47917+#ifdef CONFIG_GRKERNSEC_EXECLOG
47918+static char gr_exec_arg_buf[132];
47919+static DEFINE_MUTEX(gr_exec_arg_mutex);
47920+#endif
47921+
47922+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47923+
47924+void
47925+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47926+{
47927+#ifdef CONFIG_GRKERNSEC_EXECLOG
47928+ char *grarg = gr_exec_arg_buf;
47929+ unsigned int i, x, execlen = 0;
47930+ char c;
47931+
47932+ if (!((grsec_enable_execlog && grsec_enable_group &&
47933+ in_group_p(grsec_audit_gid))
47934+ || (grsec_enable_execlog && !grsec_enable_group)))
47935+ return;
47936+
47937+ mutex_lock(&gr_exec_arg_mutex);
47938+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
47939+
47940+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
47941+ const char __user *p;
47942+ unsigned int len;
47943+
47944+ p = get_user_arg_ptr(argv, i);
47945+ if (IS_ERR(p))
47946+ goto log;
47947+
47948+ len = strnlen_user(p, 128 - execlen);
47949+ if (len > 128 - execlen)
47950+ len = 128 - execlen;
47951+ else if (len > 0)
47952+ len--;
47953+ if (copy_from_user(grarg + execlen, p, len))
47954+ goto log;
47955+
47956+ /* rewrite unprintable characters */
47957+ for (x = 0; x < len; x++) {
47958+ c = *(grarg + execlen + x);
47959+ if (c < 32 || c > 126)
47960+ *(grarg + execlen + x) = ' ';
47961+ }
47962+
47963+ execlen += len;
47964+ *(grarg + execlen) = ' ';
47965+ *(grarg + execlen + 1) = '\0';
47966+ execlen++;
47967+ }
47968+
47969+ log:
47970+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47971+ bprm->file->f_path.mnt, grarg);
47972+ mutex_unlock(&gr_exec_arg_mutex);
47973+#endif
47974+ return;
47975+}
47976diff -urNp linux-3.0.3/grsecurity/grsec_fifo.c linux-3.0.3/grsecurity/grsec_fifo.c
47977--- linux-3.0.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
47978+++ linux-3.0.3/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
47979@@ -0,0 +1,24 @@
47980+#include <linux/kernel.h>
47981+#include <linux/sched.h>
47982+#include <linux/fs.h>
47983+#include <linux/file.h>
47984+#include <linux/grinternal.h>
47985+
47986+int
47987+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
47988+ const struct dentry *dir, const int flag, const int acc_mode)
47989+{
47990+#ifdef CONFIG_GRKERNSEC_FIFO
47991+ const struct cred *cred = current_cred();
47992+
47993+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
47994+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
47995+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
47996+ (cred->fsuid != dentry->d_inode->i_uid)) {
47997+ if (!inode_permission(dentry->d_inode, acc_mode))
47998+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
47999+ return -EACCES;
48000+ }
48001+#endif
48002+ return 0;
48003+}
48004diff -urNp linux-3.0.3/grsecurity/grsec_fork.c linux-3.0.3/grsecurity/grsec_fork.c
48005--- linux-3.0.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48006+++ linux-3.0.3/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
48007@@ -0,0 +1,23 @@
48008+#include <linux/kernel.h>
48009+#include <linux/sched.h>
48010+#include <linux/grsecurity.h>
48011+#include <linux/grinternal.h>
48012+#include <linux/errno.h>
48013+
48014+void
48015+gr_log_forkfail(const int retval)
48016+{
48017+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48018+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48019+ switch (retval) {
48020+ case -EAGAIN:
48021+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48022+ break;
48023+ case -ENOMEM:
48024+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48025+ break;
48026+ }
48027+ }
48028+#endif
48029+ return;
48030+}
48031diff -urNp linux-3.0.3/grsecurity/grsec_init.c linux-3.0.3/grsecurity/grsec_init.c
48032--- linux-3.0.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48033+++ linux-3.0.3/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
48034@@ -0,0 +1,269 @@
48035+#include <linux/kernel.h>
48036+#include <linux/sched.h>
48037+#include <linux/mm.h>
48038+#include <linux/gracl.h>
48039+#include <linux/slab.h>
48040+#include <linux/vmalloc.h>
48041+#include <linux/percpu.h>
48042+#include <linux/module.h>
48043+
48044+int grsec_enable_brute;
48045+int grsec_enable_link;
48046+int grsec_enable_dmesg;
48047+int grsec_enable_harden_ptrace;
48048+int grsec_enable_fifo;
48049+int grsec_enable_execlog;
48050+int grsec_enable_signal;
48051+int grsec_enable_forkfail;
48052+int grsec_enable_audit_ptrace;
48053+int grsec_enable_time;
48054+int grsec_enable_audit_textrel;
48055+int grsec_enable_group;
48056+int grsec_audit_gid;
48057+int grsec_enable_chdir;
48058+int grsec_enable_mount;
48059+int grsec_enable_rofs;
48060+int grsec_enable_chroot_findtask;
48061+int grsec_enable_chroot_mount;
48062+int grsec_enable_chroot_shmat;
48063+int grsec_enable_chroot_fchdir;
48064+int grsec_enable_chroot_double;
48065+int grsec_enable_chroot_pivot;
48066+int grsec_enable_chroot_chdir;
48067+int grsec_enable_chroot_chmod;
48068+int grsec_enable_chroot_mknod;
48069+int grsec_enable_chroot_nice;
48070+int grsec_enable_chroot_execlog;
48071+int grsec_enable_chroot_caps;
48072+int grsec_enable_chroot_sysctl;
48073+int grsec_enable_chroot_unix;
48074+int grsec_enable_tpe;
48075+int grsec_tpe_gid;
48076+int grsec_enable_blackhole;
48077+#ifdef CONFIG_IPV6_MODULE
48078+EXPORT_SYMBOL(grsec_enable_blackhole);
48079+#endif
48080+int grsec_lastack_retries;
48081+int grsec_enable_tpe_all;
48082+int grsec_enable_tpe_invert;
48083+int grsec_enable_socket_all;
48084+int grsec_socket_all_gid;
48085+int grsec_enable_socket_client;
48086+int grsec_socket_client_gid;
48087+int grsec_enable_socket_server;
48088+int grsec_socket_server_gid;
48089+int grsec_resource_logging;
48090+int grsec_disable_privio;
48091+int grsec_enable_log_rwxmaps;
48092+int grsec_lock;
48093+
48094+DEFINE_SPINLOCK(grsec_alert_lock);
48095+unsigned long grsec_alert_wtime = 0;
48096+unsigned long grsec_alert_fyet = 0;
48097+
48098+DEFINE_SPINLOCK(grsec_audit_lock);
48099+
48100+DEFINE_RWLOCK(grsec_exec_file_lock);
48101+
48102+char *gr_shared_page[4];
48103+
48104+char *gr_alert_log_fmt;
48105+char *gr_audit_log_fmt;
48106+char *gr_alert_log_buf;
48107+char *gr_audit_log_buf;
48108+
48109+extern struct gr_arg *gr_usermode;
48110+extern unsigned char *gr_system_salt;
48111+extern unsigned char *gr_system_sum;
48112+
48113+void __init
48114+grsecurity_init(void)
48115+{
48116+ int j;
48117+ /* create the per-cpu shared pages */
48118+
48119+#ifdef CONFIG_X86
48120+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48121+#endif
48122+
48123+ for (j = 0; j < 4; j++) {
48124+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48125+ if (gr_shared_page[j] == NULL) {
48126+ panic("Unable to allocate grsecurity shared page");
48127+ return;
48128+ }
48129+ }
48130+
48131+ /* allocate log buffers */
48132+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48133+ if (!gr_alert_log_fmt) {
48134+ panic("Unable to allocate grsecurity alert log format buffer");
48135+ return;
48136+ }
48137+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48138+ if (!gr_audit_log_fmt) {
48139+ panic("Unable to allocate grsecurity audit log format buffer");
48140+ return;
48141+ }
48142+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48143+ if (!gr_alert_log_buf) {
48144+ panic("Unable to allocate grsecurity alert log buffer");
48145+ return;
48146+ }
48147+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48148+ if (!gr_audit_log_buf) {
48149+ panic("Unable to allocate grsecurity audit log buffer");
48150+ return;
48151+ }
48152+
48153+ /* allocate memory for authentication structure */
48154+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48155+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48156+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48157+
48158+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48159+ panic("Unable to allocate grsecurity authentication structure");
48160+ return;
48161+ }
48162+
48163+
48164+#ifdef CONFIG_GRKERNSEC_IO
48165+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48166+ grsec_disable_privio = 1;
48167+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48168+ grsec_disable_privio = 1;
48169+#else
48170+ grsec_disable_privio = 0;
48171+#endif
48172+#endif
48173+
48174+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48175+ /* for backward compatibility, tpe_invert always defaults to on if
48176+ enabled in the kernel
48177+ */
48178+ grsec_enable_tpe_invert = 1;
48179+#endif
48180+
48181+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48182+#ifndef CONFIG_GRKERNSEC_SYSCTL
48183+ grsec_lock = 1;
48184+#endif
48185+
48186+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48187+ grsec_enable_audit_textrel = 1;
48188+#endif
48189+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48190+ grsec_enable_log_rwxmaps = 1;
48191+#endif
48192+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48193+ grsec_enable_group = 1;
48194+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48195+#endif
48196+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48197+ grsec_enable_chdir = 1;
48198+#endif
48199+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48200+ grsec_enable_harden_ptrace = 1;
48201+#endif
48202+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48203+ grsec_enable_mount = 1;
48204+#endif
48205+#ifdef CONFIG_GRKERNSEC_LINK
48206+ grsec_enable_link = 1;
48207+#endif
48208+#ifdef CONFIG_GRKERNSEC_BRUTE
48209+ grsec_enable_brute = 1;
48210+#endif
48211+#ifdef CONFIG_GRKERNSEC_DMESG
48212+ grsec_enable_dmesg = 1;
48213+#endif
48214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48215+ grsec_enable_blackhole = 1;
48216+ grsec_lastack_retries = 4;
48217+#endif
48218+#ifdef CONFIG_GRKERNSEC_FIFO
48219+ grsec_enable_fifo = 1;
48220+#endif
48221+#ifdef CONFIG_GRKERNSEC_EXECLOG
48222+ grsec_enable_execlog = 1;
48223+#endif
48224+#ifdef CONFIG_GRKERNSEC_SIGNAL
48225+ grsec_enable_signal = 1;
48226+#endif
48227+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48228+ grsec_enable_forkfail = 1;
48229+#endif
48230+#ifdef CONFIG_GRKERNSEC_TIME
48231+ grsec_enable_time = 1;
48232+#endif
48233+#ifdef CONFIG_GRKERNSEC_RESLOG
48234+ grsec_resource_logging = 1;
48235+#endif
48236+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48237+ grsec_enable_chroot_findtask = 1;
48238+#endif
48239+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48240+ grsec_enable_chroot_unix = 1;
48241+#endif
48242+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48243+ grsec_enable_chroot_mount = 1;
48244+#endif
48245+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48246+ grsec_enable_chroot_fchdir = 1;
48247+#endif
48248+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48249+ grsec_enable_chroot_shmat = 1;
48250+#endif
48251+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48252+ grsec_enable_audit_ptrace = 1;
48253+#endif
48254+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48255+ grsec_enable_chroot_double = 1;
48256+#endif
48257+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48258+ grsec_enable_chroot_pivot = 1;
48259+#endif
48260+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48261+ grsec_enable_chroot_chdir = 1;
48262+#endif
48263+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48264+ grsec_enable_chroot_chmod = 1;
48265+#endif
48266+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48267+ grsec_enable_chroot_mknod = 1;
48268+#endif
48269+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48270+ grsec_enable_chroot_nice = 1;
48271+#endif
48272+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48273+ grsec_enable_chroot_execlog = 1;
48274+#endif
48275+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48276+ grsec_enable_chroot_caps = 1;
48277+#endif
48278+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48279+ grsec_enable_chroot_sysctl = 1;
48280+#endif
48281+#ifdef CONFIG_GRKERNSEC_TPE
48282+ grsec_enable_tpe = 1;
48283+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48284+#ifdef CONFIG_GRKERNSEC_TPE_ALL
48285+ grsec_enable_tpe_all = 1;
48286+#endif
48287+#endif
48288+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48289+ grsec_enable_socket_all = 1;
48290+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48291+#endif
48292+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48293+ grsec_enable_socket_client = 1;
48294+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48295+#endif
48296+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48297+ grsec_enable_socket_server = 1;
48298+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48299+#endif
48300+#endif
48301+
48302+ return;
48303+}
48304diff -urNp linux-3.0.3/grsecurity/grsec_link.c linux-3.0.3/grsecurity/grsec_link.c
48305--- linux-3.0.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48306+++ linux-3.0.3/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48307@@ -0,0 +1,43 @@
48308+#include <linux/kernel.h>
48309+#include <linux/sched.h>
48310+#include <linux/fs.h>
48311+#include <linux/file.h>
48312+#include <linux/grinternal.h>
48313+
48314+int
48315+gr_handle_follow_link(const struct inode *parent,
48316+ const struct inode *inode,
48317+ const struct dentry *dentry, const struct vfsmount *mnt)
48318+{
48319+#ifdef CONFIG_GRKERNSEC_LINK
48320+ const struct cred *cred = current_cred();
48321+
48322+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48323+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48324+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48325+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48326+ return -EACCES;
48327+ }
48328+#endif
48329+ return 0;
48330+}
48331+
48332+int
48333+gr_handle_hardlink(const struct dentry *dentry,
48334+ const struct vfsmount *mnt,
48335+ struct inode *inode, const int mode, const char *to)
48336+{
48337+#ifdef CONFIG_GRKERNSEC_LINK
48338+ const struct cred *cred = current_cred();
48339+
48340+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48341+ (!S_ISREG(mode) || (mode & S_ISUID) ||
48342+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48343+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48344+ !capable(CAP_FOWNER) && cred->uid) {
48345+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48346+ return -EPERM;
48347+ }
48348+#endif
48349+ return 0;
48350+}
48351diff -urNp linux-3.0.3/grsecurity/grsec_log.c linux-3.0.3/grsecurity/grsec_log.c
48352--- linux-3.0.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48353+++ linux-3.0.3/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48354@@ -0,0 +1,310 @@
48355+#include <linux/kernel.h>
48356+#include <linux/sched.h>
48357+#include <linux/file.h>
48358+#include <linux/tty.h>
48359+#include <linux/fs.h>
48360+#include <linux/grinternal.h>
48361+
48362+#ifdef CONFIG_TREE_PREEMPT_RCU
48363+#define DISABLE_PREEMPT() preempt_disable()
48364+#define ENABLE_PREEMPT() preempt_enable()
48365+#else
48366+#define DISABLE_PREEMPT()
48367+#define ENABLE_PREEMPT()
48368+#endif
48369+
48370+#define BEGIN_LOCKS(x) \
48371+ DISABLE_PREEMPT(); \
48372+ rcu_read_lock(); \
48373+ read_lock(&tasklist_lock); \
48374+ read_lock(&grsec_exec_file_lock); \
48375+ if (x != GR_DO_AUDIT) \
48376+ spin_lock(&grsec_alert_lock); \
48377+ else \
48378+ spin_lock(&grsec_audit_lock)
48379+
48380+#define END_LOCKS(x) \
48381+ if (x != GR_DO_AUDIT) \
48382+ spin_unlock(&grsec_alert_lock); \
48383+ else \
48384+ spin_unlock(&grsec_audit_lock); \
48385+ read_unlock(&grsec_exec_file_lock); \
48386+ read_unlock(&tasklist_lock); \
48387+ rcu_read_unlock(); \
48388+ ENABLE_PREEMPT(); \
48389+ if (x == GR_DONT_AUDIT) \
48390+ gr_handle_alertkill(current)
48391+
48392+enum {
48393+ FLOODING,
48394+ NO_FLOODING
48395+};
48396+
48397+extern char *gr_alert_log_fmt;
48398+extern char *gr_audit_log_fmt;
48399+extern char *gr_alert_log_buf;
48400+extern char *gr_audit_log_buf;
48401+
48402+static int gr_log_start(int audit)
48403+{
48404+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48405+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48406+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48407+
48408+ if (audit == GR_DO_AUDIT)
48409+ goto set_fmt;
48410+
48411+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48412+ grsec_alert_wtime = jiffies;
48413+ grsec_alert_fyet = 0;
48414+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48415+ grsec_alert_fyet++;
48416+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48417+ grsec_alert_wtime = jiffies;
48418+ grsec_alert_fyet++;
48419+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48420+ return FLOODING;
48421+ } else return FLOODING;
48422+
48423+set_fmt:
48424+ memset(buf, 0, PAGE_SIZE);
48425+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
48426+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48427+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48428+ } else if (current->signal->curr_ip) {
48429+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48430+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48431+ } else if (gr_acl_is_enabled()) {
48432+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48433+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48434+ } else {
48435+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
48436+ strcpy(buf, fmt);
48437+ }
48438+
48439+ return NO_FLOODING;
48440+}
48441+
48442+static void gr_log_middle(int audit, const char *msg, va_list ap)
48443+ __attribute__ ((format (printf, 2, 0)));
48444+
48445+static void gr_log_middle(int audit, const char *msg, va_list ap)
48446+{
48447+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48448+ unsigned int len = strlen(buf);
48449+
48450+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48451+
48452+ return;
48453+}
48454+
48455+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48456+ __attribute__ ((format (printf, 2, 3)));
48457+
48458+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48459+{
48460+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48461+ unsigned int len = strlen(buf);
48462+ va_list ap;
48463+
48464+ va_start(ap, msg);
48465+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48466+ va_end(ap);
48467+
48468+ return;
48469+}
48470+
48471+static void gr_log_end(int audit)
48472+{
48473+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48474+ unsigned int len = strlen(buf);
48475+
48476+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48477+ printk("%s\n", buf);
48478+
48479+ return;
48480+}
48481+
48482+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48483+{
48484+ int logtype;
48485+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48486+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48487+ void *voidptr = NULL;
48488+ int num1 = 0, num2 = 0;
48489+ unsigned long ulong1 = 0, ulong2 = 0;
48490+ struct dentry *dentry = NULL;
48491+ struct vfsmount *mnt = NULL;
48492+ struct file *file = NULL;
48493+ struct task_struct *task = NULL;
48494+ const struct cred *cred, *pcred;
48495+ va_list ap;
48496+
48497+ BEGIN_LOCKS(audit);
48498+ logtype = gr_log_start(audit);
48499+ if (logtype == FLOODING) {
48500+ END_LOCKS(audit);
48501+ return;
48502+ }
48503+ va_start(ap, argtypes);
48504+ switch (argtypes) {
48505+ case GR_TTYSNIFF:
48506+ task = va_arg(ap, struct task_struct *);
48507+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48508+ break;
48509+ case GR_SYSCTL_HIDDEN:
48510+ str1 = va_arg(ap, char *);
48511+ gr_log_middle_varargs(audit, msg, result, str1);
48512+ break;
48513+ case GR_RBAC:
48514+ dentry = va_arg(ap, struct dentry *);
48515+ mnt = va_arg(ap, struct vfsmount *);
48516+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48517+ break;
48518+ case GR_RBAC_STR:
48519+ dentry = va_arg(ap, struct dentry *);
48520+ mnt = va_arg(ap, struct vfsmount *);
48521+ str1 = va_arg(ap, char *);
48522+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48523+ break;
48524+ case GR_STR_RBAC:
48525+ str1 = va_arg(ap, char *);
48526+ dentry = va_arg(ap, struct dentry *);
48527+ mnt = va_arg(ap, struct vfsmount *);
48528+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48529+ break;
48530+ case GR_RBAC_MODE2:
48531+ dentry = va_arg(ap, struct dentry *);
48532+ mnt = va_arg(ap, struct vfsmount *);
48533+ str1 = va_arg(ap, char *);
48534+ str2 = va_arg(ap, char *);
48535+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48536+ break;
48537+ case GR_RBAC_MODE3:
48538+ dentry = va_arg(ap, struct dentry *);
48539+ mnt = va_arg(ap, struct vfsmount *);
48540+ str1 = va_arg(ap, char *);
48541+ str2 = va_arg(ap, char *);
48542+ str3 = va_arg(ap, char *);
48543+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48544+ break;
48545+ case GR_FILENAME:
48546+ dentry = va_arg(ap, struct dentry *);
48547+ mnt = va_arg(ap, struct vfsmount *);
48548+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48549+ break;
48550+ case GR_STR_FILENAME:
48551+ str1 = va_arg(ap, char *);
48552+ dentry = va_arg(ap, struct dentry *);
48553+ mnt = va_arg(ap, struct vfsmount *);
48554+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48555+ break;
48556+ case GR_FILENAME_STR:
48557+ dentry = va_arg(ap, struct dentry *);
48558+ mnt = va_arg(ap, struct vfsmount *);
48559+ str1 = va_arg(ap, char *);
48560+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48561+ break;
48562+ case GR_FILENAME_TWO_INT:
48563+ dentry = va_arg(ap, struct dentry *);
48564+ mnt = va_arg(ap, struct vfsmount *);
48565+ num1 = va_arg(ap, int);
48566+ num2 = va_arg(ap, int);
48567+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48568+ break;
48569+ case GR_FILENAME_TWO_INT_STR:
48570+ dentry = va_arg(ap, struct dentry *);
48571+ mnt = va_arg(ap, struct vfsmount *);
48572+ num1 = va_arg(ap, int);
48573+ num2 = va_arg(ap, int);
48574+ str1 = va_arg(ap, char *);
48575+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48576+ break;
48577+ case GR_TEXTREL:
48578+ file = va_arg(ap, struct file *);
48579+ ulong1 = va_arg(ap, unsigned long);
48580+ ulong2 = va_arg(ap, unsigned long);
48581+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48582+ break;
48583+ case GR_PTRACE:
48584+ task = va_arg(ap, struct task_struct *);
48585+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48586+ break;
48587+ case GR_RESOURCE:
48588+ task = va_arg(ap, struct task_struct *);
48589+ cred = __task_cred(task);
48590+ pcred = __task_cred(task->real_parent);
48591+ ulong1 = va_arg(ap, unsigned long);
48592+ str1 = va_arg(ap, char *);
48593+ ulong2 = va_arg(ap, unsigned long);
48594+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48595+ break;
48596+ case GR_CAP:
48597+ task = va_arg(ap, struct task_struct *);
48598+ cred = __task_cred(task);
48599+ pcred = __task_cred(task->real_parent);
48600+ str1 = va_arg(ap, char *);
48601+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48602+ break;
48603+ case GR_SIG:
48604+ str1 = va_arg(ap, char *);
48605+ voidptr = va_arg(ap, void *);
48606+ gr_log_middle_varargs(audit, msg, str1, voidptr);
48607+ break;
48608+ case GR_SIG2:
48609+ task = va_arg(ap, struct task_struct *);
48610+ cred = __task_cred(task);
48611+ pcred = __task_cred(task->real_parent);
48612+ num1 = va_arg(ap, int);
48613+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48614+ break;
48615+ case GR_CRASH1:
48616+ task = va_arg(ap, struct task_struct *);
48617+ cred = __task_cred(task);
48618+ pcred = __task_cred(task->real_parent);
48619+ ulong1 = va_arg(ap, unsigned long);
48620+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48621+ break;
48622+ case GR_CRASH2:
48623+ task = va_arg(ap, struct task_struct *);
48624+ cred = __task_cred(task);
48625+ pcred = __task_cred(task->real_parent);
48626+ ulong1 = va_arg(ap, unsigned long);
48627+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48628+ break;
48629+ case GR_RWXMAP:
48630+ file = va_arg(ap, struct file *);
48631+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48632+ break;
48633+ case GR_PSACCT:
48634+ {
48635+ unsigned int wday, cday;
48636+ __u8 whr, chr;
48637+ __u8 wmin, cmin;
48638+ __u8 wsec, csec;
48639+ char cur_tty[64] = { 0 };
48640+ char parent_tty[64] = { 0 };
48641+
48642+ task = va_arg(ap, struct task_struct *);
48643+ wday = va_arg(ap, unsigned int);
48644+ cday = va_arg(ap, unsigned int);
48645+ whr = va_arg(ap, int);
48646+ chr = va_arg(ap, int);
48647+ wmin = va_arg(ap, int);
48648+ cmin = va_arg(ap, int);
48649+ wsec = va_arg(ap, int);
48650+ csec = va_arg(ap, int);
48651+ ulong1 = va_arg(ap, unsigned long);
48652+ cred = __task_cred(task);
48653+ pcred = __task_cred(task->real_parent);
48654+
48655+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48656+ }
48657+ break;
48658+ default:
48659+ gr_log_middle(audit, msg, ap);
48660+ }
48661+ va_end(ap);
48662+ gr_log_end(audit);
48663+ END_LOCKS(audit);
48664+}
48665diff -urNp linux-3.0.3/grsecurity/grsec_mem.c linux-3.0.3/grsecurity/grsec_mem.c
48666--- linux-3.0.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48667+++ linux-3.0.3/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48668@@ -0,0 +1,33 @@
48669+#include <linux/kernel.h>
48670+#include <linux/sched.h>
48671+#include <linux/mm.h>
48672+#include <linux/mman.h>
48673+#include <linux/grinternal.h>
48674+
48675+void
48676+gr_handle_ioperm(void)
48677+{
48678+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48679+ return;
48680+}
48681+
48682+void
48683+gr_handle_iopl(void)
48684+{
48685+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48686+ return;
48687+}
48688+
48689+void
48690+gr_handle_mem_readwrite(u64 from, u64 to)
48691+{
48692+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48693+ return;
48694+}
48695+
48696+void
48697+gr_handle_vm86(void)
48698+{
48699+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48700+ return;
48701+}
48702diff -urNp linux-3.0.3/grsecurity/grsec_mount.c linux-3.0.3/grsecurity/grsec_mount.c
48703--- linux-3.0.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48704+++ linux-3.0.3/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48705@@ -0,0 +1,62 @@
48706+#include <linux/kernel.h>
48707+#include <linux/sched.h>
48708+#include <linux/mount.h>
48709+#include <linux/grsecurity.h>
48710+#include <linux/grinternal.h>
48711+
48712+void
48713+gr_log_remount(const char *devname, const int retval)
48714+{
48715+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48716+ if (grsec_enable_mount && (retval >= 0))
48717+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48718+#endif
48719+ return;
48720+}
48721+
48722+void
48723+gr_log_unmount(const char *devname, const int retval)
48724+{
48725+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48726+ if (grsec_enable_mount && (retval >= 0))
48727+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48728+#endif
48729+ return;
48730+}
48731+
48732+void
48733+gr_log_mount(const char *from, const char *to, const int retval)
48734+{
48735+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48736+ if (grsec_enable_mount && (retval >= 0))
48737+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48738+#endif
48739+ return;
48740+}
48741+
48742+int
48743+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48744+{
48745+#ifdef CONFIG_GRKERNSEC_ROFS
48746+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48747+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48748+ return -EPERM;
48749+ } else
48750+ return 0;
48751+#endif
48752+ return 0;
48753+}
48754+
48755+int
48756+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48757+{
48758+#ifdef CONFIG_GRKERNSEC_ROFS
48759+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48760+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48761+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48762+ return -EPERM;
48763+ } else
48764+ return 0;
48765+#endif
48766+ return 0;
48767+}
48768diff -urNp linux-3.0.3/grsecurity/grsec_pax.c linux-3.0.3/grsecurity/grsec_pax.c
48769--- linux-3.0.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48770+++ linux-3.0.3/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48771@@ -0,0 +1,36 @@
48772+#include <linux/kernel.h>
48773+#include <linux/sched.h>
48774+#include <linux/mm.h>
48775+#include <linux/file.h>
48776+#include <linux/grinternal.h>
48777+#include <linux/grsecurity.h>
48778+
48779+void
48780+gr_log_textrel(struct vm_area_struct * vma)
48781+{
48782+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48783+ if (grsec_enable_audit_textrel)
48784+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48785+#endif
48786+ return;
48787+}
48788+
48789+void
48790+gr_log_rwxmmap(struct file *file)
48791+{
48792+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48793+ if (grsec_enable_log_rwxmaps)
48794+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48795+#endif
48796+ return;
48797+}
48798+
48799+void
48800+gr_log_rwxmprotect(struct file *file)
48801+{
48802+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48803+ if (grsec_enable_log_rwxmaps)
48804+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48805+#endif
48806+ return;
48807+}
48808diff -urNp linux-3.0.3/grsecurity/grsec_ptrace.c linux-3.0.3/grsecurity/grsec_ptrace.c
48809--- linux-3.0.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48810+++ linux-3.0.3/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48811@@ -0,0 +1,14 @@
48812+#include <linux/kernel.h>
48813+#include <linux/sched.h>
48814+#include <linux/grinternal.h>
48815+#include <linux/grsecurity.h>
48816+
48817+void
48818+gr_audit_ptrace(struct task_struct *task)
48819+{
48820+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48821+ if (grsec_enable_audit_ptrace)
48822+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48823+#endif
48824+ return;
48825+}
48826diff -urNp linux-3.0.3/grsecurity/grsec_sig.c linux-3.0.3/grsecurity/grsec_sig.c
48827--- linux-3.0.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48828+++ linux-3.0.3/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48829@@ -0,0 +1,206 @@
48830+#include <linux/kernel.h>
48831+#include <linux/sched.h>
48832+#include <linux/delay.h>
48833+#include <linux/grsecurity.h>
48834+#include <linux/grinternal.h>
48835+#include <linux/hardirq.h>
48836+
48837+char *signames[] = {
48838+ [SIGSEGV] = "Segmentation fault",
48839+ [SIGILL] = "Illegal instruction",
48840+ [SIGABRT] = "Abort",
48841+ [SIGBUS] = "Invalid alignment/Bus error"
48842+};
48843+
48844+void
48845+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48846+{
48847+#ifdef CONFIG_GRKERNSEC_SIGNAL
48848+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48849+ (sig == SIGABRT) || (sig == SIGBUS))) {
48850+ if (t->pid == current->pid) {
48851+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48852+ } else {
48853+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48854+ }
48855+ }
48856+#endif
48857+ return;
48858+}
48859+
48860+int
48861+gr_handle_signal(const struct task_struct *p, const int sig)
48862+{
48863+#ifdef CONFIG_GRKERNSEC
48864+ if (current->pid > 1 && gr_check_protected_task(p)) {
48865+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48866+ return -EPERM;
48867+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48868+ return -EPERM;
48869+ }
48870+#endif
48871+ return 0;
48872+}
48873+
48874+#ifdef CONFIG_GRKERNSEC
48875+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48876+
48877+int gr_fake_force_sig(int sig, struct task_struct *t)
48878+{
48879+ unsigned long int flags;
48880+ int ret, blocked, ignored;
48881+ struct k_sigaction *action;
48882+
48883+ spin_lock_irqsave(&t->sighand->siglock, flags);
48884+ action = &t->sighand->action[sig-1];
48885+ ignored = action->sa.sa_handler == SIG_IGN;
48886+ blocked = sigismember(&t->blocked, sig);
48887+ if (blocked || ignored) {
48888+ action->sa.sa_handler = SIG_DFL;
48889+ if (blocked) {
48890+ sigdelset(&t->blocked, sig);
48891+ recalc_sigpending_and_wake(t);
48892+ }
48893+ }
48894+ if (action->sa.sa_handler == SIG_DFL)
48895+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
48896+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48897+
48898+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
48899+
48900+ return ret;
48901+}
48902+#endif
48903+
48904+#ifdef CONFIG_GRKERNSEC_BRUTE
48905+#define GR_USER_BAN_TIME (15 * 60)
48906+
48907+static int __get_dumpable(unsigned long mm_flags)
48908+{
48909+ int ret;
48910+
48911+ ret = mm_flags & MMF_DUMPABLE_MASK;
48912+ return (ret >= 2) ? 2 : ret;
48913+}
48914+#endif
48915+
48916+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48917+{
48918+#ifdef CONFIG_GRKERNSEC_BRUTE
48919+ uid_t uid = 0;
48920+
48921+ if (!grsec_enable_brute)
48922+ return;
48923+
48924+ rcu_read_lock();
48925+ read_lock(&tasklist_lock);
48926+ read_lock(&grsec_exec_file_lock);
48927+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48928+ p->real_parent->brute = 1;
48929+ else {
48930+ const struct cred *cred = __task_cred(p), *cred2;
48931+ struct task_struct *tsk, *tsk2;
48932+
48933+ if (!__get_dumpable(mm_flags) && cred->uid) {
48934+ struct user_struct *user;
48935+
48936+ uid = cred->uid;
48937+
48938+ /* this is put upon execution past expiration */
48939+ user = find_user(uid);
48940+ if (user == NULL)
48941+ goto unlock;
48942+ user->banned = 1;
48943+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48944+ if (user->ban_expires == ~0UL)
48945+ user->ban_expires--;
48946+
48947+ do_each_thread(tsk2, tsk) {
48948+ cred2 = __task_cred(tsk);
48949+ if (tsk != p && cred2->uid == uid)
48950+ gr_fake_force_sig(SIGKILL, tsk);
48951+ } while_each_thread(tsk2, tsk);
48952+ }
48953+ }
48954+unlock:
48955+ read_unlock(&grsec_exec_file_lock);
48956+ read_unlock(&tasklist_lock);
48957+ rcu_read_unlock();
48958+
48959+ if (uid)
48960+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48961+
48962+#endif
48963+ return;
48964+}
48965+
48966+void gr_handle_brute_check(void)
48967+{
48968+#ifdef CONFIG_GRKERNSEC_BRUTE
48969+ if (current->brute)
48970+ msleep(30 * 1000);
48971+#endif
48972+ return;
48973+}
48974+
48975+void gr_handle_kernel_exploit(void)
48976+{
48977+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
48978+ const struct cred *cred;
48979+ struct task_struct *tsk, *tsk2;
48980+ struct user_struct *user;
48981+ uid_t uid;
48982+
48983+ if (in_irq() || in_serving_softirq() || in_nmi())
48984+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
48985+
48986+ uid = current_uid();
48987+
48988+ if (uid == 0)
48989+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
48990+ else {
48991+ /* kill all the processes of this user, hold a reference
48992+ to their creds struct, and prevent them from creating
48993+ another process until system reset
48994+ */
48995+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
48996+ /* we intentionally leak this ref */
48997+ user = get_uid(current->cred->user);
48998+ if (user) {
48999+ user->banned = 1;
49000+ user->ban_expires = ~0UL;
49001+ }
49002+
49003+ read_lock(&tasklist_lock);
49004+ do_each_thread(tsk2, tsk) {
49005+ cred = __task_cred(tsk);
49006+ if (cred->uid == uid)
49007+ gr_fake_force_sig(SIGKILL, tsk);
49008+ } while_each_thread(tsk2, tsk);
49009+ read_unlock(&tasklist_lock);
49010+ }
49011+#endif
49012+}
49013+
49014+int __gr_process_user_ban(struct user_struct *user)
49015+{
49016+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49017+ if (unlikely(user->banned)) {
49018+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49019+ user->banned = 0;
49020+ user->ban_expires = 0;
49021+ free_uid(user);
49022+ } else
49023+ return -EPERM;
49024+ }
49025+#endif
49026+ return 0;
49027+}
49028+
49029+int gr_process_user_ban(void)
49030+{
49031+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49032+ return __gr_process_user_ban(current->cred->user);
49033+#endif
49034+ return 0;
49035+}
49036diff -urNp linux-3.0.3/grsecurity/grsec_sock.c linux-3.0.3/grsecurity/grsec_sock.c
49037--- linux-3.0.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49038+++ linux-3.0.3/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
49039@@ -0,0 +1,244 @@
49040+#include <linux/kernel.h>
49041+#include <linux/module.h>
49042+#include <linux/sched.h>
49043+#include <linux/file.h>
49044+#include <linux/net.h>
49045+#include <linux/in.h>
49046+#include <linux/ip.h>
49047+#include <net/sock.h>
49048+#include <net/inet_sock.h>
49049+#include <linux/grsecurity.h>
49050+#include <linux/grinternal.h>
49051+#include <linux/gracl.h>
49052+
49053+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49054+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49055+
49056+EXPORT_SYMBOL(gr_search_udp_recvmsg);
49057+EXPORT_SYMBOL(gr_search_udp_sendmsg);
49058+
49059+#ifdef CONFIG_UNIX_MODULE
49060+EXPORT_SYMBOL(gr_acl_handle_unix);
49061+EXPORT_SYMBOL(gr_acl_handle_mknod);
49062+EXPORT_SYMBOL(gr_handle_chroot_unix);
49063+EXPORT_SYMBOL(gr_handle_create);
49064+#endif
49065+
49066+#ifdef CONFIG_GRKERNSEC
49067+#define gr_conn_table_size 32749
49068+struct conn_table_entry {
49069+ struct conn_table_entry *next;
49070+ struct signal_struct *sig;
49071+};
49072+
49073+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49074+DEFINE_SPINLOCK(gr_conn_table_lock);
49075+
49076+extern const char * gr_socktype_to_name(unsigned char type);
49077+extern const char * gr_proto_to_name(unsigned char proto);
49078+extern const char * gr_sockfamily_to_name(unsigned char family);
49079+
49080+static __inline__ int
49081+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49082+{
49083+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49084+}
49085+
49086+static __inline__ int
49087+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49088+ __u16 sport, __u16 dport)
49089+{
49090+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49091+ sig->gr_sport == sport && sig->gr_dport == dport))
49092+ return 1;
49093+ else
49094+ return 0;
49095+}
49096+
49097+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49098+{
49099+ struct conn_table_entry **match;
49100+ unsigned int index;
49101+
49102+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49103+ sig->gr_sport, sig->gr_dport,
49104+ gr_conn_table_size);
49105+
49106+ newent->sig = sig;
49107+
49108+ match = &gr_conn_table[index];
49109+ newent->next = *match;
49110+ *match = newent;
49111+
49112+ return;
49113+}
49114+
49115+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49116+{
49117+ struct conn_table_entry *match, *last = NULL;
49118+ unsigned int index;
49119+
49120+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49121+ sig->gr_sport, sig->gr_dport,
49122+ gr_conn_table_size);
49123+
49124+ match = gr_conn_table[index];
49125+ while (match && !conn_match(match->sig,
49126+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49127+ sig->gr_dport)) {
49128+ last = match;
49129+ match = match->next;
49130+ }
49131+
49132+ if (match) {
49133+ if (last)
49134+ last->next = match->next;
49135+ else
49136+ gr_conn_table[index] = NULL;
49137+ kfree(match);
49138+ }
49139+
49140+ return;
49141+}
49142+
49143+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49144+ __u16 sport, __u16 dport)
49145+{
49146+ struct conn_table_entry *match;
49147+ unsigned int index;
49148+
49149+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49150+
49151+ match = gr_conn_table[index];
49152+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49153+ match = match->next;
49154+
49155+ if (match)
49156+ return match->sig;
49157+ else
49158+ return NULL;
49159+}
49160+
49161+#endif
49162+
49163+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49164+{
49165+#ifdef CONFIG_GRKERNSEC
49166+ struct signal_struct *sig = task->signal;
49167+ struct conn_table_entry *newent;
49168+
49169+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49170+ if (newent == NULL)
49171+ return;
49172+ /* no bh lock needed since we are called with bh disabled */
49173+ spin_lock(&gr_conn_table_lock);
49174+ gr_del_task_from_ip_table_nolock(sig);
49175+ sig->gr_saddr = inet->inet_rcv_saddr;
49176+ sig->gr_daddr = inet->inet_daddr;
49177+ sig->gr_sport = inet->inet_sport;
49178+ sig->gr_dport = inet->inet_dport;
49179+ gr_add_to_task_ip_table_nolock(sig, newent);
49180+ spin_unlock(&gr_conn_table_lock);
49181+#endif
49182+ return;
49183+}
49184+
49185+void gr_del_task_from_ip_table(struct task_struct *task)
49186+{
49187+#ifdef CONFIG_GRKERNSEC
49188+ spin_lock_bh(&gr_conn_table_lock);
49189+ gr_del_task_from_ip_table_nolock(task->signal);
49190+ spin_unlock_bh(&gr_conn_table_lock);
49191+#endif
49192+ return;
49193+}
49194+
49195+void
49196+gr_attach_curr_ip(const struct sock *sk)
49197+{
49198+#ifdef CONFIG_GRKERNSEC
49199+ struct signal_struct *p, *set;
49200+ const struct inet_sock *inet = inet_sk(sk);
49201+
49202+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49203+ return;
49204+
49205+ set = current->signal;
49206+
49207+ spin_lock_bh(&gr_conn_table_lock);
49208+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49209+ inet->inet_dport, inet->inet_sport);
49210+ if (unlikely(p != NULL)) {
49211+ set->curr_ip = p->curr_ip;
49212+ set->used_accept = 1;
49213+ gr_del_task_from_ip_table_nolock(p);
49214+ spin_unlock_bh(&gr_conn_table_lock);
49215+ return;
49216+ }
49217+ spin_unlock_bh(&gr_conn_table_lock);
49218+
49219+ set->curr_ip = inet->inet_daddr;
49220+ set->used_accept = 1;
49221+#endif
49222+ return;
49223+}
49224+
49225+int
49226+gr_handle_sock_all(const int family, const int type, const int protocol)
49227+{
49228+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49229+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49230+ (family != AF_UNIX)) {
49231+ if (family == AF_INET)
49232+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49233+ else
49234+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49235+ return -EACCES;
49236+ }
49237+#endif
49238+ return 0;
49239+}
49240+
49241+int
49242+gr_handle_sock_server(const struct sockaddr *sck)
49243+{
49244+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49245+ if (grsec_enable_socket_server &&
49246+ in_group_p(grsec_socket_server_gid) &&
49247+ sck && (sck->sa_family != AF_UNIX) &&
49248+ (sck->sa_family != AF_LOCAL)) {
49249+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49250+ return -EACCES;
49251+ }
49252+#endif
49253+ return 0;
49254+}
49255+
49256+int
49257+gr_handle_sock_server_other(const struct sock *sck)
49258+{
49259+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49260+ if (grsec_enable_socket_server &&
49261+ in_group_p(grsec_socket_server_gid) &&
49262+ sck && (sck->sk_family != AF_UNIX) &&
49263+ (sck->sk_family != AF_LOCAL)) {
49264+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49265+ return -EACCES;
49266+ }
49267+#endif
49268+ return 0;
49269+}
49270+
49271+int
49272+gr_handle_sock_client(const struct sockaddr *sck)
49273+{
49274+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49275+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49276+ sck && (sck->sa_family != AF_UNIX) &&
49277+ (sck->sa_family != AF_LOCAL)) {
49278+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49279+ return -EACCES;
49280+ }
49281+#endif
49282+ return 0;
49283+}
49284diff -urNp linux-3.0.3/grsecurity/grsec_sysctl.c linux-3.0.3/grsecurity/grsec_sysctl.c
49285--- linux-3.0.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49286+++ linux-3.0.3/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
49287@@ -0,0 +1,433 @@
49288+#include <linux/kernel.h>
49289+#include <linux/sched.h>
49290+#include <linux/sysctl.h>
49291+#include <linux/grsecurity.h>
49292+#include <linux/grinternal.h>
49293+
49294+int
49295+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49296+{
49297+#ifdef CONFIG_GRKERNSEC_SYSCTL
49298+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49299+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49300+ return -EACCES;
49301+ }
49302+#endif
49303+ return 0;
49304+}
49305+
49306+#ifdef CONFIG_GRKERNSEC_ROFS
49307+static int __maybe_unused one = 1;
49308+#endif
49309+
49310+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49311+struct ctl_table grsecurity_table[] = {
49312+#ifdef CONFIG_GRKERNSEC_SYSCTL
49313+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49314+#ifdef CONFIG_GRKERNSEC_IO
49315+ {
49316+ .procname = "disable_priv_io",
49317+ .data = &grsec_disable_privio,
49318+ .maxlen = sizeof(int),
49319+ .mode = 0600,
49320+ .proc_handler = &proc_dointvec,
49321+ },
49322+#endif
49323+#endif
49324+#ifdef CONFIG_GRKERNSEC_LINK
49325+ {
49326+ .procname = "linking_restrictions",
49327+ .data = &grsec_enable_link,
49328+ .maxlen = sizeof(int),
49329+ .mode = 0600,
49330+ .proc_handler = &proc_dointvec,
49331+ },
49332+#endif
49333+#ifdef CONFIG_GRKERNSEC_BRUTE
49334+ {
49335+ .procname = "deter_bruteforce",
49336+ .data = &grsec_enable_brute,
49337+ .maxlen = sizeof(int),
49338+ .mode = 0600,
49339+ .proc_handler = &proc_dointvec,
49340+ },
49341+#endif
49342+#ifdef CONFIG_GRKERNSEC_FIFO
49343+ {
49344+ .procname = "fifo_restrictions",
49345+ .data = &grsec_enable_fifo,
49346+ .maxlen = sizeof(int),
49347+ .mode = 0600,
49348+ .proc_handler = &proc_dointvec,
49349+ },
49350+#endif
49351+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49352+ {
49353+ .procname = "ip_blackhole",
49354+ .data = &grsec_enable_blackhole,
49355+ .maxlen = sizeof(int),
49356+ .mode = 0600,
49357+ .proc_handler = &proc_dointvec,
49358+ },
49359+ {
49360+ .procname = "lastack_retries",
49361+ .data = &grsec_lastack_retries,
49362+ .maxlen = sizeof(int),
49363+ .mode = 0600,
49364+ .proc_handler = &proc_dointvec,
49365+ },
49366+#endif
49367+#ifdef CONFIG_GRKERNSEC_EXECLOG
49368+ {
49369+ .procname = "exec_logging",
49370+ .data = &grsec_enable_execlog,
49371+ .maxlen = sizeof(int),
49372+ .mode = 0600,
49373+ .proc_handler = &proc_dointvec,
49374+ },
49375+#endif
49376+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49377+ {
49378+ .procname = "rwxmap_logging",
49379+ .data = &grsec_enable_log_rwxmaps,
49380+ .maxlen = sizeof(int),
49381+ .mode = 0600,
49382+ .proc_handler = &proc_dointvec,
49383+ },
49384+#endif
49385+#ifdef CONFIG_GRKERNSEC_SIGNAL
49386+ {
49387+ .procname = "signal_logging",
49388+ .data = &grsec_enable_signal,
49389+ .maxlen = sizeof(int),
49390+ .mode = 0600,
49391+ .proc_handler = &proc_dointvec,
49392+ },
49393+#endif
49394+#ifdef CONFIG_GRKERNSEC_FORKFAIL
49395+ {
49396+ .procname = "forkfail_logging",
49397+ .data = &grsec_enable_forkfail,
49398+ .maxlen = sizeof(int),
49399+ .mode = 0600,
49400+ .proc_handler = &proc_dointvec,
49401+ },
49402+#endif
49403+#ifdef CONFIG_GRKERNSEC_TIME
49404+ {
49405+ .procname = "timechange_logging",
49406+ .data = &grsec_enable_time,
49407+ .maxlen = sizeof(int),
49408+ .mode = 0600,
49409+ .proc_handler = &proc_dointvec,
49410+ },
49411+#endif
49412+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49413+ {
49414+ .procname = "chroot_deny_shmat",
49415+ .data = &grsec_enable_chroot_shmat,
49416+ .maxlen = sizeof(int),
49417+ .mode = 0600,
49418+ .proc_handler = &proc_dointvec,
49419+ },
49420+#endif
49421+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49422+ {
49423+ .procname = "chroot_deny_unix",
49424+ .data = &grsec_enable_chroot_unix,
49425+ .maxlen = sizeof(int),
49426+ .mode = 0600,
49427+ .proc_handler = &proc_dointvec,
49428+ },
49429+#endif
49430+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49431+ {
49432+ .procname = "chroot_deny_mount",
49433+ .data = &grsec_enable_chroot_mount,
49434+ .maxlen = sizeof(int),
49435+ .mode = 0600,
49436+ .proc_handler = &proc_dointvec,
49437+ },
49438+#endif
49439+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49440+ {
49441+ .procname = "chroot_deny_fchdir",
49442+ .data = &grsec_enable_chroot_fchdir,
49443+ .maxlen = sizeof(int),
49444+ .mode = 0600,
49445+ .proc_handler = &proc_dointvec,
49446+ },
49447+#endif
49448+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49449+ {
49450+ .procname = "chroot_deny_chroot",
49451+ .data = &grsec_enable_chroot_double,
49452+ .maxlen = sizeof(int),
49453+ .mode = 0600,
49454+ .proc_handler = &proc_dointvec,
49455+ },
49456+#endif
49457+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49458+ {
49459+ .procname = "chroot_deny_pivot",
49460+ .data = &grsec_enable_chroot_pivot,
49461+ .maxlen = sizeof(int),
49462+ .mode = 0600,
49463+ .proc_handler = &proc_dointvec,
49464+ },
49465+#endif
49466+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49467+ {
49468+ .procname = "chroot_enforce_chdir",
49469+ .data = &grsec_enable_chroot_chdir,
49470+ .maxlen = sizeof(int),
49471+ .mode = 0600,
49472+ .proc_handler = &proc_dointvec,
49473+ },
49474+#endif
49475+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49476+ {
49477+ .procname = "chroot_deny_chmod",
49478+ .data = &grsec_enable_chroot_chmod,
49479+ .maxlen = sizeof(int),
49480+ .mode = 0600,
49481+ .proc_handler = &proc_dointvec,
49482+ },
49483+#endif
49484+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49485+ {
49486+ .procname = "chroot_deny_mknod",
49487+ .data = &grsec_enable_chroot_mknod,
49488+ .maxlen = sizeof(int),
49489+ .mode = 0600,
49490+ .proc_handler = &proc_dointvec,
49491+ },
49492+#endif
49493+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49494+ {
49495+ .procname = "chroot_restrict_nice",
49496+ .data = &grsec_enable_chroot_nice,
49497+ .maxlen = sizeof(int),
49498+ .mode = 0600,
49499+ .proc_handler = &proc_dointvec,
49500+ },
49501+#endif
49502+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49503+ {
49504+ .procname = "chroot_execlog",
49505+ .data = &grsec_enable_chroot_execlog,
49506+ .maxlen = sizeof(int),
49507+ .mode = 0600,
49508+ .proc_handler = &proc_dointvec,
49509+ },
49510+#endif
49511+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49512+ {
49513+ .procname = "chroot_caps",
49514+ .data = &grsec_enable_chroot_caps,
49515+ .maxlen = sizeof(int),
49516+ .mode = 0600,
49517+ .proc_handler = &proc_dointvec,
49518+ },
49519+#endif
49520+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49521+ {
49522+ .procname = "chroot_deny_sysctl",
49523+ .data = &grsec_enable_chroot_sysctl,
49524+ .maxlen = sizeof(int),
49525+ .mode = 0600,
49526+ .proc_handler = &proc_dointvec,
49527+ },
49528+#endif
49529+#ifdef CONFIG_GRKERNSEC_TPE
49530+ {
49531+ .procname = "tpe",
49532+ .data = &grsec_enable_tpe,
49533+ .maxlen = sizeof(int),
49534+ .mode = 0600,
49535+ .proc_handler = &proc_dointvec,
49536+ },
49537+ {
49538+ .procname = "tpe_gid",
49539+ .data = &grsec_tpe_gid,
49540+ .maxlen = sizeof(int),
49541+ .mode = 0600,
49542+ .proc_handler = &proc_dointvec,
49543+ },
49544+#endif
49545+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49546+ {
49547+ .procname = "tpe_invert",
49548+ .data = &grsec_enable_tpe_invert,
49549+ .maxlen = sizeof(int),
49550+ .mode = 0600,
49551+ .proc_handler = &proc_dointvec,
49552+ },
49553+#endif
49554+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49555+ {
49556+ .procname = "tpe_restrict_all",
49557+ .data = &grsec_enable_tpe_all,
49558+ .maxlen = sizeof(int),
49559+ .mode = 0600,
49560+ .proc_handler = &proc_dointvec,
49561+ },
49562+#endif
49563+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49564+ {
49565+ .procname = "socket_all",
49566+ .data = &grsec_enable_socket_all,
49567+ .maxlen = sizeof(int),
49568+ .mode = 0600,
49569+ .proc_handler = &proc_dointvec,
49570+ },
49571+ {
49572+ .procname = "socket_all_gid",
49573+ .data = &grsec_socket_all_gid,
49574+ .maxlen = sizeof(int),
49575+ .mode = 0600,
49576+ .proc_handler = &proc_dointvec,
49577+ },
49578+#endif
49579+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49580+ {
49581+ .procname = "socket_client",
49582+ .data = &grsec_enable_socket_client,
49583+ .maxlen = sizeof(int),
49584+ .mode = 0600,
49585+ .proc_handler = &proc_dointvec,
49586+ },
49587+ {
49588+ .procname = "socket_client_gid",
49589+ .data = &grsec_socket_client_gid,
49590+ .maxlen = sizeof(int),
49591+ .mode = 0600,
49592+ .proc_handler = &proc_dointvec,
49593+ },
49594+#endif
49595+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49596+ {
49597+ .procname = "socket_server",
49598+ .data = &grsec_enable_socket_server,
49599+ .maxlen = sizeof(int),
49600+ .mode = 0600,
49601+ .proc_handler = &proc_dointvec,
49602+ },
49603+ {
49604+ .procname = "socket_server_gid",
49605+ .data = &grsec_socket_server_gid,
49606+ .maxlen = sizeof(int),
49607+ .mode = 0600,
49608+ .proc_handler = &proc_dointvec,
49609+ },
49610+#endif
49611+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49612+ {
49613+ .procname = "audit_group",
49614+ .data = &grsec_enable_group,
49615+ .maxlen = sizeof(int),
49616+ .mode = 0600,
49617+ .proc_handler = &proc_dointvec,
49618+ },
49619+ {
49620+ .procname = "audit_gid",
49621+ .data = &grsec_audit_gid,
49622+ .maxlen = sizeof(int),
49623+ .mode = 0600,
49624+ .proc_handler = &proc_dointvec,
49625+ },
49626+#endif
49627+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49628+ {
49629+ .procname = "audit_chdir",
49630+ .data = &grsec_enable_chdir,
49631+ .maxlen = sizeof(int),
49632+ .mode = 0600,
49633+ .proc_handler = &proc_dointvec,
49634+ },
49635+#endif
49636+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49637+ {
49638+ .procname = "audit_mount",
49639+ .data = &grsec_enable_mount,
49640+ .maxlen = sizeof(int),
49641+ .mode = 0600,
49642+ .proc_handler = &proc_dointvec,
49643+ },
49644+#endif
49645+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49646+ {
49647+ .procname = "audit_textrel",
49648+ .data = &grsec_enable_audit_textrel,
49649+ .maxlen = sizeof(int),
49650+ .mode = 0600,
49651+ .proc_handler = &proc_dointvec,
49652+ },
49653+#endif
49654+#ifdef CONFIG_GRKERNSEC_DMESG
49655+ {
49656+ .procname = "dmesg",
49657+ .data = &grsec_enable_dmesg,
49658+ .maxlen = sizeof(int),
49659+ .mode = 0600,
49660+ .proc_handler = &proc_dointvec,
49661+ },
49662+#endif
49663+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49664+ {
49665+ .procname = "chroot_findtask",
49666+ .data = &grsec_enable_chroot_findtask,
49667+ .maxlen = sizeof(int),
49668+ .mode = 0600,
49669+ .proc_handler = &proc_dointvec,
49670+ },
49671+#endif
49672+#ifdef CONFIG_GRKERNSEC_RESLOG
49673+ {
49674+ .procname = "resource_logging",
49675+ .data = &grsec_resource_logging,
49676+ .maxlen = sizeof(int),
49677+ .mode = 0600,
49678+ .proc_handler = &proc_dointvec,
49679+ },
49680+#endif
49681+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49682+ {
49683+ .procname = "audit_ptrace",
49684+ .data = &grsec_enable_audit_ptrace,
49685+ .maxlen = sizeof(int),
49686+ .mode = 0600,
49687+ .proc_handler = &proc_dointvec,
49688+ },
49689+#endif
49690+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49691+ {
49692+ .procname = "harden_ptrace",
49693+ .data = &grsec_enable_harden_ptrace,
49694+ .maxlen = sizeof(int),
49695+ .mode = 0600,
49696+ .proc_handler = &proc_dointvec,
49697+ },
49698+#endif
49699+ {
49700+ .procname = "grsec_lock",
49701+ .data = &grsec_lock,
49702+ .maxlen = sizeof(int),
49703+ .mode = 0600,
49704+ .proc_handler = &proc_dointvec,
49705+ },
49706+#endif
49707+#ifdef CONFIG_GRKERNSEC_ROFS
49708+ {
49709+ .procname = "romount_protect",
49710+ .data = &grsec_enable_rofs,
49711+ .maxlen = sizeof(int),
49712+ .mode = 0600,
49713+ .proc_handler = &proc_dointvec_minmax,
49714+ .extra1 = &one,
49715+ .extra2 = &one,
49716+ },
49717+#endif
49718+ { }
49719+};
49720+#endif
49721diff -urNp linux-3.0.3/grsecurity/grsec_time.c linux-3.0.3/grsecurity/grsec_time.c
49722--- linux-3.0.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49723+++ linux-3.0.3/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49724@@ -0,0 +1,16 @@
49725+#include <linux/kernel.h>
49726+#include <linux/sched.h>
49727+#include <linux/grinternal.h>
49728+#include <linux/module.h>
49729+
49730+void
49731+gr_log_timechange(void)
49732+{
49733+#ifdef CONFIG_GRKERNSEC_TIME
49734+ if (grsec_enable_time)
49735+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49736+#endif
49737+ return;
49738+}
49739+
49740+EXPORT_SYMBOL(gr_log_timechange);
49741diff -urNp linux-3.0.3/grsecurity/grsec_tpe.c linux-3.0.3/grsecurity/grsec_tpe.c
49742--- linux-3.0.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49743+++ linux-3.0.3/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49744@@ -0,0 +1,39 @@
49745+#include <linux/kernel.h>
49746+#include <linux/sched.h>
49747+#include <linux/file.h>
49748+#include <linux/fs.h>
49749+#include <linux/grinternal.h>
49750+
49751+extern int gr_acl_tpe_check(void);
49752+
49753+int
49754+gr_tpe_allow(const struct file *file)
49755+{
49756+#ifdef CONFIG_GRKERNSEC
49757+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49758+ const struct cred *cred = current_cred();
49759+
49760+ if (cred->uid && ((grsec_enable_tpe &&
49761+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49762+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49763+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49764+#else
49765+ in_group_p(grsec_tpe_gid)
49766+#endif
49767+ ) || gr_acl_tpe_check()) &&
49768+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49769+ (inode->i_mode & S_IWOTH))))) {
49770+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49771+ return 0;
49772+ }
49773+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49774+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49775+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49776+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49777+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49778+ return 0;
49779+ }
49780+#endif
49781+#endif
49782+ return 1;
49783+}
49784diff -urNp linux-3.0.3/grsecurity/grsum.c linux-3.0.3/grsecurity/grsum.c
49785--- linux-3.0.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49786+++ linux-3.0.3/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49787@@ -0,0 +1,61 @@
49788+#include <linux/err.h>
49789+#include <linux/kernel.h>
49790+#include <linux/sched.h>
49791+#include <linux/mm.h>
49792+#include <linux/scatterlist.h>
49793+#include <linux/crypto.h>
49794+#include <linux/gracl.h>
49795+
49796+
49797+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49798+#error "crypto and sha256 must be built into the kernel"
49799+#endif
49800+
49801+int
49802+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49803+{
49804+ char *p;
49805+ struct crypto_hash *tfm;
49806+ struct hash_desc desc;
49807+ struct scatterlist sg;
49808+ unsigned char temp_sum[GR_SHA_LEN];
49809+ volatile int retval = 0;
49810+ volatile int dummy = 0;
49811+ unsigned int i;
49812+
49813+ sg_init_table(&sg, 1);
49814+
49815+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49816+ if (IS_ERR(tfm)) {
49817+ /* should never happen, since sha256 should be built in */
49818+ return 1;
49819+ }
49820+
49821+ desc.tfm = tfm;
49822+ desc.flags = 0;
49823+
49824+ crypto_hash_init(&desc);
49825+
49826+ p = salt;
49827+ sg_set_buf(&sg, p, GR_SALT_LEN);
49828+ crypto_hash_update(&desc, &sg, sg.length);
49829+
49830+ p = entry->pw;
49831+ sg_set_buf(&sg, p, strlen(p));
49832+
49833+ crypto_hash_update(&desc, &sg, sg.length);
49834+
49835+ crypto_hash_final(&desc, temp_sum);
49836+
49837+ memset(entry->pw, 0, GR_PW_LEN);
49838+
49839+ for (i = 0; i < GR_SHA_LEN; i++)
49840+ if (sum[i] != temp_sum[i])
49841+ retval = 1;
49842+ else
49843+ dummy = 1; // waste a cycle
49844+
49845+ crypto_free_hash(tfm);
49846+
49847+ return retval;
49848+}
49849diff -urNp linux-3.0.3/grsecurity/Kconfig linux-3.0.3/grsecurity/Kconfig
49850--- linux-3.0.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49851+++ linux-3.0.3/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
49852@@ -0,0 +1,1038 @@
49853+#
49854+# grecurity configuration
49855+#
49856+
49857+menu "Grsecurity"
49858+
49859+config GRKERNSEC
49860+ bool "Grsecurity"
49861+ select CRYPTO
49862+ select CRYPTO_SHA256
49863+ help
49864+ If you say Y here, you will be able to configure many features
49865+ that will enhance the security of your system. It is highly
49866+ recommended that you say Y here and read through the help
49867+ for each option so that you fully understand the features and
49868+ can evaluate their usefulness for your machine.
49869+
49870+choice
49871+ prompt "Security Level"
49872+ depends on GRKERNSEC
49873+ default GRKERNSEC_CUSTOM
49874+
49875+config GRKERNSEC_LOW
49876+ bool "Low"
49877+ select GRKERNSEC_LINK
49878+ select GRKERNSEC_FIFO
49879+ select GRKERNSEC_RANDNET
49880+ select GRKERNSEC_DMESG
49881+ select GRKERNSEC_CHROOT
49882+ select GRKERNSEC_CHROOT_CHDIR
49883+
49884+ help
49885+ If you choose this option, several of the grsecurity options will
49886+ be enabled that will give you greater protection against a number
49887+ of attacks, while assuring that none of your software will have any
49888+ conflicts with the additional security measures. If you run a lot
49889+ of unusual software, or you are having problems with the higher
49890+ security levels, you should say Y here. With this option, the
49891+ following features are enabled:
49892+
49893+ - Linking restrictions
49894+ - FIFO restrictions
49895+ - Restricted dmesg
49896+ - Enforced chdir("/") on chroot
49897+ - Runtime module disabling
49898+
49899+config GRKERNSEC_MEDIUM
49900+ bool "Medium"
49901+ select PAX
49902+ select PAX_EI_PAX
49903+ select PAX_PT_PAX_FLAGS
49904+ select PAX_HAVE_ACL_FLAGS
49905+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49906+ select GRKERNSEC_CHROOT
49907+ select GRKERNSEC_CHROOT_SYSCTL
49908+ select GRKERNSEC_LINK
49909+ select GRKERNSEC_FIFO
49910+ select GRKERNSEC_DMESG
49911+ select GRKERNSEC_RANDNET
49912+ select GRKERNSEC_FORKFAIL
49913+ select GRKERNSEC_TIME
49914+ select GRKERNSEC_SIGNAL
49915+ select GRKERNSEC_CHROOT
49916+ select GRKERNSEC_CHROOT_UNIX
49917+ select GRKERNSEC_CHROOT_MOUNT
49918+ select GRKERNSEC_CHROOT_PIVOT
49919+ select GRKERNSEC_CHROOT_DOUBLE
49920+ select GRKERNSEC_CHROOT_CHDIR
49921+ select GRKERNSEC_CHROOT_MKNOD
49922+ select GRKERNSEC_PROC
49923+ select GRKERNSEC_PROC_USERGROUP
49924+ select PAX_RANDUSTACK
49925+ select PAX_ASLR
49926+ select PAX_RANDMMAP
49927+ select PAX_REFCOUNT if (X86 || SPARC64)
49928+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49929+
49930+ help
49931+ If you say Y here, several features in addition to those included
49932+ in the low additional security level will be enabled. These
49933+ features provide even more security to your system, though in rare
49934+ cases they may be incompatible with very old or poorly written
49935+ software. If you enable this option, make sure that your auth
49936+ service (identd) is running as gid 1001. With this option,
49937+ the following features (in addition to those provided in the
49938+ low additional security level) will be enabled:
49939+
49940+ - Failed fork logging
49941+ - Time change logging
49942+ - Signal logging
49943+ - Deny mounts in chroot
49944+ - Deny double chrooting
49945+ - Deny sysctl writes in chroot
49946+ - Deny mknod in chroot
49947+ - Deny access to abstract AF_UNIX sockets out of chroot
49948+ - Deny pivot_root in chroot
49949+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49950+ - /proc restrictions with special GID set to 10 (usually wheel)
49951+ - Address Space Layout Randomization (ASLR)
49952+ - Prevent exploitation of most refcount overflows
49953+ - Bounds checking of copying between the kernel and userland
49954+
49955+config GRKERNSEC_HIGH
49956+ bool "High"
49957+ select GRKERNSEC_LINK
49958+ select GRKERNSEC_FIFO
49959+ select GRKERNSEC_DMESG
49960+ select GRKERNSEC_FORKFAIL
49961+ select GRKERNSEC_TIME
49962+ select GRKERNSEC_SIGNAL
49963+ select GRKERNSEC_CHROOT
49964+ select GRKERNSEC_CHROOT_SHMAT
49965+ select GRKERNSEC_CHROOT_UNIX
49966+ select GRKERNSEC_CHROOT_MOUNT
49967+ select GRKERNSEC_CHROOT_FCHDIR
49968+ select GRKERNSEC_CHROOT_PIVOT
49969+ select GRKERNSEC_CHROOT_DOUBLE
49970+ select GRKERNSEC_CHROOT_CHDIR
49971+ select GRKERNSEC_CHROOT_MKNOD
49972+ select GRKERNSEC_CHROOT_CAPS
49973+ select GRKERNSEC_CHROOT_SYSCTL
49974+ select GRKERNSEC_CHROOT_FINDTASK
49975+ select GRKERNSEC_SYSFS_RESTRICT
49976+ select GRKERNSEC_PROC
49977+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49978+ select GRKERNSEC_HIDESYM
49979+ select GRKERNSEC_BRUTE
49980+ select GRKERNSEC_PROC_USERGROUP
49981+ select GRKERNSEC_KMEM
49982+ select GRKERNSEC_RESLOG
49983+ select GRKERNSEC_RANDNET
49984+ select GRKERNSEC_PROC_ADD
49985+ select GRKERNSEC_CHROOT_CHMOD
49986+ select GRKERNSEC_CHROOT_NICE
49987+ select GRKERNSEC_AUDIT_MOUNT
49988+ select GRKERNSEC_MODHARDEN if (MODULES)
49989+ select GRKERNSEC_HARDEN_PTRACE
49990+ select GRKERNSEC_VM86 if (X86_32)
49991+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49992+ select PAX
49993+ select PAX_RANDUSTACK
49994+ select PAX_ASLR
49995+ select PAX_RANDMMAP
49996+ select PAX_NOEXEC
49997+ select PAX_MPROTECT
49998+ select PAX_EI_PAX
49999+ select PAX_PT_PAX_FLAGS
50000+ select PAX_HAVE_ACL_FLAGS
50001+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50002+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50003+ select PAX_RANDKSTACK if (X86_TSC && X86)
50004+ select PAX_SEGMEXEC if (X86_32)
50005+ select PAX_PAGEEXEC
50006+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50007+ select PAX_EMUTRAMP if (PARISC)
50008+ select PAX_EMUSIGRT if (PARISC)
50009+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50010+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50011+ select PAX_REFCOUNT if (X86 || SPARC64)
50012+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50013+ help
50014+ If you say Y here, many of the features of grsecurity will be
50015+ enabled, which will protect you against many kinds of attacks
50016+ against your system. The heightened security comes at a cost
50017+ of an increased chance of incompatibilities with rare software
50018+ on your machine. Since this security level enables PaX, you should
50019+ view <http://pax.grsecurity.net> and read about the PaX
50020+ project. While you are there, download chpax and run it on
50021+ binaries that cause problems with PaX. Also remember that
50022+ since the /proc restrictions are enabled, you must run your
50023+ identd as gid 1001. This security level enables the following
50024+ features in addition to those listed in the low and medium
50025+ security levels:
50026+
50027+ - Additional /proc restrictions
50028+ - Chmod restrictions in chroot
50029+ - No signals, ptrace, or viewing of processes outside of chroot
50030+ - Capability restrictions in chroot
50031+ - Deny fchdir out of chroot
50032+ - Priority restrictions in chroot
50033+ - Segmentation-based implementation of PaX
50034+ - Mprotect restrictions
50035+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50036+ - Kernel stack randomization
50037+ - Mount/unmount/remount logging
50038+ - Kernel symbol hiding
50039+ - Prevention of memory exhaustion-based exploits
50040+ - Hardening of module auto-loading
50041+ - Ptrace restrictions
50042+ - Restricted vm86 mode
50043+ - Restricted sysfs/debugfs
50044+ - Active kernel exploit response
50045+
50046+config GRKERNSEC_CUSTOM
50047+ bool "Custom"
50048+ help
50049+ If you say Y here, you will be able to configure every grsecurity
50050+ option, which allows you to enable many more features that aren't
50051+ covered in the basic security levels. These additional features
50052+ include TPE, socket restrictions, and the sysctl system for
50053+ grsecurity. It is advised that you read through the help for
50054+ each option to determine its usefulness in your situation.
50055+
50056+endchoice
50057+
50058+menu "Address Space Protection"
50059+depends on GRKERNSEC
50060+
50061+config GRKERNSEC_KMEM
50062+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50063+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50064+ help
50065+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50066+ be written to via mmap or otherwise to modify the running kernel.
50067+ /dev/port will also not be allowed to be opened. If you have module
50068+ support disabled, enabling this will close up four ways that are
50069+ currently used to insert malicious code into the running kernel.
50070+ Even with all these features enabled, we still highly recommend that
50071+ you use the RBAC system, as it is still possible for an attacker to
50072+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50073+ If you are not using XFree86, you may be able to stop this additional
50074+ case by enabling the 'Disable privileged I/O' option. Though nothing
50075+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50076+ but only to video memory, which is the only writing we allow in this
50077+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50078+ not be allowed to mprotect it with PROT_WRITE later.
50079+ It is highly recommended that you say Y here if you meet all the
50080+ conditions above.
50081+
50082+config GRKERNSEC_VM86
50083+ bool "Restrict VM86 mode"
50084+ depends on X86_32
50085+
50086+ help
50087+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50088+ make use of a special execution mode on 32bit x86 processors called
50089+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50090+ video cards and will still work with this option enabled. The purpose
50091+ of the option is to prevent exploitation of emulation errors in
50092+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50093+ Nearly all users should be able to enable this option.
50094+
50095+config GRKERNSEC_IO
50096+ bool "Disable privileged I/O"
50097+ depends on X86
50098+ select RTC_CLASS
50099+ select RTC_INTF_DEV
50100+ select RTC_DRV_CMOS
50101+
50102+ help
50103+ If you say Y here, all ioperm and iopl calls will return an error.
50104+ Ioperm and iopl can be used to modify the running kernel.
50105+ Unfortunately, some programs need this access to operate properly,
50106+ the most notable of which are XFree86 and hwclock. hwclock can be
50107+ remedied by having RTC support in the kernel, so real-time
50108+ clock support is enabled if this option is enabled, to ensure
50109+ that hwclock operates correctly. XFree86 still will not
50110+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50111+ IF YOU USE XFree86. If you use XFree86 and you still want to
50112+ protect your kernel against modification, use the RBAC system.
50113+
50114+config GRKERNSEC_PROC_MEMMAP
50115+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50116+ default y if (PAX_NOEXEC || PAX_ASLR)
50117+ depends on PAX_NOEXEC || PAX_ASLR
50118+ help
50119+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50120+ give no information about the addresses of its mappings if
50121+ PaX features that rely on random addresses are enabled on the task.
50122+ If you use PaX it is greatly recommended that you say Y here as it
50123+ closes up a hole that makes the full ASLR useless for suid
50124+ binaries.
50125+
50126+config GRKERNSEC_BRUTE
50127+ bool "Deter exploit bruteforcing"
50128+ help
50129+ If you say Y here, attempts to bruteforce exploits against forking
50130+ daemons such as apache or sshd, as well as against suid/sgid binaries
50131+ will be deterred. When a child of a forking daemon is killed by PaX
50132+ or crashes due to an illegal instruction or other suspicious signal,
50133+ the parent process will be delayed 30 seconds upon every subsequent
50134+ fork until the administrator is able to assess the situation and
50135+ restart the daemon.
50136+ In the suid/sgid case, the attempt is logged, the user has all their
50137+ processes terminated, and they are prevented from executing any further
50138+ processes for 15 minutes.
50139+ It is recommended that you also enable signal logging in the auditing
50140+ section so that logs are generated when a process triggers a suspicious
50141+ signal.
50142+ If the sysctl option is enabled, a sysctl option with name
50143+ "deter_bruteforce" is created.
50144+
50145+
50146+config GRKERNSEC_MODHARDEN
50147+ bool "Harden module auto-loading"
50148+ depends on MODULES
50149+ help
50150+ If you say Y here, module auto-loading in response to use of some
50151+ feature implemented by an unloaded module will be restricted to
50152+ root users. Enabling this option helps defend against attacks
50153+ by unprivileged users who abuse the auto-loading behavior to
50154+ cause a vulnerable module to load that is then exploited.
50155+
50156+ If this option prevents a legitimate use of auto-loading for a
50157+ non-root user, the administrator can execute modprobe manually
50158+ with the exact name of the module mentioned in the alert log.
50159+ Alternatively, the administrator can add the module to the list
50160+ of modules loaded at boot by modifying init scripts.
50161+
50162+ Modification of init scripts will most likely be needed on
50163+ Ubuntu servers with encrypted home directory support enabled,
50164+ as the first non-root user logging in will cause the ecb(aes),
50165+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50166+
50167+config GRKERNSEC_HIDESYM
50168+ bool "Hide kernel symbols"
50169+ help
50170+ If you say Y here, getting information on loaded modules, and
50171+ displaying all kernel symbols through a syscall will be restricted
50172+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50173+ /proc/kallsyms will be restricted to the root user. The RBAC
50174+ system can hide that entry even from root.
50175+
50176+ This option also prevents leaking of kernel addresses through
50177+ several /proc entries.
50178+
50179+ Note that this option is only effective provided the following
50180+ conditions are met:
50181+ 1) The kernel using grsecurity is not precompiled by some distribution
50182+ 2) You have also enabled GRKERNSEC_DMESG
50183+ 3) You are using the RBAC system and hiding other files such as your
50184+ kernel image and System.map. Alternatively, enabling this option
50185+ causes the permissions on /boot, /lib/modules, and the kernel
50186+ source directory to change at compile time to prevent
50187+ reading by non-root users.
50188+ If the above conditions are met, this option will aid in providing a
50189+ useful protection against local kernel exploitation of overflows
50190+ and arbitrary read/write vulnerabilities.
50191+
50192+config GRKERNSEC_KERN_LOCKOUT
50193+ bool "Active kernel exploit response"
50194+ depends on X86 || ARM || PPC || SPARC
50195+ help
50196+ If you say Y here, when a PaX alert is triggered due to suspicious
50197+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50198+ or an OOPs occurs due to bad memory accesses, instead of just
50199+ terminating the offending process (and potentially allowing
50200+ a subsequent exploit from the same user), we will take one of two
50201+ actions:
50202+ If the user was root, we will panic the system
50203+ If the user was non-root, we will log the attempt, terminate
50204+ all processes owned by the user, then prevent them from creating
50205+ any new processes until the system is restarted
50206+ This deters repeated kernel exploitation/bruteforcing attempts
50207+ and is useful for later forensics.
50208+
50209+endmenu
50210+menu "Role Based Access Control Options"
50211+depends on GRKERNSEC
50212+
50213+config GRKERNSEC_RBAC_DEBUG
50214+ bool
50215+
50216+config GRKERNSEC_NO_RBAC
50217+ bool "Disable RBAC system"
50218+ help
50219+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50220+ preventing the RBAC system from being enabled. You should only say Y
50221+ here if you have no intention of using the RBAC system, so as to prevent
50222+ an attacker with root access from misusing the RBAC system to hide files
50223+ and processes when loadable module support and /dev/[k]mem have been
50224+ locked down.
50225+
50226+config GRKERNSEC_ACL_HIDEKERN
50227+ bool "Hide kernel processes"
50228+ help
50229+ If you say Y here, all kernel threads will be hidden to all
50230+ processes but those whose subject has the "view hidden processes"
50231+ flag.
50232+
50233+config GRKERNSEC_ACL_MAXTRIES
50234+ int "Maximum tries before password lockout"
50235+ default 3
50236+ help
50237+ This option enforces the maximum number of times a user can attempt
50238+ to authorize themselves with the grsecurity RBAC system before being
50239+ denied the ability to attempt authorization again for a specified time.
50240+ The lower the number, the harder it will be to brute-force a password.
50241+
50242+config GRKERNSEC_ACL_TIMEOUT
50243+ int "Time to wait after max password tries, in seconds"
50244+ default 30
50245+ help
50246+ This option specifies the time the user must wait after attempting to
50247+ authorize to the RBAC system with the maximum number of invalid
50248+ passwords. The higher the number, the harder it will be to brute-force
50249+ a password.
50250+
50251+endmenu
50252+menu "Filesystem Protections"
50253+depends on GRKERNSEC
50254+
50255+config GRKERNSEC_PROC
50256+ bool "Proc restrictions"
50257+ help
50258+ If you say Y here, the permissions of the /proc filesystem
50259+ will be altered to enhance system security and privacy. You MUST
50260+ choose either a user only restriction or a user and group restriction.
50261+ Depending upon the option you choose, you can either restrict users to
50262+ see only the processes they themselves run, or choose a group that can
50263+ view all processes and files normally restricted to root if you choose
50264+ the "restrict to user only" option. NOTE: If you're running identd as
50265+ a non-root user, you will have to run it as the group you specify here.
50266+
50267+config GRKERNSEC_PROC_USER
50268+ bool "Restrict /proc to user only"
50269+ depends on GRKERNSEC_PROC
50270+ help
50271+ If you say Y here, non-root users will only be able to view their own
50272+ processes, and restricts them from viewing network-related information,
50273+ and viewing kernel symbol and module information.
50274+
50275+config GRKERNSEC_PROC_USERGROUP
50276+ bool "Allow special group"
50277+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50278+ help
50279+ If you say Y here, you will be able to select a group that will be
50280+ able to view all processes and network-related information. If you've
50281+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50282+ remain hidden. This option is useful if you want to run identd as
50283+ a non-root user.
50284+
50285+config GRKERNSEC_PROC_GID
50286+ int "GID for special group"
50287+ depends on GRKERNSEC_PROC_USERGROUP
50288+ default 1001
50289+
50290+config GRKERNSEC_PROC_ADD
50291+ bool "Additional restrictions"
50292+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50293+ help
50294+ If you say Y here, additional restrictions will be placed on
50295+ /proc that keep normal users from viewing device information and
50296+ slabinfo information that could be useful for exploits.
50297+
50298+config GRKERNSEC_LINK
50299+ bool "Linking restrictions"
50300+ help
50301+ If you say Y here, /tmp race exploits will be prevented, since users
50302+ will no longer be able to follow symlinks owned by other users in
50303+ world-writable +t directories (e.g. /tmp), unless the owner of the
 50304+	  symlink is the owner of the directory. Users will also not be
50305+ able to hardlink to files they do not own. If the sysctl option is
50306+ enabled, a sysctl option with name "linking_restrictions" is created.
50307+
50308+config GRKERNSEC_FIFO
50309+ bool "FIFO restrictions"
50310+ help
50311+ If you say Y here, users will not be able to write to FIFOs they don't
50312+ own in world-writable +t directories (e.g. /tmp), unless the owner of
 50313+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
50314+ option is enabled, a sysctl option with name "fifo_restrictions" is
50315+ created.
50316+
50317+config GRKERNSEC_SYSFS_RESTRICT
50318+ bool "Sysfs/debugfs restriction"
50319+ depends on SYSFS
50320+ help
50321+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50322+ any filesystem normally mounted under it (e.g. debugfs) will only
50323+ be accessible by root. These filesystems generally provide access
50324+ to hardware and debug information that isn't appropriate for unprivileged
50325+ users of the system. Sysfs and debugfs have also become a large source
50326+ of new vulnerabilities, ranging from infoleaks to local compromise.
50327+ There has been very little oversight with an eye toward security involved
50328+ in adding new exporters of information to these filesystems, so their
50329+ use is discouraged.
50330+ This option is equivalent to a chmod 0700 of the mount paths.
50331+
50332+config GRKERNSEC_ROFS
50333+ bool "Runtime read-only mount protection"
50334+ help
50335+ If you say Y here, a sysctl option with name "romount_protect" will
50336+ be created. By setting this option to 1 at runtime, filesystems
50337+ will be protected in the following ways:
50338+ * No new writable mounts will be allowed
50339+ * Existing read-only mounts won't be able to be remounted read/write
50340+ * Write operations will be denied on all block devices
50341+ This option acts independently of grsec_lock: once it is set to 1,
50342+ it cannot be turned off. Therefore, please be mindful of the resulting
50343+ behavior if this option is enabled in an init script on a read-only
50344+ filesystem. This feature is mainly intended for secure embedded systems.
50345+
50346+config GRKERNSEC_CHROOT
50347+ bool "Chroot jail restrictions"
50348+ help
50349+ If you say Y here, you will be able to choose several options that will
50350+ make breaking out of a chrooted jail much more difficult. If you
50351+ encounter no software incompatibilities with the following options, it
50352+ is recommended that you enable each one.
50353+
50354+config GRKERNSEC_CHROOT_MOUNT
50355+ bool "Deny mounts"
50356+ depends on GRKERNSEC_CHROOT
50357+ help
50358+ If you say Y here, processes inside a chroot will not be able to
50359+ mount or remount filesystems. If the sysctl option is enabled, a
50360+ sysctl option with name "chroot_deny_mount" is created.
50361+
50362+config GRKERNSEC_CHROOT_DOUBLE
50363+ bool "Deny double-chroots"
50364+ depends on GRKERNSEC_CHROOT
50365+ help
50366+ If you say Y here, processes inside a chroot will not be able to chroot
50367+ again outside the chroot. This is a widely used method of breaking
50368+ out of a chroot jail and should not be allowed. If the sysctl
50369+ option is enabled, a sysctl option with name
50370+ "chroot_deny_chroot" is created.
50371+
50372+config GRKERNSEC_CHROOT_PIVOT
50373+ bool "Deny pivot_root in chroot"
50374+ depends on GRKERNSEC_CHROOT
50375+ help
50376+ If you say Y here, processes inside a chroot will not be able to use
50377+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50378+ works similar to chroot in that it changes the root filesystem. This
50379+ function could be misused in a chrooted process to attempt to break out
50380+ of the chroot, and therefore should not be allowed. If the sysctl
50381+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50382+ created.
50383+
50384+config GRKERNSEC_CHROOT_CHDIR
50385+ bool "Enforce chdir(\"/\") on all chroots"
50386+ depends on GRKERNSEC_CHROOT
50387+ help
50388+ If you say Y here, the current working directory of all newly-chrooted
 50389+	  applications will be set to the root directory of the chroot.
50390+ The man page on chroot(2) states:
50391+ Note that this call does not change the current working
50392+ directory, so that `.' can be outside the tree rooted at
50393+ `/'. In particular, the super-user can escape from a
50394+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50395+
50396+ It is recommended that you say Y here, since it's not known to break
50397+ any software. If the sysctl option is enabled, a sysctl option with
50398+ name "chroot_enforce_chdir" is created.
50399+
50400+config GRKERNSEC_CHROOT_CHMOD
50401+ bool "Deny (f)chmod +s"
50402+ depends on GRKERNSEC_CHROOT
50403+ help
50404+ If you say Y here, processes inside a chroot will not be able to chmod
50405+ or fchmod files to make them have suid or sgid bits. This protects
50406+ against another published method of breaking a chroot. If the sysctl
50407+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50408+ created.
50409+
50410+config GRKERNSEC_CHROOT_FCHDIR
50411+ bool "Deny fchdir out of chroot"
50412+ depends on GRKERNSEC_CHROOT
50413+ help
50414+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50415+ to a file descriptor of the chrooting process that points to a directory
50416+ outside the filesystem will be stopped. If the sysctl option
50417+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50418+
50419+config GRKERNSEC_CHROOT_MKNOD
50420+ bool "Deny mknod"
50421+ depends on GRKERNSEC_CHROOT
50422+ help
50423+ If you say Y here, processes inside a chroot will not be allowed to
50424+ mknod. The problem with using mknod inside a chroot is that it
50425+ would allow an attacker to create a device entry that is the same
 50426+	  as one on the physical root of your system, which could be
 50427+	  anything from the console device to a device for your hard drive (which
50428+ they could then use to wipe the drive or steal data). It is recommended
50429+ that you say Y here, unless you run into software incompatibilities.
50430+ If the sysctl option is enabled, a sysctl option with name
50431+ "chroot_deny_mknod" is created.
50432+
50433+config GRKERNSEC_CHROOT_SHMAT
50434+ bool "Deny shmat() out of chroot"
50435+ depends on GRKERNSEC_CHROOT
50436+ help
50437+ If you say Y here, processes inside a chroot will not be able to attach
50438+ to shared memory segments that were created outside of the chroot jail.
50439+ It is recommended that you say Y here. If the sysctl option is enabled,
50440+ a sysctl option with name "chroot_deny_shmat" is created.
50441+
50442+config GRKERNSEC_CHROOT_UNIX
50443+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50444+ depends on GRKERNSEC_CHROOT
50445+ help
50446+ If you say Y here, processes inside a chroot will not be able to
50447+ connect to abstract (meaning not belonging to a filesystem) Unix
50448+ domain sockets that were bound outside of a chroot. It is recommended
50449+ that you say Y here. If the sysctl option is enabled, a sysctl option
50450+ with name "chroot_deny_unix" is created.
50451+
50452+config GRKERNSEC_CHROOT_FINDTASK
50453+ bool "Protect outside processes"
50454+ depends on GRKERNSEC_CHROOT
50455+ help
50456+ If you say Y here, processes inside a chroot will not be able to
50457+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50458+ getsid, or view any process outside of the chroot. If the sysctl
50459+ option is enabled, a sysctl option with name "chroot_findtask" is
50460+ created.
50461+
50462+config GRKERNSEC_CHROOT_NICE
50463+ bool "Restrict priority changes"
50464+ depends on GRKERNSEC_CHROOT
50465+ help
50466+ If you say Y here, processes inside a chroot will not be able to raise
50467+ the priority of processes in the chroot, or alter the priority of
50468+ processes outside the chroot. This provides more security than simply
50469+ removing CAP_SYS_NICE from the process' capability set. If the
50470+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50471+ is created.
50472+
50473+config GRKERNSEC_CHROOT_SYSCTL
50474+ bool "Deny sysctl writes"
50475+ depends on GRKERNSEC_CHROOT
50476+ help
50477+ If you say Y here, an attacker in a chroot will not be able to
50478+ write to sysctl entries, either by sysctl(2) or through a /proc
50479+ interface. It is strongly recommended that you say Y here. If the
50480+ sysctl option is enabled, a sysctl option with name
50481+ "chroot_deny_sysctl" is created.
50482+
50483+config GRKERNSEC_CHROOT_CAPS
50484+ bool "Capability restrictions"
50485+ depends on GRKERNSEC_CHROOT
50486+ help
50487+ If you say Y here, the capabilities on all root processes within a
50488+ chroot jail will be lowered to stop module insertion, raw i/o,
50489+ system and net admin tasks, rebooting the system, modifying immutable
50490+ files, modifying IPC owned by another, and changing the system time.
50491+ This is left an option because it can break some apps. Disable this
50492+ if your chrooted apps are having problems performing those kinds of
50493+ tasks. If the sysctl option is enabled, a sysctl option with
50494+ name "chroot_caps" is created.
50495+
50496+endmenu
50497+menu "Kernel Auditing"
50498+depends on GRKERNSEC
50499+
50500+config GRKERNSEC_AUDIT_GROUP
50501+ bool "Single group for auditing"
50502+ help
50503+ If you say Y here, the exec, chdir, and (un)mount logging features
50504+ will only operate on a group you specify. This option is recommended
50505+ if you only want to watch certain users instead of having a large
50506+ amount of logs from the entire system. If the sysctl option is enabled,
50507+ a sysctl option with name "audit_group" is created.
50508+
50509+config GRKERNSEC_AUDIT_GID
50510+ int "GID for auditing"
50511+ depends on GRKERNSEC_AUDIT_GROUP
50512+ default 1007
50513+
50514+config GRKERNSEC_EXECLOG
50515+ bool "Exec logging"
50516+ help
50517+ If you say Y here, all execve() calls will be logged (since the
50518+ other exec*() calls are frontends to execve(), all execution
50519+ will be logged). Useful for shell-servers that like to keep track
50520+ of their users. If the sysctl option is enabled, a sysctl option with
50521+ name "exec_logging" is created.
50522+ WARNING: This option when enabled will produce a LOT of logs, especially
50523+ on an active system.
50524+
50525+config GRKERNSEC_RESLOG
50526+ bool "Resource logging"
50527+ help
50528+ If you say Y here, all attempts to overstep resource limits will
50529+ be logged with the resource name, the requested size, and the current
50530+ limit. It is highly recommended that you say Y here. If the sysctl
50531+ option is enabled, a sysctl option with name "resource_logging" is
50532+ created. If the RBAC system is enabled, the sysctl value is ignored.
50533+
50534+config GRKERNSEC_CHROOT_EXECLOG
50535+ bool "Log execs within chroot"
50536+ help
50537+ If you say Y here, all executions inside a chroot jail will be logged
50538+ to syslog. This can cause a large amount of logs if certain
50539+ applications (eg. djb's daemontools) are installed on the system, and
50540+ is therefore left as an option. If the sysctl option is enabled, a
50541+ sysctl option with name "chroot_execlog" is created.
50542+
50543+config GRKERNSEC_AUDIT_PTRACE
50544+ bool "Ptrace logging"
50545+ help
50546+ If you say Y here, all attempts to attach to a process via ptrace
50547+ will be logged. If the sysctl option is enabled, a sysctl option
50548+ with name "audit_ptrace" is created.
50549+
50550+config GRKERNSEC_AUDIT_CHDIR
50551+ bool "Chdir logging"
50552+ help
50553+ If you say Y here, all chdir() calls will be logged. If the sysctl
50554+ option is enabled, a sysctl option with name "audit_chdir" is created.
50555+
50556+config GRKERNSEC_AUDIT_MOUNT
50557+ bool "(Un)Mount logging"
50558+ help
50559+ If you say Y here, all mounts and unmounts will be logged. If the
50560+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50561+ created.
50562+
50563+config GRKERNSEC_SIGNAL
50564+ bool "Signal logging"
50565+ help
50566+ If you say Y here, certain important signals will be logged, such as
 50567+	  SIGSEGV, which will as a result inform you of when an error in a program
50568+ occurred, which in some cases could mean a possible exploit attempt.
50569+ If the sysctl option is enabled, a sysctl option with name
50570+ "signal_logging" is created.
50571+
50572+config GRKERNSEC_FORKFAIL
50573+ bool "Fork failure logging"
50574+ help
50575+ If you say Y here, all failed fork() attempts will be logged.
50576+ This could suggest a fork bomb, or someone attempting to overstep
50577+ their process limit. If the sysctl option is enabled, a sysctl option
50578+ with name "forkfail_logging" is created.
50579+
50580+config GRKERNSEC_TIME
50581+ bool "Time change logging"
50582+ help
50583+ If you say Y here, any changes of the system clock will be logged.
50584+ If the sysctl option is enabled, a sysctl option with name
50585+ "timechange_logging" is created.
50586+
50587+config GRKERNSEC_PROC_IPADDR
50588+ bool "/proc/<pid>/ipaddr support"
50589+ help
50590+ If you say Y here, a new entry will be added to each /proc/<pid>
50591+ directory that contains the IP address of the person using the task.
50592+ The IP is carried across local TCP and AF_UNIX stream sockets.
50593+ This information can be useful for IDS/IPSes to perform remote response
50594+ to a local attack. The entry is readable by only the owner of the
50595+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50596+ the RBAC system), and thus does not create privacy concerns.
50597+
50598+config GRKERNSEC_RWXMAP_LOG
50599+ bool 'Denied RWX mmap/mprotect logging'
50600+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50601+ help
50602+ If you say Y here, calls to mmap() and mprotect() with explicit
50603+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50604+ denied by the PAX_MPROTECT feature. If the sysctl option is
50605+ enabled, a sysctl option with name "rwxmap_logging" is created.
50606+
50607+config GRKERNSEC_AUDIT_TEXTREL
50608+ bool 'ELF text relocations logging (READ HELP)'
50609+ depends on PAX_MPROTECT
50610+ help
50611+ If you say Y here, text relocations will be logged with the filename
50612+ of the offending library or binary. The purpose of the feature is
50613+ to help Linux distribution developers get rid of libraries and
50614+ binaries that need text relocations which hinder the future progress
50615+ of PaX. Only Linux distribution developers should say Y here, and
50616+ never on a production machine, as this option creates an information
50617+ leak that could aid an attacker in defeating the randomization of
50618+ a single memory region. If the sysctl option is enabled, a sysctl
50619+ option with name "audit_textrel" is created.
50620+
50621+endmenu
50622+
50623+menu "Executable Protections"
50624+depends on GRKERNSEC
50625+
50626+config GRKERNSEC_DMESG
50627+ bool "Dmesg(8) restriction"
50628+ help
50629+ If you say Y here, non-root users will not be able to use dmesg(8)
50630+ to view up to the last 4kb of messages in the kernel's log buffer.
50631+ The kernel's log buffer often contains kernel addresses and other
50632+ identifying information useful to an attacker in fingerprinting a
50633+ system for a targeted exploit.
50634+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50635+ created.
50636+
50637+config GRKERNSEC_HARDEN_PTRACE
50638+ bool "Deter ptrace-based process snooping"
50639+ help
50640+ If you say Y here, TTY sniffers and other malicious monitoring
50641+ programs implemented through ptrace will be defeated. If you
50642+ have been using the RBAC system, this option has already been
50643+ enabled for several years for all users, with the ability to make
50644+ fine-grained exceptions.
50645+
50646+ This option only affects the ability of non-root users to ptrace
50647+ processes that are not a descendent of the ptracing process.
50648+ This means that strace ./binary and gdb ./binary will still work,
50649+ but attaching to arbitrary processes will not. If the sysctl
50650+ option is enabled, a sysctl option with name "harden_ptrace" is
50651+ created.
50652+
50653+config GRKERNSEC_TPE
50654+ bool "Trusted Path Execution (TPE)"
50655+ help
50656+ If you say Y here, you will be able to choose a gid to add to the
50657+ supplementary groups of users you want to mark as "untrusted."
50658+ These users will not be able to execute any files that are not in
50659+ root-owned directories writable only by root. If the sysctl option
50660+ is enabled, a sysctl option with name "tpe" is created.
50661+
50662+config GRKERNSEC_TPE_ALL
50663+ bool "Partially restrict all non-root users"
50664+ depends on GRKERNSEC_TPE
50665+ help
50666+ If you say Y here, all non-root users will be covered under
50667+ a weaker TPE restriction. This is separate from, and in addition to,
50668+ the main TPE options that you have selected elsewhere. Thus, if a
50669+ "trusted" GID is chosen, this restriction applies to even that GID.
50670+ Under this restriction, all non-root users will only be allowed to
50671+ execute files in directories they own that are not group or
50672+ world-writable, or in directories owned by root and writable only by
50673+ root. If the sysctl option is enabled, a sysctl option with name
50674+ "tpe_restrict_all" is created.
50675+
50676+config GRKERNSEC_TPE_INVERT
50677+ bool "Invert GID option"
50678+ depends on GRKERNSEC_TPE
50679+ help
50680+ If you say Y here, the group you specify in the TPE configuration will
50681+ decide what group TPE restrictions will be *disabled* for. This
50682+ option is useful if you want TPE restrictions to be applied to most
50683+ users on the system. If the sysctl option is enabled, a sysctl option
50684+ with name "tpe_invert" is created. Unlike other sysctl options, this
50685+ entry will default to on for backward-compatibility.
50686+
50687+config GRKERNSEC_TPE_GID
50688+ int "GID for untrusted users"
50689+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50690+ default 1005
50691+ help
50692+ Setting this GID determines what group TPE restrictions will be
50693+ *enabled* for. If the sysctl option is enabled, a sysctl option
50694+ with name "tpe_gid" is created.
50695+
50696+config GRKERNSEC_TPE_GID
50697+ int "GID for trusted users"
50698+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50699+ default 1005
50700+ help
50701+ Setting this GID determines what group TPE restrictions will be
50702+ *disabled* for. If the sysctl option is enabled, a sysctl option
50703+ with name "tpe_gid" is created.
50704+
50705+endmenu
50706+menu "Network Protections"
50707+depends on GRKERNSEC
50708+
50709+config GRKERNSEC_RANDNET
50710+ bool "Larger entropy pools"
50711+ help
50712+ If you say Y here, the entropy pools used for many features of Linux
50713+ and grsecurity will be doubled in size. Since several grsecurity
50714+ features use additional randomness, it is recommended that you say Y
50715+ here. Saying Y here has a similar effect as modifying
50716+ /proc/sys/kernel/random/poolsize.
50717+
50718+config GRKERNSEC_BLACKHOLE
50719+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50720+ depends on NET
50721+ help
50722+ If you say Y here, neither TCP resets nor ICMP
50723+ destination-unreachable packets will be sent in response to packets
50724+ sent to ports for which no associated listening process exists.
50725+ This feature supports both IPV4 and IPV6 and exempts the
50726+ loopback interface from blackholing. Enabling this feature
50727+ makes a host more resilient to DoS attacks and reduces network
50728+ visibility against scanners.
50729+
50730+ The blackhole feature as-implemented is equivalent to the FreeBSD
50731+ blackhole feature, as it prevents RST responses to all packets, not
50732+ just SYNs. Under most application behavior this causes no
50733+ problems, but applications (like haproxy) may not close certain
50734+ connections in a way that cleanly terminates them on the remote
50735+ end, leaving the remote host in LAST_ACK state. Because of this
50736+ side-effect and to prevent intentional LAST_ACK DoSes, this
50737+ feature also adds automatic mitigation against such attacks.
50738+ The mitigation drastically reduces the amount of time a socket
50739+ can spend in LAST_ACK state. If you're using haproxy and not
50740+ all servers it connects to have this option enabled, consider
50741+ disabling this feature on the haproxy host.
50742+
50743+ If the sysctl option is enabled, two sysctl options with names
50744+ "ip_blackhole" and "lastack_retries" will be created.
50745+ While "ip_blackhole" takes the standard zero/non-zero on/off
50746+ toggle, "lastack_retries" uses the same kinds of values as
50747+ "tcp_retries1" and "tcp_retries2". The default value of 4
50748+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50749+ state.
50750+
50751+config GRKERNSEC_SOCKET
50752+ bool "Socket restrictions"
50753+ depends on NET
50754+ help
50755+ If you say Y here, you will be able to choose from several options.
50756+ If you assign a GID on your system and add it to the supplementary
50757+ groups of users you want to restrict socket access to, this patch
50758+ will perform up to three things, based on the option(s) you choose.
50759+
50760+config GRKERNSEC_SOCKET_ALL
50761+ bool "Deny any sockets to group"
50762+ depends on GRKERNSEC_SOCKET
50763+ help
50764+ If you say Y here, you will be able to choose a GID of whose users will
50765+ be unable to connect to other hosts from your machine or run server
50766+ applications from your machine. If the sysctl option is enabled, a
50767+ sysctl option with name "socket_all" is created.
50768+
50769+config GRKERNSEC_SOCKET_ALL_GID
50770+ int "GID to deny all sockets for"
50771+ depends on GRKERNSEC_SOCKET_ALL
50772+ default 1004
50773+ help
50774+ Here you can choose the GID to disable socket access for. Remember to
50775+ add the users you want socket access disabled for to the GID
50776+ specified here. If the sysctl option is enabled, a sysctl option
50777+ with name "socket_all_gid" is created.
50778+
50779+config GRKERNSEC_SOCKET_CLIENT
50780+ bool "Deny client sockets to group"
50781+ depends on GRKERNSEC_SOCKET
50782+ help
50783+ If you say Y here, you will be able to choose a GID of whose users will
50784+ be unable to connect to other hosts from your machine, but will be
50785+ able to run servers. If this option is enabled, all users in the group
50786+ you specify will have to use passive mode when initiating ftp transfers
50787+ from the shell on your machine. If the sysctl option is enabled, a
50788+ sysctl option with name "socket_client" is created.
50789+
50790+config GRKERNSEC_SOCKET_CLIENT_GID
50791+ int "GID to deny client sockets for"
50792+ depends on GRKERNSEC_SOCKET_CLIENT
50793+ default 1003
50794+ help
50795+ Here you can choose the GID to disable client socket access for.
50796+ Remember to add the users you want client socket access disabled for to
50797+ the GID specified here. If the sysctl option is enabled, a sysctl
50798+ option with name "socket_client_gid" is created.
50799+
50800+config GRKERNSEC_SOCKET_SERVER
50801+ bool "Deny server sockets to group"
50802+ depends on GRKERNSEC_SOCKET
50803+ help
50804+ If you say Y here, you will be able to choose a GID of whose users will
50805+ be unable to run server applications from your machine. If the sysctl
50806+ option is enabled, a sysctl option with name "socket_server" is created.
50807+
50808+config GRKERNSEC_SOCKET_SERVER_GID
50809+ int "GID to deny server sockets for"
50810+ depends on GRKERNSEC_SOCKET_SERVER
50811+ default 1002
50812+ help
50813+ Here you can choose the GID to disable server socket access for.
50814+ Remember to add the users you want server socket access disabled for to
50815+ the GID specified here. If the sysctl option is enabled, a sysctl
50816+ option with name "socket_server_gid" is created.
50817+
50818+endmenu
50819+menu "Sysctl support"
50820+depends on GRKERNSEC && SYSCTL
50821+
50822+config GRKERNSEC_SYSCTL
50823+ bool "Sysctl support"
50824+ help
50825+ If you say Y here, you will be able to change the options that
50826+ grsecurity runs with at bootup, without having to recompile your
50827+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50828+ to enable (1) or disable (0) various features. All the sysctl entries
50829+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50830+ All features enabled in the kernel configuration are disabled at boot
50831+ if you do not say Y to the "Turn on features by default" option.
50832+ All options should be set at startup, and the grsec_lock entry should
50833+ be set to a non-zero value after all the options are set.
50834+ *THIS IS EXTREMELY IMPORTANT*
50835+
50836+config GRKERNSEC_SYSCTL_DISTRO
50837+ bool "Extra sysctl support for distro makers (READ HELP)"
50838+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50839+ help
50840+ If you say Y here, additional sysctl options will be created
50841+ for features that affect processes running as root. Therefore,
50842+ it is critical when using this option that the grsec_lock entry be
50843+ enabled after boot. Only distros with prebuilt kernel packages
50844+ with this option enabled that can ensure grsec_lock is enabled
50845+ after boot should use this option.
50846+ *Failure to set grsec_lock after boot makes all grsec features
50847+ this option covers useless*
50848+
50849+ Currently this option creates the following sysctl entries:
50850+ "Disable Privileged I/O": "disable_priv_io"
50851+
50852+config GRKERNSEC_SYSCTL_ON
50853+ bool "Turn on features by default"
50854+ depends on GRKERNSEC_SYSCTL
50855+ help
50856+ If you say Y here, instead of having all features enabled in the
50857+ kernel configuration disabled at boot time, the features will be
50858+ enabled at boot time. It is recommended you say Y here unless
50859+ there is some reason you would want all sysctl-tunable features to
50860+ be disabled by default. As mentioned elsewhere, it is important
50861+ to enable the grsec_lock entry once you have finished modifying
50862+ the sysctl entries.
50863+
50864+endmenu
50865+menu "Logging Options"
50866+depends on GRKERNSEC
50867+
50868+config GRKERNSEC_FLOODTIME
50869+ int "Seconds in between log messages (minimum)"
50870+ default 10
50871+ help
50872+ This option allows you to enforce the number of seconds between
50873+ grsecurity log messages. The default should be suitable for most
50874+ people, however, if you choose to change it, choose a value small enough
50875+ to allow informative logs to be produced, but large enough to
50876+ prevent flooding.
50877+
50878+config GRKERNSEC_FLOODBURST
50879+ int "Number of messages in a burst (maximum)"
50880+ default 4
50881+ help
50882+ This option allows you to choose the maximum number of messages allowed
50883+ within the flood time interval you chose in a separate option. The
50884+ default should be suitable for most people, however if you find that
50885+ many of your logs are being interpreted as flooding, you may want to
50886+ raise this value.
50887+
50888+endmenu
50889+
50890+endmenu
50891diff -urNp linux-3.0.3/grsecurity/Makefile linux-3.0.3/grsecurity/Makefile
50892--- linux-3.0.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50893+++ linux-3.0.3/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50894@@ -0,0 +1,34 @@
50895+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50896+# during 2001-2009 it has been completely redesigned by Brad Spengler
50897+# into an RBAC system
50898+#
50899+# All code in this directory and various hooks inserted throughout the kernel
50900+# are copyright Brad Spengler - Open Source Security, Inc., and released
50901+# under the GPL v2 or higher
50902+
50903+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50904+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50905+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50906+
50907+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50908+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50909+ gracl_learn.o grsec_log.o
50910+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50911+
50912+ifdef CONFIG_NET
50913+obj-y += grsec_sock.o
50914+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50915+endif
50916+
50917+ifndef CONFIG_GRKERNSEC
50918+obj-y += grsec_disabled.o
50919+endif
50920+
50921+ifdef CONFIG_GRKERNSEC_HIDESYM
50922+extra-y := grsec_hidesym.o
50923+$(obj)/grsec_hidesym.o:
50924+ @-chmod -f 500 /boot
50925+ @-chmod -f 500 /lib/modules
50926+ @-chmod -f 700 .
50927+ @echo ' grsec: protected kernel image paths'
50928+endif
50929diff -urNp linux-3.0.3/include/acpi/acpi_bus.h linux-3.0.3/include/acpi/acpi_bus.h
50930--- linux-3.0.3/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50931+++ linux-3.0.3/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50932@@ -107,7 +107,7 @@ struct acpi_device_ops {
50933 acpi_op_bind bind;
50934 acpi_op_unbind unbind;
50935 acpi_op_notify notify;
50936-};
50937+} __no_const;
50938
50939 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50940
50941diff -urNp linux-3.0.3/include/asm-generic/atomic-long.h linux-3.0.3/include/asm-generic/atomic-long.h
50942--- linux-3.0.3/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50943+++ linux-3.0.3/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50944@@ -22,6 +22,12 @@
50945
50946 typedef atomic64_t atomic_long_t;
50947
50948+#ifdef CONFIG_PAX_REFCOUNT
50949+typedef atomic64_unchecked_t atomic_long_unchecked_t;
50950+#else
50951+typedef atomic64_t atomic_long_unchecked_t;
50952+#endif
50953+
50954 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50955
50956 static inline long atomic_long_read(atomic_long_t *l)
50957@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50958 return (long)atomic64_read(v);
50959 }
50960
50961+#ifdef CONFIG_PAX_REFCOUNT
50962+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50963+{
50964+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50965+
50966+ return (long)atomic64_read_unchecked(v);
50967+}
50968+#endif
50969+
50970 static inline void atomic_long_set(atomic_long_t *l, long i)
50971 {
50972 atomic64_t *v = (atomic64_t *)l;
50973@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
50974 atomic64_set(v, i);
50975 }
50976
50977+#ifdef CONFIG_PAX_REFCOUNT
50978+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50979+{
50980+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50981+
50982+ atomic64_set_unchecked(v, i);
50983+}
50984+#endif
50985+
50986 static inline void atomic_long_inc(atomic_long_t *l)
50987 {
50988 atomic64_t *v = (atomic64_t *)l;
50989@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
50990 atomic64_inc(v);
50991 }
50992
50993+#ifdef CONFIG_PAX_REFCOUNT
50994+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
50995+{
50996+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50997+
50998+ atomic64_inc_unchecked(v);
50999+}
51000+#endif
51001+
51002 static inline void atomic_long_dec(atomic_long_t *l)
51003 {
51004 atomic64_t *v = (atomic64_t *)l;
51005@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51006 atomic64_dec(v);
51007 }
51008
51009+#ifdef CONFIG_PAX_REFCOUNT
51010+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51011+{
51012+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51013+
51014+ atomic64_dec_unchecked(v);
51015+}
51016+#endif
51017+
51018 static inline void atomic_long_add(long i, atomic_long_t *l)
51019 {
51020 atomic64_t *v = (atomic64_t *)l;
51021@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51022 atomic64_add(i, v);
51023 }
51024
51025+#ifdef CONFIG_PAX_REFCOUNT
51026+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51027+{
51028+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51029+
51030+ atomic64_add_unchecked(i, v);
51031+}
51032+#endif
51033+
51034 static inline void atomic_long_sub(long i, atomic_long_t *l)
51035 {
51036 atomic64_t *v = (atomic64_t *)l;
51037@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51038 atomic64_sub(i, v);
51039 }
51040
51041+#ifdef CONFIG_PAX_REFCOUNT
51042+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51043+{
51044+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51045+
51046+ atomic64_sub_unchecked(i, v);
51047+}
51048+#endif
51049+
51050 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51051 {
51052 atomic64_t *v = (atomic64_t *)l;
51053@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51054 return (long)atomic64_inc_return(v);
51055 }
51056
51057+#ifdef CONFIG_PAX_REFCOUNT
51058+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51059+{
51060+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51061+
51062+ return (long)atomic64_inc_return_unchecked(v);
51063+}
51064+#endif
51065+
51066 static inline long atomic_long_dec_return(atomic_long_t *l)
51067 {
51068 atomic64_t *v = (atomic64_t *)l;
51069@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51070
51071 typedef atomic_t atomic_long_t;
51072
51073+#ifdef CONFIG_PAX_REFCOUNT
51074+typedef atomic_unchecked_t atomic_long_unchecked_t;
51075+#else
51076+typedef atomic_t atomic_long_unchecked_t;
51077+#endif
51078+
51079 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51080 static inline long atomic_long_read(atomic_long_t *l)
51081 {
51082@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51083 return (long)atomic_read(v);
51084 }
51085
51086+#ifdef CONFIG_PAX_REFCOUNT
51087+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51088+{
51089+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51090+
51091+ return (long)atomic_read_unchecked(v);
51092+}
51093+#endif
51094+
51095 static inline void atomic_long_set(atomic_long_t *l, long i)
51096 {
51097 atomic_t *v = (atomic_t *)l;
51098@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51099 atomic_set(v, i);
51100 }
51101
51102+#ifdef CONFIG_PAX_REFCOUNT
51103+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51104+{
51105+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51106+
51107+ atomic_set_unchecked(v, i);
51108+}
51109+#endif
51110+
51111 static inline void atomic_long_inc(atomic_long_t *l)
51112 {
51113 atomic_t *v = (atomic_t *)l;
51114@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51115 atomic_inc(v);
51116 }
51117
51118+#ifdef CONFIG_PAX_REFCOUNT
51119+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51120+{
51121+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51122+
51123+ atomic_inc_unchecked(v);
51124+}
51125+#endif
51126+
51127 static inline void atomic_long_dec(atomic_long_t *l)
51128 {
51129 atomic_t *v = (atomic_t *)l;
51130@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51131 atomic_dec(v);
51132 }
51133
51134+#ifdef CONFIG_PAX_REFCOUNT
51135+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51136+{
51137+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51138+
51139+ atomic_dec_unchecked(v);
51140+}
51141+#endif
51142+
51143 static inline void atomic_long_add(long i, atomic_long_t *l)
51144 {
51145 atomic_t *v = (atomic_t *)l;
51146@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51147 atomic_add(i, v);
51148 }
51149
51150+#ifdef CONFIG_PAX_REFCOUNT
51151+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51152+{
51153+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51154+
51155+ atomic_add_unchecked(i, v);
51156+}
51157+#endif
51158+
51159 static inline void atomic_long_sub(long i, atomic_long_t *l)
51160 {
51161 atomic_t *v = (atomic_t *)l;
51162@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51163 atomic_sub(i, v);
51164 }
51165
51166+#ifdef CONFIG_PAX_REFCOUNT
51167+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51168+{
51169+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51170+
51171+ atomic_sub_unchecked(i, v);
51172+}
51173+#endif
51174+
51175 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51176 {
51177 atomic_t *v = (atomic_t *)l;
51178@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51179 return (long)atomic_inc_return(v);
51180 }
51181
51182+#ifdef CONFIG_PAX_REFCOUNT
51183+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51184+{
51185+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51186+
51187+ return (long)atomic_inc_return_unchecked(v);
51188+}
51189+#endif
51190+
51191 static inline long atomic_long_dec_return(atomic_long_t *l)
51192 {
51193 atomic_t *v = (atomic_t *)l;
51194@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51195
51196 #endif /* BITS_PER_LONG == 64 */
51197
51198+#ifdef CONFIG_PAX_REFCOUNT
51199+static inline void pax_refcount_needs_these_functions(void)
51200+{
51201+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
51202+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51203+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51204+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51205+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51206+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51207+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51208+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51209+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51210+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51211+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51212+
51213+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51214+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51215+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51216+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51217+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51218+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51219+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51220+}
51221+#else
51222+#define atomic_read_unchecked(v) atomic_read(v)
51223+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51224+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51225+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51226+#define atomic_inc_unchecked(v) atomic_inc(v)
51227+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51228+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51229+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51230+#define atomic_dec_unchecked(v) atomic_dec(v)
51231+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51232+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51233+
51234+#define atomic_long_read_unchecked(v) atomic_long_read(v)
51235+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51236+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51237+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51238+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51239+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51240+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51241+#endif
51242+
51243 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51244diff -urNp linux-3.0.3/include/asm-generic/cache.h linux-3.0.3/include/asm-generic/cache.h
51245--- linux-3.0.3/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
51246+++ linux-3.0.3/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
51247@@ -6,7 +6,7 @@
51248 * cache lines need to provide their own cache.h.
51249 */
51250
51251-#define L1_CACHE_SHIFT 5
51252-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51253+#define L1_CACHE_SHIFT 5UL
51254+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51255
51256 #endif /* __ASM_GENERIC_CACHE_H */
51257diff -urNp linux-3.0.3/include/asm-generic/int-l64.h linux-3.0.3/include/asm-generic/int-l64.h
51258--- linux-3.0.3/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51259+++ linux-3.0.3/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51260@@ -46,6 +46,8 @@ typedef unsigned int u32;
51261 typedef signed long s64;
51262 typedef unsigned long u64;
51263
51264+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51265+
51266 #define S8_C(x) x
51267 #define U8_C(x) x ## U
51268 #define S16_C(x) x
51269diff -urNp linux-3.0.3/include/asm-generic/int-ll64.h linux-3.0.3/include/asm-generic/int-ll64.h
51270--- linux-3.0.3/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51271+++ linux-3.0.3/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51272@@ -51,6 +51,8 @@ typedef unsigned int u32;
51273 typedef signed long long s64;
51274 typedef unsigned long long u64;
51275
51276+typedef unsigned long long intoverflow_t;
51277+
51278 #define S8_C(x) x
51279 #define U8_C(x) x ## U
51280 #define S16_C(x) x
51281diff -urNp linux-3.0.3/include/asm-generic/kmap_types.h linux-3.0.3/include/asm-generic/kmap_types.h
51282--- linux-3.0.3/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51283+++ linux-3.0.3/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51284@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51285 KMAP_D(17) KM_NMI,
51286 KMAP_D(18) KM_NMI_PTE,
51287 KMAP_D(19) KM_KDB,
51288+KMAP_D(20) KM_CLEARPAGE,
51289 /*
51290 * Remember to update debug_kmap_atomic() when adding new kmap types!
51291 */
51292-KMAP_D(20) KM_TYPE_NR
51293+KMAP_D(21) KM_TYPE_NR
51294 };
51295
51296 #undef KMAP_D
51297diff -urNp linux-3.0.3/include/asm-generic/pgtable.h linux-3.0.3/include/asm-generic/pgtable.h
51298--- linux-3.0.3/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51299+++ linux-3.0.3/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51300@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51301 #endif /* __HAVE_ARCH_PMD_WRITE */
51302 #endif
51303
51304+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51305+static inline unsigned long pax_open_kernel(void) { return 0; }
51306+#endif
51307+
51308+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51309+static inline unsigned long pax_close_kernel(void) { return 0; }
51310+#endif
51311+
51312 #endif /* !__ASSEMBLY__ */
51313
51314 #endif /* _ASM_GENERIC_PGTABLE_H */
51315diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopmd.h linux-3.0.3/include/asm-generic/pgtable-nopmd.h
51316--- linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51317+++ linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51318@@ -1,14 +1,19 @@
51319 #ifndef _PGTABLE_NOPMD_H
51320 #define _PGTABLE_NOPMD_H
51321
51322-#ifndef __ASSEMBLY__
51323-
51324 #include <asm-generic/pgtable-nopud.h>
51325
51326-struct mm_struct;
51327-
51328 #define __PAGETABLE_PMD_FOLDED
51329
51330+#define PMD_SHIFT PUD_SHIFT
51331+#define PTRS_PER_PMD 1
51332+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51333+#define PMD_MASK (~(PMD_SIZE-1))
51334+
51335+#ifndef __ASSEMBLY__
51336+
51337+struct mm_struct;
51338+
51339 /*
51340 * Having the pmd type consist of a pud gets the size right, and allows
51341 * us to conceptually access the pud entry that this pmd is folded into
51342@@ -16,11 +21,6 @@ struct mm_struct;
51343 */
51344 typedef struct { pud_t pud; } pmd_t;
51345
51346-#define PMD_SHIFT PUD_SHIFT
51347-#define PTRS_PER_PMD 1
51348-#define PMD_SIZE (1UL << PMD_SHIFT)
51349-#define PMD_MASK (~(PMD_SIZE-1))
51350-
51351 /*
51352 * The "pud_xxx()" functions here are trivial for a folded two-level
51353 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51354diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopud.h linux-3.0.3/include/asm-generic/pgtable-nopud.h
51355--- linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51356+++ linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51357@@ -1,10 +1,15 @@
51358 #ifndef _PGTABLE_NOPUD_H
51359 #define _PGTABLE_NOPUD_H
51360
51361-#ifndef __ASSEMBLY__
51362-
51363 #define __PAGETABLE_PUD_FOLDED
51364
51365+#define PUD_SHIFT PGDIR_SHIFT
51366+#define PTRS_PER_PUD 1
51367+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51368+#define PUD_MASK (~(PUD_SIZE-1))
51369+
51370+#ifndef __ASSEMBLY__
51371+
51372 /*
51373 * Having the pud type consist of a pgd gets the size right, and allows
51374 * us to conceptually access the pgd entry that this pud is folded into
51375@@ -12,11 +17,6 @@
51376 */
51377 typedef struct { pgd_t pgd; } pud_t;
51378
51379-#define PUD_SHIFT PGDIR_SHIFT
51380-#define PTRS_PER_PUD 1
51381-#define PUD_SIZE (1UL << PUD_SHIFT)
51382-#define PUD_MASK (~(PUD_SIZE-1))
51383-
51384 /*
51385 * The "pgd_xxx()" functions here are trivial for a folded two-level
51386 * setup: the pud is never bad, and a pud always exists (as it's folded
51387diff -urNp linux-3.0.3/include/asm-generic/vmlinux.lds.h linux-3.0.3/include/asm-generic/vmlinux.lds.h
51388--- linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51389+++ linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51390@@ -217,6 +217,7 @@
51391 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51392 VMLINUX_SYMBOL(__start_rodata) = .; \
51393 *(.rodata) *(.rodata.*) \
51394+ *(.data..read_only) \
51395 *(__vermagic) /* Kernel version magic */ \
51396 . = ALIGN(8); \
51397 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51398@@ -723,17 +724,18 @@
51399 * section in the linker script will go there too. @phdr should have
51400 * a leading colon.
51401 *
51402- * Note that this macros defines __per_cpu_load as an absolute symbol.
51403+ * Note that this macros defines per_cpu_load as an absolute symbol.
51404 * If there is no need to put the percpu section at a predetermined
51405 * address, use PERCPU_SECTION.
51406 */
51407 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51408- VMLINUX_SYMBOL(__per_cpu_load) = .; \
51409- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51410+ per_cpu_load = .; \
51411+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51412 - LOAD_OFFSET) { \
51413+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51414 PERCPU_INPUT(cacheline) \
51415 } phdr \
51416- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51417+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51418
51419 /**
51420 * PERCPU_SECTION - define output section for percpu area, simple version
51421diff -urNp linux-3.0.3/include/drm/drm_crtc_helper.h linux-3.0.3/include/drm/drm_crtc_helper.h
51422--- linux-3.0.3/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51423+++ linux-3.0.3/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51424@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51425
51426 /* disable crtc when not in use - more explicit than dpms off */
51427 void (*disable)(struct drm_crtc *crtc);
51428-};
51429+} __no_const;
51430
51431 struct drm_encoder_helper_funcs {
51432 void (*dpms)(struct drm_encoder *encoder, int mode);
51433@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51434 struct drm_connector *connector);
51435 /* disable encoder when not in use - more explicit than dpms off */
51436 void (*disable)(struct drm_encoder *encoder);
51437-};
51438+} __no_const;
51439
51440 struct drm_connector_helper_funcs {
51441 int (*get_modes)(struct drm_connector *connector);
51442diff -urNp linux-3.0.3/include/drm/drmP.h linux-3.0.3/include/drm/drmP.h
51443--- linux-3.0.3/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51444+++ linux-3.0.3/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51445@@ -73,6 +73,7 @@
51446 #include <linux/workqueue.h>
51447 #include <linux/poll.h>
51448 #include <asm/pgalloc.h>
51449+#include <asm/local.h>
51450 #include "drm.h"
51451
51452 #include <linux/idr.h>
51453@@ -1033,7 +1034,7 @@ struct drm_device {
51454
51455 /** \name Usage Counters */
51456 /*@{ */
51457- int open_count; /**< Outstanding files open */
51458+ local_t open_count; /**< Outstanding files open */
51459 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51460 atomic_t vma_count; /**< Outstanding vma areas open */
51461 int buf_use; /**< Buffers in use -- cannot alloc */
51462@@ -1044,7 +1045,7 @@ struct drm_device {
51463 /*@{ */
51464 unsigned long counters;
51465 enum drm_stat_type types[15];
51466- atomic_t counts[15];
51467+ atomic_unchecked_t counts[15];
51468 /*@} */
51469
51470 struct list_head filelist;
51471diff -urNp linux-3.0.3/include/drm/ttm/ttm_memory.h linux-3.0.3/include/drm/ttm/ttm_memory.h
51472--- linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51473+++ linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51474@@ -47,7 +47,7 @@
51475
51476 struct ttm_mem_shrink {
51477 int (*do_shrink) (struct ttm_mem_shrink *);
51478-};
51479+} __no_const;
51480
51481 /**
51482 * struct ttm_mem_global - Global memory accounting structure.
51483diff -urNp linux-3.0.3/include/linux/a.out.h linux-3.0.3/include/linux/a.out.h
51484--- linux-3.0.3/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51485+++ linux-3.0.3/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51486@@ -39,6 +39,14 @@ enum machine_type {
51487 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51488 };
51489
51490+/* Constants for the N_FLAGS field */
51491+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51492+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51493+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51494+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51495+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51496+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51497+
51498 #if !defined (N_MAGIC)
51499 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51500 #endif
51501diff -urNp linux-3.0.3/include/linux/atmdev.h linux-3.0.3/include/linux/atmdev.h
51502--- linux-3.0.3/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51503+++ linux-3.0.3/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51504@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51505 #endif
51506
51507 struct k_atm_aal_stats {
51508-#define __HANDLE_ITEM(i) atomic_t i
51509+#define __HANDLE_ITEM(i) atomic_unchecked_t i
51510 __AAL_STAT_ITEMS
51511 #undef __HANDLE_ITEM
51512 };
51513diff -urNp linux-3.0.3/include/linux/binfmts.h linux-3.0.3/include/linux/binfmts.h
51514--- linux-3.0.3/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51515+++ linux-3.0.3/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51516@@ -88,6 +88,7 @@ struct linux_binfmt {
51517 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51518 int (*load_shlib)(struct file *);
51519 int (*core_dump)(struct coredump_params *cprm);
51520+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51521 unsigned long min_coredump; /* minimal dump size */
51522 };
51523
51524diff -urNp linux-3.0.3/include/linux/blkdev.h linux-3.0.3/include/linux/blkdev.h
51525--- linux-3.0.3/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51526+++ linux-3.0.3/include/linux/blkdev.h 2011-08-23 21:47:56.000000000 -0400
51527@@ -1307,7 +1307,7 @@ struct block_device_operations {
51528 int (*getgeo)(struct block_device *, struct hd_geometry *);
51529 /* this callback is with swap_lock and sometimes page table lock held */
51530 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51531- struct module *owner;
51532+ struct module * const owner;
51533 };
51534
51535 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51536diff -urNp linux-3.0.3/include/linux/blktrace_api.h linux-3.0.3/include/linux/blktrace_api.h
51537--- linux-3.0.3/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51538+++ linux-3.0.3/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51539@@ -161,7 +161,7 @@ struct blk_trace {
51540 struct dentry *dir;
51541 struct dentry *dropped_file;
51542 struct dentry *msg_file;
51543- atomic_t dropped;
51544+ atomic_unchecked_t dropped;
51545 };
51546
51547 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51548diff -urNp linux-3.0.3/include/linux/byteorder/little_endian.h linux-3.0.3/include/linux/byteorder/little_endian.h
51549--- linux-3.0.3/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51550+++ linux-3.0.3/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51551@@ -42,51 +42,51 @@
51552
51553 static inline __le64 __cpu_to_le64p(const __u64 *p)
51554 {
51555- return (__force __le64)*p;
51556+ return (__force const __le64)*p;
51557 }
51558 static inline __u64 __le64_to_cpup(const __le64 *p)
51559 {
51560- return (__force __u64)*p;
51561+ return (__force const __u64)*p;
51562 }
51563 static inline __le32 __cpu_to_le32p(const __u32 *p)
51564 {
51565- return (__force __le32)*p;
51566+ return (__force const __le32)*p;
51567 }
51568 static inline __u32 __le32_to_cpup(const __le32 *p)
51569 {
51570- return (__force __u32)*p;
51571+ return (__force const __u32)*p;
51572 }
51573 static inline __le16 __cpu_to_le16p(const __u16 *p)
51574 {
51575- return (__force __le16)*p;
51576+ return (__force const __le16)*p;
51577 }
51578 static inline __u16 __le16_to_cpup(const __le16 *p)
51579 {
51580- return (__force __u16)*p;
51581+ return (__force const __u16)*p;
51582 }
51583 static inline __be64 __cpu_to_be64p(const __u64 *p)
51584 {
51585- return (__force __be64)__swab64p(p);
51586+ return (__force const __be64)__swab64p(p);
51587 }
51588 static inline __u64 __be64_to_cpup(const __be64 *p)
51589 {
51590- return __swab64p((__u64 *)p);
51591+ return __swab64p((const __u64 *)p);
51592 }
51593 static inline __be32 __cpu_to_be32p(const __u32 *p)
51594 {
51595- return (__force __be32)__swab32p(p);
51596+ return (__force const __be32)__swab32p(p);
51597 }
51598 static inline __u32 __be32_to_cpup(const __be32 *p)
51599 {
51600- return __swab32p((__u32 *)p);
51601+ return __swab32p((const __u32 *)p);
51602 }
51603 static inline __be16 __cpu_to_be16p(const __u16 *p)
51604 {
51605- return (__force __be16)__swab16p(p);
51606+ return (__force const __be16)__swab16p(p);
51607 }
51608 static inline __u16 __be16_to_cpup(const __be16 *p)
51609 {
51610- return __swab16p((__u16 *)p);
51611+ return __swab16p((const __u16 *)p);
51612 }
51613 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51614 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51615diff -urNp linux-3.0.3/include/linux/cache.h linux-3.0.3/include/linux/cache.h
51616--- linux-3.0.3/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51617+++ linux-3.0.3/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51618@@ -16,6 +16,10 @@
51619 #define __read_mostly
51620 #endif
51621
51622+#ifndef __read_only
51623+#define __read_only __read_mostly
51624+#endif
51625+
51626 #ifndef ____cacheline_aligned
51627 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51628 #endif
51629diff -urNp linux-3.0.3/include/linux/capability.h linux-3.0.3/include/linux/capability.h
51630--- linux-3.0.3/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51631+++ linux-3.0.3/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51632@@ -547,6 +547,9 @@ extern bool capable(int cap);
51633 extern bool ns_capable(struct user_namespace *ns, int cap);
51634 extern bool task_ns_capable(struct task_struct *t, int cap);
51635 extern bool nsown_capable(int cap);
51636+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51637+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51638+extern bool capable_nolog(int cap);
51639
51640 /* audit system wants to get cap info from files as well */
51641 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51642diff -urNp linux-3.0.3/include/linux/cleancache.h linux-3.0.3/include/linux/cleancache.h
51643--- linux-3.0.3/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51644+++ linux-3.0.3/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51645@@ -31,7 +31,7 @@ struct cleancache_ops {
51646 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51647 void (*flush_inode)(int, struct cleancache_filekey);
51648 void (*flush_fs)(int);
51649-};
51650+} __no_const;
51651
51652 extern struct cleancache_ops
51653 cleancache_register_ops(struct cleancache_ops *ops);
51654diff -urNp linux-3.0.3/include/linux/compiler-gcc4.h linux-3.0.3/include/linux/compiler-gcc4.h
51655--- linux-3.0.3/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51656+++ linux-3.0.3/include/linux/compiler-gcc4.h 2011-08-23 21:47:56.000000000 -0400
51657@@ -31,6 +31,9 @@
51658
51659
51660 #if __GNUC_MINOR__ >= 5
51661+
51662+#define __no_const __attribute__((no_const))
51663+
51664 /*
51665 * Mark a position in code as unreachable. This can be used to
51666 * suppress control flow warnings after asm blocks that transfer
51667@@ -46,6 +49,11 @@
51668 #define __noclone __attribute__((__noclone__))
51669
51670 #endif
51671+
51672+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51673+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51674+#define __bos0(ptr) __bos((ptr), 0)
51675+#define __bos1(ptr) __bos((ptr), 1)
51676 #endif
51677
51678 #if __GNUC_MINOR__ > 0
51679diff -urNp linux-3.0.3/include/linux/compiler.h linux-3.0.3/include/linux/compiler.h
51680--- linux-3.0.3/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51681+++ linux-3.0.3/include/linux/compiler.h 2011-08-23 21:47:56.000000000 -0400
51682@@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51683 # define __attribute_const__ /* unimplemented */
51684 #endif
51685
51686+#ifndef __no_const
51687+# define __no_const
51688+#endif
51689+
51690 /*
51691 * Tell gcc if a function is cold. The compiler will assume any path
51692 * directly leading to the call is unlikely.
51693@@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51694 #define __cold
51695 #endif
51696
51697+#ifndef __alloc_size
51698+#define __alloc_size(...)
51699+#endif
51700+
51701+#ifndef __bos
51702+#define __bos(ptr, arg)
51703+#endif
51704+
51705+#ifndef __bos0
51706+#define __bos0(ptr)
51707+#endif
51708+
51709+#ifndef __bos1
51710+#define __bos1(ptr)
51711+#endif
51712+
51713 /* Simple shorthand for a section definition */
51714 #ifndef __section
51715 # define __section(S) __attribute__ ((__section__(#S)))
51716@@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51717 * use is to mediate communication between process-level code and irq/NMI
51718 * handlers, all running on the same CPU.
51719 */
51720-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51721+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51722+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51723
51724 #endif /* __LINUX_COMPILER_H */
51725diff -urNp linux-3.0.3/include/linux/cpuset.h linux-3.0.3/include/linux/cpuset.h
51726--- linux-3.0.3/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51727+++ linux-3.0.3/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51728@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51729 * nodemask.
51730 */
51731 smp_mb();
51732- --ACCESS_ONCE(current->mems_allowed_change_disable);
51733+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51734 }
51735
51736 static inline void set_mems_allowed(nodemask_t nodemask)
51737diff -urNp linux-3.0.3/include/linux/crypto.h linux-3.0.3/include/linux/crypto.h
51738--- linux-3.0.3/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51739+++ linux-3.0.3/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51740@@ -361,7 +361,7 @@ struct cipher_tfm {
51741 const u8 *key, unsigned int keylen);
51742 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51743 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51744-};
51745+} __no_const;
51746
51747 struct hash_tfm {
51748 int (*init)(struct hash_desc *desc);
51749@@ -382,13 +382,13 @@ struct compress_tfm {
51750 int (*cot_decompress)(struct crypto_tfm *tfm,
51751 const u8 *src, unsigned int slen,
51752 u8 *dst, unsigned int *dlen);
51753-};
51754+} __no_const;
51755
51756 struct rng_tfm {
51757 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51758 unsigned int dlen);
51759 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51760-};
51761+} __no_const;
51762
51763 #define crt_ablkcipher crt_u.ablkcipher
51764 #define crt_aead crt_u.aead
51765diff -urNp linux-3.0.3/include/linux/decompress/mm.h linux-3.0.3/include/linux/decompress/mm.h
51766--- linux-3.0.3/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51767+++ linux-3.0.3/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51768@@ -77,7 +77,7 @@ static void free(void *where)
51769 * warnings when not needed (indeed large_malloc / large_free are not
51770 * needed by inflate */
51771
51772-#define malloc(a) kmalloc(a, GFP_KERNEL)
51773+#define malloc(a) kmalloc((a), GFP_KERNEL)
51774 #define free(a) kfree(a)
51775
51776 #define large_malloc(a) vmalloc(a)
51777diff -urNp linux-3.0.3/include/linux/dma-mapping.h linux-3.0.3/include/linux/dma-mapping.h
51778--- linux-3.0.3/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51779+++ linux-3.0.3/include/linux/dma-mapping.h 2011-08-23 21:47:56.000000000 -0400
51780@@ -49,7 +49,7 @@ struct dma_map_ops {
51781 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51782 int (*dma_supported)(struct device *dev, u64 mask);
51783 int (*set_dma_mask)(struct device *dev, u64 mask);
51784- int is_phys;
51785+ const int is_phys;
51786 };
51787
51788 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51789diff -urNp linux-3.0.3/include/linux/efi.h linux-3.0.3/include/linux/efi.h
51790--- linux-3.0.3/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51791+++ linux-3.0.3/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51792@@ -410,7 +410,7 @@ struct efivar_operations {
51793 efi_get_variable_t *get_variable;
51794 efi_get_next_variable_t *get_next_variable;
51795 efi_set_variable_t *set_variable;
51796-};
51797+} __no_const;
51798
51799 struct efivars {
51800 /*
51801diff -urNp linux-3.0.3/include/linux/elf.h linux-3.0.3/include/linux/elf.h
51802--- linux-3.0.3/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51803+++ linux-3.0.3/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51804@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51805 #define PT_GNU_EH_FRAME 0x6474e550
51806
51807 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51808+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51809+
51810+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51811+
51812+/* Constants for the e_flags field */
51813+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51814+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51815+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51816+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51817+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51818+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51819
51820 /*
51821 * Extended Numbering
51822@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51823 #define DT_DEBUG 21
51824 #define DT_TEXTREL 22
51825 #define DT_JMPREL 23
51826+#define DT_FLAGS 30
51827+ #define DF_TEXTREL 0x00000004
51828 #define DT_ENCODING 32
51829 #define OLD_DT_LOOS 0x60000000
51830 #define DT_LOOS 0x6000000d
51831@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51832 #define PF_W 0x2
51833 #define PF_X 0x1
51834
51835+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51836+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51837+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51838+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51839+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51840+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51841+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51842+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51843+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51844+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51845+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51846+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51847+
51848 typedef struct elf32_phdr{
51849 Elf32_Word p_type;
51850 Elf32_Off p_offset;
51851@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51852 #define EI_OSABI 7
51853 #define EI_PAD 8
51854
51855+#define EI_PAX 14
51856+
51857 #define ELFMAG0 0x7f /* EI_MAG */
51858 #define ELFMAG1 'E'
51859 #define ELFMAG2 'L'
51860@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51861 #define elf_note elf32_note
51862 #define elf_addr_t Elf32_Off
51863 #define Elf_Half Elf32_Half
51864+#define elf_dyn Elf32_Dyn
51865
51866 #else
51867
51868@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51869 #define elf_note elf64_note
51870 #define elf_addr_t Elf64_Off
51871 #define Elf_Half Elf64_Half
51872+#define elf_dyn Elf64_Dyn
51873
51874 #endif
51875
51876diff -urNp linux-3.0.3/include/linux/firewire.h linux-3.0.3/include/linux/firewire.h
51877--- linux-3.0.3/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51878+++ linux-3.0.3/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51879@@ -428,7 +428,7 @@ struct fw_iso_context {
51880 union {
51881 fw_iso_callback_t sc;
51882 fw_iso_mc_callback_t mc;
51883- } callback;
51884+ } __no_const callback;
51885 void *callback_data;
51886 };
51887
51888diff -urNp linux-3.0.3/include/linux/fscache-cache.h linux-3.0.3/include/linux/fscache-cache.h
51889--- linux-3.0.3/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51890+++ linux-3.0.3/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51891@@ -102,7 +102,7 @@ struct fscache_operation {
51892 fscache_operation_release_t release;
51893 };
51894
51895-extern atomic_t fscache_op_debug_id;
51896+extern atomic_unchecked_t fscache_op_debug_id;
51897 extern void fscache_op_work_func(struct work_struct *work);
51898
51899 extern void fscache_enqueue_operation(struct fscache_operation *);
51900@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51901 {
51902 INIT_WORK(&op->work, fscache_op_work_func);
51903 atomic_set(&op->usage, 1);
51904- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51905+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51906 op->processor = processor;
51907 op->release = release;
51908 INIT_LIST_HEAD(&op->pend_link);
51909diff -urNp linux-3.0.3/include/linux/fs.h linux-3.0.3/include/linux/fs.h
51910--- linux-3.0.3/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51911+++ linux-3.0.3/include/linux/fs.h 2011-08-23 21:48:14.000000000 -0400
51912@@ -109,6 +109,11 @@ struct inodes_stat_t {
51913 /* File was opened by fanotify and shouldn't generate fanotify events */
51914 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51915
51916+/* Hack for grsec so as not to require read permission simply to execute
51917+ * a binary
51918+ */
51919+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51920+
51921 /*
51922 * The below are the various read and write types that we support. Some of
51923 * them include behavioral modifiers that send information down to the
51924@@ -1544,7 +1549,7 @@ struct block_device_operations;
51925 * the big kernel lock held in all filesystems.
51926 */
51927 struct file_operations {
51928- struct module *owner;
51929+ struct module * const owner;
51930 loff_t (*llseek) (struct file *, loff_t, int);
51931 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
51932 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
51933@@ -1572,6 +1577,7 @@ struct file_operations {
51934 long (*fallocate)(struct file *file, int mode, loff_t offset,
51935 loff_t len);
51936 };
51937+typedef struct file_operations __no_const file_operations_no_const;
51938
51939 #define IPERM_FLAG_RCU 0x0001
51940
51941diff -urNp linux-3.0.3/include/linux/fsnotify.h linux-3.0.3/include/linux/fsnotify.h
51942--- linux-3.0.3/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
51943+++ linux-3.0.3/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
51944@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
51945 */
51946 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
51947 {
51948- return kstrdup(name, GFP_KERNEL);
51949+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
51950 }
51951
51952 /*
51953diff -urNp linux-3.0.3/include/linux/fs_struct.h linux-3.0.3/include/linux/fs_struct.h
51954--- linux-3.0.3/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51955+++ linux-3.0.3/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51956@@ -6,7 +6,7 @@
51957 #include <linux/seqlock.h>
51958
51959 struct fs_struct {
51960- int users;
51961+ atomic_t users;
51962 spinlock_t lock;
51963 seqcount_t seq;
51964 int umask;
51965diff -urNp linux-3.0.3/include/linux/ftrace_event.h linux-3.0.3/include/linux/ftrace_event.h
51966--- linux-3.0.3/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51967+++ linux-3.0.3/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51968@@ -96,7 +96,7 @@ struct trace_event_functions {
51969 trace_print_func raw;
51970 trace_print_func hex;
51971 trace_print_func binary;
51972-};
51973+} __no_const;
51974
51975 struct trace_event {
51976 struct hlist_node node;
51977@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
51978 extern int trace_add_event_call(struct ftrace_event_call *call);
51979 extern void trace_remove_event_call(struct ftrace_event_call *call);
51980
51981-#define is_signed_type(type) (((type)(-1)) < 0)
51982+#define is_signed_type(type) (((type)(-1)) < (type)1)
51983
51984 int trace_set_clr_event(const char *system, const char *event, int set);
51985
51986diff -urNp linux-3.0.3/include/linux/genhd.h linux-3.0.3/include/linux/genhd.h
51987--- linux-3.0.3/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
51988+++ linux-3.0.3/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
51989@@ -184,7 +184,7 @@ struct gendisk {
51990 struct kobject *slave_dir;
51991
51992 struct timer_rand_state *random;
51993- atomic_t sync_io; /* RAID */
51994+ atomic_unchecked_t sync_io; /* RAID */
51995 struct disk_events *ev;
51996 #ifdef CONFIG_BLK_DEV_INTEGRITY
51997 struct blk_integrity *integrity;
51998diff -urNp linux-3.0.3/include/linux/gracl.h linux-3.0.3/include/linux/gracl.h
51999--- linux-3.0.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52000+++ linux-3.0.3/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
52001@@ -0,0 +1,317 @@
52002+#ifndef GR_ACL_H
52003+#define GR_ACL_H
52004+
52005+#include <linux/grdefs.h>
52006+#include <linux/resource.h>
52007+#include <linux/capability.h>
52008+#include <linux/dcache.h>
52009+#include <asm/resource.h>
52010+
52011+/* Major status information */
52012+
52013+#define GR_VERSION "grsecurity 2.2.2"
52014+#define GRSECURITY_VERSION 0x2202
52015+
52016+enum {
52017+ GR_SHUTDOWN = 0,
52018+ GR_ENABLE = 1,
52019+ GR_SPROLE = 2,
52020+ GR_RELOAD = 3,
52021+ GR_SEGVMOD = 4,
52022+ GR_STATUS = 5,
52023+ GR_UNSPROLE = 6,
52024+ GR_PASSSET = 7,
52025+ GR_SPROLEPAM = 8,
52026+};
52027+
52028+/* Password setup definitions
52029+ * kernel/grhash.c */
52030+enum {
52031+ GR_PW_LEN = 128,
52032+ GR_SALT_LEN = 16,
52033+ GR_SHA_LEN = 32,
52034+};
52035+
52036+enum {
52037+ GR_SPROLE_LEN = 64,
52038+};
52039+
52040+enum {
52041+ GR_NO_GLOB = 0,
52042+ GR_REG_GLOB,
52043+ GR_CREATE_GLOB
52044+};
52045+
52046+#define GR_NLIMITS 32
52047+
52048+/* Begin Data Structures */
52049+
52050+struct sprole_pw {
52051+ unsigned char *rolename;
52052+ unsigned char salt[GR_SALT_LEN];
52053+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52054+};
52055+
52056+struct name_entry {
52057+ __u32 key;
52058+ ino_t inode;
52059+ dev_t device;
52060+ char *name;
52061+ __u16 len;
52062+ __u8 deleted;
52063+ struct name_entry *prev;
52064+ struct name_entry *next;
52065+};
52066+
52067+struct inodev_entry {
52068+ struct name_entry *nentry;
52069+ struct inodev_entry *prev;
52070+ struct inodev_entry *next;
52071+};
52072+
52073+struct acl_role_db {
52074+ struct acl_role_label **r_hash;
52075+ __u32 r_size;
52076+};
52077+
52078+struct inodev_db {
52079+ struct inodev_entry **i_hash;
52080+ __u32 i_size;
52081+};
52082+
52083+struct name_db {
52084+ struct name_entry **n_hash;
52085+ __u32 n_size;
52086+};
52087+
52088+struct crash_uid {
52089+ uid_t uid;
52090+ unsigned long expires;
52091+};
52092+
52093+struct gr_hash_struct {
52094+ void **table;
52095+ void **nametable;
52096+ void *first;
52097+ __u32 table_size;
52098+ __u32 used_size;
52099+ int type;
52100+};
52101+
52102+/* Userspace Grsecurity ACL data structures */
52103+
52104+struct acl_subject_label {
52105+ char *filename;
52106+ ino_t inode;
52107+ dev_t device;
52108+ __u32 mode;
52109+ kernel_cap_t cap_mask;
52110+ kernel_cap_t cap_lower;
52111+ kernel_cap_t cap_invert_audit;
52112+
52113+ struct rlimit res[GR_NLIMITS];
52114+ __u32 resmask;
52115+
52116+ __u8 user_trans_type;
52117+ __u8 group_trans_type;
52118+ uid_t *user_transitions;
52119+ gid_t *group_transitions;
52120+ __u16 user_trans_num;
52121+ __u16 group_trans_num;
52122+
52123+ __u32 sock_families[2];
52124+ __u32 ip_proto[8];
52125+ __u32 ip_type;
52126+ struct acl_ip_label **ips;
52127+ __u32 ip_num;
52128+ __u32 inaddr_any_override;
52129+
52130+ __u32 crashes;
52131+ unsigned long expires;
52132+
52133+ struct acl_subject_label *parent_subject;
52134+ struct gr_hash_struct *hash;
52135+ struct acl_subject_label *prev;
52136+ struct acl_subject_label *next;
52137+
52138+ struct acl_object_label **obj_hash;
52139+ __u32 obj_hash_size;
52140+ __u16 pax_flags;
52141+};
52142+
52143+struct role_allowed_ip {
52144+ __u32 addr;
52145+ __u32 netmask;
52146+
52147+ struct role_allowed_ip *prev;
52148+ struct role_allowed_ip *next;
52149+};
52150+
52151+struct role_transition {
52152+ char *rolename;
52153+
52154+ struct role_transition *prev;
52155+ struct role_transition *next;
52156+};
52157+
52158+struct acl_role_label {
52159+ char *rolename;
52160+ uid_t uidgid;
52161+ __u16 roletype;
52162+
52163+ __u16 auth_attempts;
52164+ unsigned long expires;
52165+
52166+ struct acl_subject_label *root_label;
52167+ struct gr_hash_struct *hash;
52168+
52169+ struct acl_role_label *prev;
52170+ struct acl_role_label *next;
52171+
52172+ struct role_transition *transitions;
52173+ struct role_allowed_ip *allowed_ips;
52174+ uid_t *domain_children;
52175+ __u16 domain_child_num;
52176+
52177+ struct acl_subject_label **subj_hash;
52178+ __u32 subj_hash_size;
52179+};
52180+
52181+struct user_acl_role_db {
52182+ struct acl_role_label **r_table;
52183+ __u32 num_pointers; /* Number of allocations to track */
52184+ __u32 num_roles; /* Number of roles */
52185+ __u32 num_domain_children; /* Number of domain children */
52186+ __u32 num_subjects; /* Number of subjects */
52187+ __u32 num_objects; /* Number of objects */
52188+};
52189+
52190+struct acl_object_label {
52191+ char *filename;
52192+ ino_t inode;
52193+ dev_t device;
52194+ __u32 mode;
52195+
52196+ struct acl_subject_label *nested;
52197+ struct acl_object_label *globbed;
52198+
52199+ /* next two structures not used */
52200+
52201+ struct acl_object_label *prev;
52202+ struct acl_object_label *next;
52203+};
52204+
52205+struct acl_ip_label {
52206+ char *iface;
52207+ __u32 addr;
52208+ __u32 netmask;
52209+ __u16 low, high;
52210+ __u8 mode;
52211+ __u32 type;
52212+ __u32 proto[8];
52213+
52214+ /* next two structures not used */
52215+
52216+ struct acl_ip_label *prev;
52217+ struct acl_ip_label *next;
52218+};
52219+
52220+struct gr_arg {
52221+ struct user_acl_role_db role_db;
52222+ unsigned char pw[GR_PW_LEN];
52223+ unsigned char salt[GR_SALT_LEN];
52224+ unsigned char sum[GR_SHA_LEN];
52225+ unsigned char sp_role[GR_SPROLE_LEN];
52226+ struct sprole_pw *sprole_pws;
52227+ dev_t segv_device;
52228+ ino_t segv_inode;
52229+ uid_t segv_uid;
52230+ __u16 num_sprole_pws;
52231+ __u16 mode;
52232+};
52233+
52234+struct gr_arg_wrapper {
52235+ struct gr_arg *arg;
52236+ __u32 version;
52237+ __u32 size;
52238+};
52239+
52240+struct subject_map {
52241+ struct acl_subject_label *user;
52242+ struct acl_subject_label *kernel;
52243+ struct subject_map *prev;
52244+ struct subject_map *next;
52245+};
52246+
52247+struct acl_subj_map_db {
52248+ struct subject_map **s_hash;
52249+ __u32 s_size;
52250+};
52251+
52252+/* End Data Structures Section */
52253+
52254+/* Hash functions generated by empirical testing by Brad Spengler
52255+ Makes good use of the low bits of the inode. Generally 0-1 times
52256+ in loop for successful match. 0-3 for unsuccessful match.
52257+ Shift/add algorithm with modulus of table size and an XOR*/
52258+
52259+static __inline__ unsigned int
52260+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52261+{
52262+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
52263+}
52264+
52265+ static __inline__ unsigned int
52266+shash(const struct acl_subject_label *userp, const unsigned int sz)
52267+{
52268+ return ((const unsigned long)userp % sz);
52269+}
52270+
52271+static __inline__ unsigned int
52272+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52273+{
52274+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52275+}
52276+
52277+static __inline__ unsigned int
52278+nhash(const char *name, const __u16 len, const unsigned int sz)
52279+{
52280+ return full_name_hash((const unsigned char *)name, len) % sz;
52281+}
52282+
52283+#define FOR_EACH_ROLE_START(role) \
52284+ role = role_list; \
52285+ while (role) {
52286+
52287+#define FOR_EACH_ROLE_END(role) \
52288+ role = role->prev; \
52289+ }
52290+
52291+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52292+ subj = NULL; \
52293+ iter = 0; \
52294+ while (iter < role->subj_hash_size) { \
52295+ if (subj == NULL) \
52296+ subj = role->subj_hash[iter]; \
52297+ if (subj == NULL) { \
52298+ iter++; \
52299+ continue; \
52300+ }
52301+
52302+#define FOR_EACH_SUBJECT_END(subj,iter) \
52303+ subj = subj->next; \
52304+ if (subj == NULL) \
52305+ iter++; \
52306+ }
52307+
52308+
52309+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52310+ subj = role->hash->first; \
52311+ while (subj != NULL) {
52312+
52313+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52314+ subj = subj->next; \
52315+ }
52316+
52317+#endif
52318+
52319diff -urNp linux-3.0.3/include/linux/gralloc.h linux-3.0.3/include/linux/gralloc.h
52320--- linux-3.0.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52321+++ linux-3.0.3/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52322@@ -0,0 +1,9 @@
52323+#ifndef __GRALLOC_H
52324+#define __GRALLOC_H
52325+
52326+void acl_free_all(void);
52327+int acl_alloc_stack_init(unsigned long size);
52328+void *acl_alloc(unsigned long len);
52329+void *acl_alloc_num(unsigned long num, unsigned long len);
52330+
52331+#endif
52332diff -urNp linux-3.0.3/include/linux/grdefs.h linux-3.0.3/include/linux/grdefs.h
52333--- linux-3.0.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52334+++ linux-3.0.3/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52335@@ -0,0 +1,140 @@
52336+#ifndef GRDEFS_H
52337+#define GRDEFS_H
52338+
52339+/* Begin grsecurity status declarations */
52340+
52341+enum {
52342+ GR_READY = 0x01,
52343+ GR_STATUS_INIT = 0x00 // disabled state
52344+};
52345+
52346+/* Begin ACL declarations */
52347+
52348+/* Role flags */
52349+
52350+enum {
52351+ GR_ROLE_USER = 0x0001,
52352+ GR_ROLE_GROUP = 0x0002,
52353+ GR_ROLE_DEFAULT = 0x0004,
52354+ GR_ROLE_SPECIAL = 0x0008,
52355+ GR_ROLE_AUTH = 0x0010,
52356+ GR_ROLE_NOPW = 0x0020,
52357+ GR_ROLE_GOD = 0x0040,
52358+ GR_ROLE_LEARN = 0x0080,
52359+ GR_ROLE_TPE = 0x0100,
52360+ GR_ROLE_DOMAIN = 0x0200,
52361+ GR_ROLE_PAM = 0x0400,
52362+ GR_ROLE_PERSIST = 0x0800
52363+};
52364+
52365+/* ACL Subject and Object mode flags */
52366+enum {
52367+ GR_DELETED = 0x80000000
52368+};
52369+
52370+/* ACL Object-only mode flags */
52371+enum {
52372+ GR_READ = 0x00000001,
52373+ GR_APPEND = 0x00000002,
52374+ GR_WRITE = 0x00000004,
52375+ GR_EXEC = 0x00000008,
52376+ GR_FIND = 0x00000010,
52377+ GR_INHERIT = 0x00000020,
52378+ GR_SETID = 0x00000040,
52379+ GR_CREATE = 0x00000080,
52380+ GR_DELETE = 0x00000100,
52381+ GR_LINK = 0x00000200,
52382+ GR_AUDIT_READ = 0x00000400,
52383+ GR_AUDIT_APPEND = 0x00000800,
52384+ GR_AUDIT_WRITE = 0x00001000,
52385+ GR_AUDIT_EXEC = 0x00002000,
52386+ GR_AUDIT_FIND = 0x00004000,
52387+ GR_AUDIT_INHERIT= 0x00008000,
52388+ GR_AUDIT_SETID = 0x00010000,
52389+ GR_AUDIT_CREATE = 0x00020000,
52390+ GR_AUDIT_DELETE = 0x00040000,
52391+ GR_AUDIT_LINK = 0x00080000,
52392+ GR_PTRACERD = 0x00100000,
52393+ GR_NOPTRACE = 0x00200000,
52394+ GR_SUPPRESS = 0x00400000,
52395+ GR_NOLEARN = 0x00800000,
52396+ GR_INIT_TRANSFER= 0x01000000
52397+};
52398+
52399+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52400+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52401+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52402+
52403+/* ACL subject-only mode flags */
52404+enum {
52405+ GR_KILL = 0x00000001,
52406+ GR_VIEW = 0x00000002,
52407+ GR_PROTECTED = 0x00000004,
52408+ GR_LEARN = 0x00000008,
52409+ GR_OVERRIDE = 0x00000010,
52410+ /* just a placeholder, this mode is only used in userspace */
52411+ GR_DUMMY = 0x00000020,
52412+ GR_PROTSHM = 0x00000040,
52413+ GR_KILLPROC = 0x00000080,
52414+ GR_KILLIPPROC = 0x00000100,
52415+ /* just a placeholder, this mode is only used in userspace */
52416+ GR_NOTROJAN = 0x00000200,
52417+ GR_PROTPROCFD = 0x00000400,
52418+ GR_PROCACCT = 0x00000800,
52419+ GR_RELAXPTRACE = 0x00001000,
52420+ GR_NESTED = 0x00002000,
52421+ GR_INHERITLEARN = 0x00004000,
52422+ GR_PROCFIND = 0x00008000,
52423+ GR_POVERRIDE = 0x00010000,
52424+ GR_KERNELAUTH = 0x00020000,
52425+ GR_ATSECURE = 0x00040000,
52426+ GR_SHMEXEC = 0x00080000
52427+};
52428+
52429+enum {
52430+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52431+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52432+ GR_PAX_ENABLE_MPROTECT = 0x0004,
52433+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
52434+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52435+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52436+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52437+ GR_PAX_DISABLE_MPROTECT = 0x0400,
52438+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
52439+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52440+};
52441+
52442+enum {
52443+ GR_ID_USER = 0x01,
52444+ GR_ID_GROUP = 0x02,
52445+};
52446+
52447+enum {
52448+ GR_ID_ALLOW = 0x01,
52449+ GR_ID_DENY = 0x02,
52450+};
52451+
52452+#define GR_CRASH_RES 31
52453+#define GR_UIDTABLE_MAX 500
52454+
52455+/* begin resource learning section */
52456+enum {
52457+ GR_RLIM_CPU_BUMP = 60,
52458+ GR_RLIM_FSIZE_BUMP = 50000,
52459+ GR_RLIM_DATA_BUMP = 10000,
52460+ GR_RLIM_STACK_BUMP = 1000,
52461+ GR_RLIM_CORE_BUMP = 10000,
52462+ GR_RLIM_RSS_BUMP = 500000,
52463+ GR_RLIM_NPROC_BUMP = 1,
52464+ GR_RLIM_NOFILE_BUMP = 5,
52465+ GR_RLIM_MEMLOCK_BUMP = 50000,
52466+ GR_RLIM_AS_BUMP = 500000,
52467+ GR_RLIM_LOCKS_BUMP = 2,
52468+ GR_RLIM_SIGPENDING_BUMP = 5,
52469+ GR_RLIM_MSGQUEUE_BUMP = 10000,
52470+ GR_RLIM_NICE_BUMP = 1,
52471+ GR_RLIM_RTPRIO_BUMP = 1,
52472+ GR_RLIM_RTTIME_BUMP = 1000000
52473+};
52474+
52475+#endif
52476diff -urNp linux-3.0.3/include/linux/grinternal.h linux-3.0.3/include/linux/grinternal.h
52477--- linux-3.0.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52478+++ linux-3.0.3/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52479@@ -0,0 +1,219 @@
52480+#ifndef __GRINTERNAL_H
52481+#define __GRINTERNAL_H
52482+
52483+#ifdef CONFIG_GRKERNSEC
52484+
52485+#include <linux/fs.h>
52486+#include <linux/mnt_namespace.h>
52487+#include <linux/nsproxy.h>
52488+#include <linux/gracl.h>
52489+#include <linux/grdefs.h>
52490+#include <linux/grmsg.h>
52491+
52492+void gr_add_learn_entry(const char *fmt, ...)
52493+ __attribute__ ((format (printf, 1, 2)));
52494+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52495+ const struct vfsmount *mnt);
52496+__u32 gr_check_create(const struct dentry *new_dentry,
52497+ const struct dentry *parent,
52498+ const struct vfsmount *mnt, const __u32 mode);
52499+int gr_check_protected_task(const struct task_struct *task);
52500+__u32 to_gr_audit(const __u32 reqmode);
52501+int gr_set_acls(const int type);
52502+int gr_apply_subject_to_task(struct task_struct *task);
52503+int gr_acl_is_enabled(void);
52504+char gr_roletype_to_char(void);
52505+
52506+void gr_handle_alertkill(struct task_struct *task);
52507+char *gr_to_filename(const struct dentry *dentry,
52508+ const struct vfsmount *mnt);
52509+char *gr_to_filename1(const struct dentry *dentry,
52510+ const struct vfsmount *mnt);
52511+char *gr_to_filename2(const struct dentry *dentry,
52512+ const struct vfsmount *mnt);
52513+char *gr_to_filename3(const struct dentry *dentry,
52514+ const struct vfsmount *mnt);
52515+
52516+extern int grsec_enable_harden_ptrace;
52517+extern int grsec_enable_link;
52518+extern int grsec_enable_fifo;
52519+extern int grsec_enable_execve;
52520+extern int grsec_enable_shm;
52521+extern int grsec_enable_execlog;
52522+extern int grsec_enable_signal;
52523+extern int grsec_enable_audit_ptrace;
52524+extern int grsec_enable_forkfail;
52525+extern int grsec_enable_time;
52526+extern int grsec_enable_rofs;
52527+extern int grsec_enable_chroot_shmat;
52528+extern int grsec_enable_chroot_mount;
52529+extern int grsec_enable_chroot_double;
52530+extern int grsec_enable_chroot_pivot;
52531+extern int grsec_enable_chroot_chdir;
52532+extern int grsec_enable_chroot_chmod;
52533+extern int grsec_enable_chroot_mknod;
52534+extern int grsec_enable_chroot_fchdir;
52535+extern int grsec_enable_chroot_nice;
52536+extern int grsec_enable_chroot_execlog;
52537+extern int grsec_enable_chroot_caps;
52538+extern int grsec_enable_chroot_sysctl;
52539+extern int grsec_enable_chroot_unix;
52540+extern int grsec_enable_tpe;
52541+extern int grsec_tpe_gid;
52542+extern int grsec_enable_tpe_all;
52543+extern int grsec_enable_tpe_invert;
52544+extern int grsec_enable_socket_all;
52545+extern int grsec_socket_all_gid;
52546+extern int grsec_enable_socket_client;
52547+extern int grsec_socket_client_gid;
52548+extern int grsec_enable_socket_server;
52549+extern int grsec_socket_server_gid;
52550+extern int grsec_audit_gid;
52551+extern int grsec_enable_group;
52552+extern int grsec_enable_audit_textrel;
52553+extern int grsec_enable_log_rwxmaps;
52554+extern int grsec_enable_mount;
52555+extern int grsec_enable_chdir;
52556+extern int grsec_resource_logging;
52557+extern int grsec_enable_blackhole;
52558+extern int grsec_lastack_retries;
52559+extern int grsec_enable_brute;
52560+extern int grsec_lock;
52561+
52562+extern spinlock_t grsec_alert_lock;
52563+extern unsigned long grsec_alert_wtime;
52564+extern unsigned long grsec_alert_fyet;
52565+
52566+extern spinlock_t grsec_audit_lock;
52567+
52568+extern rwlock_t grsec_exec_file_lock;
52569+
52570+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52571+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52572+ (tsk)->exec_file->f_vfsmnt) : "/")
52573+
52574+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52575+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52576+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52577+
52578+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52579+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
52580+ (tsk)->exec_file->f_vfsmnt) : "/")
52581+
52582+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52583+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52584+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52585+
52586+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52587+
52588+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52589+
52590+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52591+ (task)->pid, (cred)->uid, \
52592+ (cred)->euid, (cred)->gid, (cred)->egid, \
52593+ gr_parent_task_fullpath(task), \
52594+ (task)->real_parent->comm, (task)->real_parent->pid, \
52595+ (pcred)->uid, (pcred)->euid, \
52596+ (pcred)->gid, (pcred)->egid
52597+
52598+#define GR_CHROOT_CAPS {{ \
52599+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52600+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52601+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52602+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52603+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52604+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52605+
52606+#define security_learn(normal_msg,args...) \
52607+({ \
52608+ read_lock(&grsec_exec_file_lock); \
52609+ gr_add_learn_entry(normal_msg "\n", ## args); \
52610+ read_unlock(&grsec_exec_file_lock); \
52611+})
52612+
52613+enum {
52614+ GR_DO_AUDIT,
52615+ GR_DONT_AUDIT,
52616+ /* used for non-audit messages that we shouldn't kill the task on */
52617+ GR_DONT_AUDIT_GOOD
52618+};
52619+
52620+enum {
52621+ GR_TTYSNIFF,
52622+ GR_RBAC,
52623+ GR_RBAC_STR,
52624+ GR_STR_RBAC,
52625+ GR_RBAC_MODE2,
52626+ GR_RBAC_MODE3,
52627+ GR_FILENAME,
52628+ GR_SYSCTL_HIDDEN,
52629+ GR_NOARGS,
52630+ GR_ONE_INT,
52631+ GR_ONE_INT_TWO_STR,
52632+ GR_ONE_STR,
52633+ GR_STR_INT,
52634+ GR_TWO_STR_INT,
52635+ GR_TWO_INT,
52636+ GR_TWO_U64,
52637+ GR_THREE_INT,
52638+ GR_FIVE_INT_TWO_STR,
52639+ GR_TWO_STR,
52640+ GR_THREE_STR,
52641+ GR_FOUR_STR,
52642+ GR_STR_FILENAME,
52643+ GR_FILENAME_STR,
52644+ GR_FILENAME_TWO_INT,
52645+ GR_FILENAME_TWO_INT_STR,
52646+ GR_TEXTREL,
52647+ GR_PTRACE,
52648+ GR_RESOURCE,
52649+ GR_CAP,
52650+ GR_SIG,
52651+ GR_SIG2,
52652+ GR_CRASH1,
52653+ GR_CRASH2,
52654+ GR_PSACCT,
52655+ GR_RWXMAP
52656+};
52657+
52658+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52659+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52660+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52661+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52662+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52663+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52664+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52665+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52666+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52667+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52668+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52669+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52670+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52671+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52672+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52673+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52674+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52675+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52676+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52677+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52678+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52679+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52680+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52681+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52682+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52683+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52684+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52685+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52686+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52687+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52688+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52689+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52690+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52691+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52692+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52693+
52694+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52695+
52696+#endif
52697+
52698+#endif
52699diff -urNp linux-3.0.3/include/linux/grmsg.h linux-3.0.3/include/linux/grmsg.h
52700--- linux-3.0.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52701+++ linux-3.0.3/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
52702@@ -0,0 +1,107 @@
52703+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52704+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52705+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52706+#define GR_STOPMOD_MSG "denied modification of module state by "
52707+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52708+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52709+#define GR_IOPERM_MSG "denied use of ioperm() by "
52710+#define GR_IOPL_MSG "denied use of iopl() by "
52711+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52712+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52713+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52714+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52715+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52716+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52717+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52718+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52719+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52720+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52721+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52722+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52723+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52724+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52725+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52726+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52727+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52728+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52729+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52730+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52731+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52732+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52733+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52734+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52735+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52736+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52737+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52738+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52739+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52740+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52741+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52742+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52743+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52744+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52745+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52746+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52747+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52748+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52749+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52750+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52751+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52752+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52753+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52754+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52755+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52756+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52757+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52758+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52759+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52760+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52761+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52762+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52763+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52764+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52765+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52766+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52767+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52768+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52769+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52770+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52771+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52772+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52773+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52774+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52775+#define GR_FAILFORK_MSG "failed fork with errno %s by "
52776+#define GR_NICE_CHROOT_MSG "denied priority change by "
52777+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52778+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52779+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52780+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52781+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52782+#define GR_TIME_MSG "time set by "
52783+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52784+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52785+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52786+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52787+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52788+#define GR_BIND_MSG "denied bind() by "
52789+#define GR_CONNECT_MSG "denied connect() by "
52790+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52791+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52792+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52793+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52794+#define GR_CAP_ACL_MSG "use of %s denied for "
52795+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52796+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52797+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52798+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52799+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52800+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52801+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52802+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52803+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52804+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52805+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52806+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52807+#define GR_VM86_MSG "denied use of vm86 by "
52808+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52809+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52810diff -urNp linux-3.0.3/include/linux/grsecurity.h linux-3.0.3/include/linux/grsecurity.h
52811--- linux-3.0.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52812+++ linux-3.0.3/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
52813@@ -0,0 +1,227 @@
52814+#ifndef GR_SECURITY_H
52815+#define GR_SECURITY_H
52816+#include <linux/fs.h>
52817+#include <linux/fs_struct.h>
52818+#include <linux/binfmts.h>
52819+#include <linux/gracl.h>
52820+
52821+/* notify of brain-dead configs */
52822+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52823+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52824+#endif
52825+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52826+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52827+#endif
52828+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52829+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52830+#endif
52831+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52832+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52833+#endif
52834+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52835+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52836+#endif
52837+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52838+#error "CONFIG_PAX enabled, but no PaX options are enabled."
52839+#endif
52840+
52841+#include <linux/compat.h>
52842+
52843+struct user_arg_ptr {
52844+#ifdef CONFIG_COMPAT
52845+ bool is_compat;
52846+#endif
52847+ union {
52848+ const char __user *const __user *native;
52849+#ifdef CONFIG_COMPAT
52850+ compat_uptr_t __user *compat;
52851+#endif
52852+ } ptr;
52853+};
52854+
52855+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52856+void gr_handle_brute_check(void);
52857+void gr_handle_kernel_exploit(void);
52858+int gr_process_user_ban(void);
52859+
52860+char gr_roletype_to_char(void);
52861+
52862+int gr_acl_enable_at_secure(void);
52863+
52864+int gr_check_user_change(int real, int effective, int fs);
52865+int gr_check_group_change(int real, int effective, int fs);
52866+
52867+void gr_del_task_from_ip_table(struct task_struct *p);
52868+
52869+int gr_pid_is_chrooted(struct task_struct *p);
52870+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52871+int gr_handle_chroot_nice(void);
52872+int gr_handle_chroot_sysctl(const int op);
52873+int gr_handle_chroot_setpriority(struct task_struct *p,
52874+ const int niceval);
52875+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52876+int gr_handle_chroot_chroot(const struct dentry *dentry,
52877+ const struct vfsmount *mnt);
52878+int gr_handle_chroot_caps(struct path *path);
52879+void gr_handle_chroot_chdir(struct path *path);
52880+int gr_handle_chroot_chmod(const struct dentry *dentry,
52881+ const struct vfsmount *mnt, const int mode);
52882+int gr_handle_chroot_mknod(const struct dentry *dentry,
52883+ const struct vfsmount *mnt, const int mode);
52884+int gr_handle_chroot_mount(const struct dentry *dentry,
52885+ const struct vfsmount *mnt,
52886+ const char *dev_name);
52887+int gr_handle_chroot_pivot(void);
52888+int gr_handle_chroot_unix(const pid_t pid);
52889+
52890+int gr_handle_rawio(const struct inode *inode);
52891+
52892+void gr_handle_ioperm(void);
52893+void gr_handle_iopl(void);
52894+
52895+int gr_tpe_allow(const struct file *file);
52896+
52897+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52898+void gr_clear_chroot_entries(struct task_struct *task);
52899+
52900+void gr_log_forkfail(const int retval);
52901+void gr_log_timechange(void);
52902+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52903+void gr_log_chdir(const struct dentry *dentry,
52904+ const struct vfsmount *mnt);
52905+void gr_log_chroot_exec(const struct dentry *dentry,
52906+ const struct vfsmount *mnt);
52907+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52908+void gr_log_remount(const char *devname, const int retval);
52909+void gr_log_unmount(const char *devname, const int retval);
52910+void gr_log_mount(const char *from, const char *to, const int retval);
52911+void gr_log_textrel(struct vm_area_struct *vma);
52912+void gr_log_rwxmmap(struct file *file);
52913+void gr_log_rwxmprotect(struct file *file);
52914+
52915+int gr_handle_follow_link(const struct inode *parent,
52916+ const struct inode *inode,
52917+ const struct dentry *dentry,
52918+ const struct vfsmount *mnt);
52919+int gr_handle_fifo(const struct dentry *dentry,
52920+ const struct vfsmount *mnt,
52921+ const struct dentry *dir, const int flag,
52922+ const int acc_mode);
52923+int gr_handle_hardlink(const struct dentry *dentry,
52924+ const struct vfsmount *mnt,
52925+ struct inode *inode,
52926+ const int mode, const char *to);
52927+
52928+int gr_is_capable(const int cap);
52929+int gr_is_capable_nolog(const int cap);
52930+void gr_learn_resource(const struct task_struct *task, const int limit,
52931+ const unsigned long wanted, const int gt);
52932+void gr_copy_label(struct task_struct *tsk);
52933+void gr_handle_crash(struct task_struct *task, const int sig);
52934+int gr_handle_signal(const struct task_struct *p, const int sig);
52935+int gr_check_crash_uid(const uid_t uid);
52936+int gr_check_protected_task(const struct task_struct *task);
52937+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52938+int gr_acl_handle_mmap(const struct file *file,
52939+ const unsigned long prot);
52940+int gr_acl_handle_mprotect(const struct file *file,
52941+ const unsigned long prot);
52942+int gr_check_hidden_task(const struct task_struct *tsk);
52943+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52944+ const struct vfsmount *mnt);
52945+__u32 gr_acl_handle_utime(const struct dentry *dentry,
52946+ const struct vfsmount *mnt);
52947+__u32 gr_acl_handle_access(const struct dentry *dentry,
52948+ const struct vfsmount *mnt, const int fmode);
52949+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52950+ const struct vfsmount *mnt, mode_t mode);
52951+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52952+ const struct vfsmount *mnt, mode_t mode);
52953+__u32 gr_acl_handle_chown(const struct dentry *dentry,
52954+ const struct vfsmount *mnt);
52955+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52956+ const struct vfsmount *mnt);
52957+int gr_handle_ptrace(struct task_struct *task, const long request);
52958+int gr_handle_proc_ptrace(struct task_struct *task);
52959+__u32 gr_acl_handle_execve(const struct dentry *dentry,
52960+ const struct vfsmount *mnt);
52961+int gr_check_crash_exec(const struct file *filp);
52962+int gr_acl_is_enabled(void);
52963+void gr_set_kernel_label(struct task_struct *task);
52964+void gr_set_role_label(struct task_struct *task, const uid_t uid,
52965+ const gid_t gid);
52966+int gr_set_proc_label(const struct dentry *dentry,
52967+ const struct vfsmount *mnt,
52968+ const int unsafe_share);
52969+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
52970+ const struct vfsmount *mnt);
52971+__u32 gr_acl_handle_open(const struct dentry *dentry,
52972+ const struct vfsmount *mnt, const int fmode);
52973+__u32 gr_acl_handle_creat(const struct dentry *dentry,
52974+ const struct dentry *p_dentry,
52975+ const struct vfsmount *p_mnt, const int fmode,
52976+ const int imode);
52977+void gr_handle_create(const struct dentry *dentry,
52978+ const struct vfsmount *mnt);
52979+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
52980+ const struct dentry *parent_dentry,
52981+ const struct vfsmount *parent_mnt,
52982+ const int mode);
52983+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
52984+ const struct dentry *parent_dentry,
52985+ const struct vfsmount *parent_mnt);
52986+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
52987+ const struct vfsmount *mnt);
52988+void gr_handle_delete(const ino_t ino, const dev_t dev);
52989+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
52990+ const struct vfsmount *mnt);
52991+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
52992+ const struct dentry *parent_dentry,
52993+ const struct vfsmount *parent_mnt,
52994+ const char *from);
52995+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
52996+ const struct dentry *parent_dentry,
52997+ const struct vfsmount *parent_mnt,
52998+ const struct dentry *old_dentry,
52999+ const struct vfsmount *old_mnt, const char *to);
53000+int gr_acl_handle_rename(struct dentry *new_dentry,
53001+ struct dentry *parent_dentry,
53002+ const struct vfsmount *parent_mnt,
53003+ struct dentry *old_dentry,
53004+ struct inode *old_parent_inode,
53005+ struct vfsmount *old_mnt, const char *newname);
53006+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53007+ struct dentry *old_dentry,
53008+ struct dentry *new_dentry,
53009+ struct vfsmount *mnt, const __u8 replace);
53010+__u32 gr_check_link(const struct dentry *new_dentry,
53011+ const struct dentry *parent_dentry,
53012+ const struct vfsmount *parent_mnt,
53013+ const struct dentry *old_dentry,
53014+ const struct vfsmount *old_mnt);
53015+int gr_acl_handle_filldir(const struct file *file, const char *name,
53016+ const unsigned int namelen, const ino_t ino);
53017+
53018+__u32 gr_acl_handle_unix(const struct dentry *dentry,
53019+ const struct vfsmount *mnt);
53020+void gr_acl_handle_exit(void);
53021+void gr_acl_handle_psacct(struct task_struct *task, const long code);
53022+int gr_acl_handle_procpidmem(const struct task_struct *task);
53023+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53024+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53025+void gr_audit_ptrace(struct task_struct *task);
53026+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53027+
53028+#ifdef CONFIG_GRKERNSEC
53029+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53030+void gr_handle_vm86(void);
53031+void gr_handle_mem_readwrite(u64 from, u64 to);
53032+
53033+extern int grsec_enable_dmesg;
53034+extern int grsec_disable_privio;
53035+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53036+extern int grsec_enable_chroot_findtask;
53037+#endif
53038+#endif
53039+
53040+#endif
53041diff -urNp linux-3.0.3/include/linux/grsock.h linux-3.0.3/include/linux/grsock.h
53042--- linux-3.0.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53043+++ linux-3.0.3/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
53044@@ -0,0 +1,19 @@
53045+#ifndef __GRSOCK_H
53046+#define __GRSOCK_H
53047+
53048+extern void gr_attach_curr_ip(const struct sock *sk);
53049+extern int gr_handle_sock_all(const int family, const int type,
53050+ const int protocol);
53051+extern int gr_handle_sock_server(const struct sockaddr *sck);
53052+extern int gr_handle_sock_server_other(const struct sock *sck);
53053+extern int gr_handle_sock_client(const struct sockaddr *sck);
53054+extern int gr_search_connect(struct socket * sock,
53055+ struct sockaddr_in * addr);
53056+extern int gr_search_bind(struct socket * sock,
53057+ struct sockaddr_in * addr);
53058+extern int gr_search_listen(struct socket * sock);
53059+extern int gr_search_accept(struct socket * sock);
53060+extern int gr_search_socket(const int domain, const int type,
53061+ const int protocol);
53062+
53063+#endif
53064diff -urNp linux-3.0.3/include/linux/hid.h linux-3.0.3/include/linux/hid.h
53065--- linux-3.0.3/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
53066+++ linux-3.0.3/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
53067@@ -675,7 +675,7 @@ struct hid_ll_driver {
53068 unsigned int code, int value);
53069
53070 int (*parse)(struct hid_device *hdev);
53071-};
53072+} __no_const;
53073
53074 #define PM_HINT_FULLON 1<<5
53075 #define PM_HINT_NORMAL 1<<1
53076diff -urNp linux-3.0.3/include/linux/highmem.h linux-3.0.3/include/linux/highmem.h
53077--- linux-3.0.3/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
53078+++ linux-3.0.3/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
53079@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53080 kunmap_atomic(kaddr, KM_USER0);
53081 }
53082
53083+static inline void sanitize_highpage(struct page *page)
53084+{
53085+ void *kaddr;
53086+ unsigned long flags;
53087+
53088+ local_irq_save(flags);
53089+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
53090+ clear_page(kaddr);
53091+ kunmap_atomic(kaddr, KM_CLEARPAGE);
53092+ local_irq_restore(flags);
53093+}
53094+
53095 static inline void zero_user_segments(struct page *page,
53096 unsigned start1, unsigned end1,
53097 unsigned start2, unsigned end2)
53098diff -urNp linux-3.0.3/include/linux/i2c.h linux-3.0.3/include/linux/i2c.h
53099--- linux-3.0.3/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
53100+++ linux-3.0.3/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
53101@@ -346,6 +346,7 @@ struct i2c_algorithm {
53102 /* To determine what the adapter supports */
53103 u32 (*functionality) (struct i2c_adapter *);
53104 };
53105+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53106
53107 /*
53108 * i2c_adapter is the structure used to identify a physical i2c bus along
53109diff -urNp linux-3.0.3/include/linux/i2o.h linux-3.0.3/include/linux/i2o.h
53110--- linux-3.0.3/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
53111+++ linux-3.0.3/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
53112@@ -564,7 +564,7 @@ struct i2o_controller {
53113 struct i2o_device *exec; /* Executive */
53114 #if BITS_PER_LONG == 64
53115 spinlock_t context_list_lock; /* lock for context_list */
53116- atomic_t context_list_counter; /* needed for unique contexts */
53117+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53118 struct list_head context_list; /* list of context id's
53119 and pointers */
53120 #endif
53121diff -urNp linux-3.0.3/include/linux/init.h linux-3.0.3/include/linux/init.h
53122--- linux-3.0.3/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
53123+++ linux-3.0.3/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
53124@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53125
53126 /* Each module must use one module_init(). */
53127 #define module_init(initfn) \
53128- static inline initcall_t __inittest(void) \
53129+ static inline __used initcall_t __inittest(void) \
53130 { return initfn; } \
53131 int init_module(void) __attribute__((alias(#initfn)));
53132
53133 /* This is only required if you want to be unloadable. */
53134 #define module_exit(exitfn) \
53135- static inline exitcall_t __exittest(void) \
53136+ static inline __used exitcall_t __exittest(void) \
53137 { return exitfn; } \
53138 void cleanup_module(void) __attribute__((alias(#exitfn)));
53139
53140diff -urNp linux-3.0.3/include/linux/init_task.h linux-3.0.3/include/linux/init_task.h
53141--- linux-3.0.3/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
53142+++ linux-3.0.3/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
53143@@ -126,6 +126,12 @@ extern struct cred init_cred;
53144 # define INIT_PERF_EVENTS(tsk)
53145 #endif
53146
53147+#ifdef CONFIG_X86
53148+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53149+#else
53150+#define INIT_TASK_THREAD_INFO
53151+#endif
53152+
53153 /*
53154 * INIT_TASK is used to set up the first task table, touch at
53155 * your own risk!. Base=0, limit=0x1fffff (=2MB)
53156@@ -164,6 +170,7 @@ extern struct cred init_cred;
53157 RCU_INIT_POINTER(.cred, &init_cred), \
53158 .comm = "swapper", \
53159 .thread = INIT_THREAD, \
53160+ INIT_TASK_THREAD_INFO \
53161 .fs = &init_fs, \
53162 .files = &init_files, \
53163 .signal = &init_signals, \
53164diff -urNp linux-3.0.3/include/linux/intel-iommu.h linux-3.0.3/include/linux/intel-iommu.h
53165--- linux-3.0.3/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
53166+++ linux-3.0.3/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
53167@@ -296,7 +296,7 @@ struct iommu_flush {
53168 u8 fm, u64 type);
53169 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53170 unsigned int size_order, u64 type);
53171-};
53172+} __no_const;
53173
53174 enum {
53175 SR_DMAR_FECTL_REG,
53176diff -urNp linux-3.0.3/include/linux/interrupt.h linux-3.0.3/include/linux/interrupt.h
53177--- linux-3.0.3/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
53178+++ linux-3.0.3/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
53179@@ -422,7 +422,7 @@ enum
53180 /* map softirq index to softirq name. update 'softirq_to_name' in
53181 * kernel/softirq.c when adding a new softirq.
53182 */
53183-extern char *softirq_to_name[NR_SOFTIRQS];
53184+extern const char * const softirq_to_name[NR_SOFTIRQS];
53185
53186 /* softirq mask and active fields moved to irq_cpustat_t in
53187 * asm/hardirq.h to get better cache usage. KAO
53188@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53189
53190 struct softirq_action
53191 {
53192- void (*action)(struct softirq_action *);
53193+ void (*action)(void);
53194 };
53195
53196 asmlinkage void do_softirq(void);
53197 asmlinkage void __do_softirq(void);
53198-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53199+extern void open_softirq(int nr, void (*action)(void));
53200 extern void softirq_init(void);
53201 static inline void __raise_softirq_irqoff(unsigned int nr)
53202 {
53203diff -urNp linux-3.0.3/include/linux/kallsyms.h linux-3.0.3/include/linux/kallsyms.h
53204--- linux-3.0.3/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
53205+++ linux-3.0.3/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
53206@@ -15,7 +15,8 @@
53207
53208 struct module;
53209
53210-#ifdef CONFIG_KALLSYMS
53211+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53212+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53213 /* Lookup the address for a symbol. Returns 0 if not found. */
53214 unsigned long kallsyms_lookup_name(const char *name);
53215
53216@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53217 /* Stupid that this does nothing, but I didn't create this mess. */
53218 #define __print_symbol(fmt, addr)
53219 #endif /*CONFIG_KALLSYMS*/
53220+#else /* when included by kallsyms.c, vsnprintf.c, or
53221+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53222+extern void __print_symbol(const char *fmt, unsigned long address);
53223+extern int sprint_backtrace(char *buffer, unsigned long address);
53224+extern int sprint_symbol(char *buffer, unsigned long address);
53225+const char *kallsyms_lookup(unsigned long addr,
53226+ unsigned long *symbolsize,
53227+ unsigned long *offset,
53228+ char **modname, char *namebuf);
53229+#endif
53230
53231 /* This macro allows us to keep printk typechecking */
53232 static void __check_printsym_format(const char *fmt, ...)
53233diff -urNp linux-3.0.3/include/linux/kgdb.h linux-3.0.3/include/linux/kgdb.h
53234--- linux-3.0.3/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
53235+++ linux-3.0.3/include/linux/kgdb.h 2011-08-23 21:47:56.000000000 -0400
53236@@ -53,7 +53,7 @@ extern int kgdb_connected;
53237 extern int kgdb_io_module_registered;
53238
53239 extern atomic_t kgdb_setting_breakpoint;
53240-extern atomic_t kgdb_cpu_doing_single_step;
53241+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53242
53243 extern struct task_struct *kgdb_usethread;
53244 extern struct task_struct *kgdb_contthread;
53245@@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53246 * hardware debug registers.
53247 */
53248 struct kgdb_arch {
53249- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53250- unsigned long flags;
53251+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53252+ const unsigned long flags;
53253
53254 int (*set_breakpoint)(unsigned long, char *);
53255 int (*remove_breakpoint)(unsigned long, char *);
53256@@ -268,14 +268,14 @@ struct kgdb_arch {
53257 * not a console
53258 */
53259 struct kgdb_io {
53260- const char *name;
53261+ const char * const name;
53262 int (*read_char) (void);
53263 void (*write_char) (u8);
53264 void (*flush) (void);
53265 int (*init) (void);
53266 void (*pre_exception) (void);
53267 void (*post_exception) (void);
53268- int is_console;
53269+ const int is_console;
53270 };
53271
53272 extern struct kgdb_arch arch_kgdb_ops;
53273diff -urNp linux-3.0.3/include/linux/kmod.h linux-3.0.3/include/linux/kmod.h
53274--- linux-3.0.3/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53275+++ linux-3.0.3/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53276@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53277 * usually useless though. */
53278 extern int __request_module(bool wait, const char *name, ...) \
53279 __attribute__((format(printf, 2, 3)));
53280+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53281+ __attribute__((format(printf, 3, 4)));
53282 #define request_module(mod...) __request_module(true, mod)
53283 #define request_module_nowait(mod...) __request_module(false, mod)
53284 #define try_then_request_module(x, mod...) \
53285diff -urNp linux-3.0.3/include/linux/kvm_host.h linux-3.0.3/include/linux/kvm_host.h
53286--- linux-3.0.3/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53287+++ linux-3.0.3/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53288@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53289 void vcpu_load(struct kvm_vcpu *vcpu);
53290 void vcpu_put(struct kvm_vcpu *vcpu);
53291
53292-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53293+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53294 struct module *module);
53295 void kvm_exit(void);
53296
53297@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53298 struct kvm_guest_debug *dbg);
53299 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53300
53301-int kvm_arch_init(void *opaque);
53302+int kvm_arch_init(const void *opaque);
53303 void kvm_arch_exit(void);
53304
53305 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53306diff -urNp linux-3.0.3/include/linux/libata.h linux-3.0.3/include/linux/libata.h
53307--- linux-3.0.3/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53308+++ linux-3.0.3/include/linux/libata.h 2011-08-23 21:47:56.000000000 -0400
53309@@ -898,7 +898,7 @@ struct ata_port_operations {
53310 * ->inherits must be the last field and all the preceding
53311 * fields must be pointers.
53312 */
53313- const struct ata_port_operations *inherits;
53314+ const struct ata_port_operations * const inherits;
53315 };
53316
53317 struct ata_port_info {
53318diff -urNp linux-3.0.3/include/linux/mca.h linux-3.0.3/include/linux/mca.h
53319--- linux-3.0.3/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53320+++ linux-3.0.3/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53321@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53322 int region);
53323 void * (*mca_transform_memory)(struct mca_device *,
53324 void *memory);
53325-};
53326+} __no_const;
53327
53328 struct mca_bus {
53329 u64 default_dma_mask;
53330diff -urNp linux-3.0.3/include/linux/memory.h linux-3.0.3/include/linux/memory.h
53331--- linux-3.0.3/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53332+++ linux-3.0.3/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53333@@ -144,7 +144,7 @@ struct memory_accessor {
53334 size_t count);
53335 ssize_t (*write)(struct memory_accessor *, const char *buf,
53336 off_t offset, size_t count);
53337-};
53338+} __no_const;
53339
53340 /*
53341 * Kernel text modification mutex, used for code patching. Users of this lock
53342diff -urNp linux-3.0.3/include/linux/mfd/abx500.h linux-3.0.3/include/linux/mfd/abx500.h
53343--- linux-3.0.3/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53344+++ linux-3.0.3/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53345@@ -234,6 +234,7 @@ struct abx500_ops {
53346 int (*event_registers_startup_state_get) (struct device *, u8 *);
53347 int (*startup_irq_enabled) (struct device *, unsigned int);
53348 };
53349+typedef struct abx500_ops __no_const abx500_ops_no_const;
53350
53351 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53352 void abx500_remove_ops(struct device *dev);
53353diff -urNp linux-3.0.3/include/linux/mm.h linux-3.0.3/include/linux/mm.h
53354--- linux-3.0.3/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53355+++ linux-3.0.3/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53356@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53357
53358 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53359 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53360+
53361+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53362+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53363+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53364+#else
53365 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53366+#endif
53367+
53368 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53369 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53370
53371@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53372 int set_page_dirty_lock(struct page *page);
53373 int clear_page_dirty_for_io(struct page *page);
53374
53375-/* Is the vma a continuation of the stack vma above it? */
53376-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53377-{
53378- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53379-}
53380-
53381-static inline int stack_guard_page_start(struct vm_area_struct *vma,
53382- unsigned long addr)
53383-{
53384- return (vma->vm_flags & VM_GROWSDOWN) &&
53385- (vma->vm_start == addr) &&
53386- !vma_growsdown(vma->vm_prev, addr);
53387-}
53388-
53389-/* Is the vma a continuation of the stack vma below it? */
53390-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53391-{
53392- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53393-}
53394-
53395-static inline int stack_guard_page_end(struct vm_area_struct *vma,
53396- unsigned long addr)
53397-{
53398- return (vma->vm_flags & VM_GROWSUP) &&
53399- (vma->vm_end == addr) &&
53400- !vma_growsup(vma->vm_next, addr);
53401-}
53402-
53403 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53404 unsigned long old_addr, struct vm_area_struct *new_vma,
53405 unsigned long new_addr, unsigned long len);
53406@@ -1169,6 +1148,15 @@ struct shrinker {
53407 extern void register_shrinker(struct shrinker *);
53408 extern void unregister_shrinker(struct shrinker *);
53409
53410+#ifdef CONFIG_MMU
53411+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53412+#else
53413+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53414+{
53415+ return __pgprot(0);
53416+}
53417+#endif
53418+
53419 int vma_wants_writenotify(struct vm_area_struct *vma);
53420
53421 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53422@@ -1452,6 +1440,7 @@ out:
53423 }
53424
53425 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53426+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53427
53428 extern unsigned long do_brk(unsigned long, unsigned long);
53429
53430@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53431 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53432 struct vm_area_struct **pprev);
53433
53434+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53435+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53436+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53437+
53438 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53439 NULL if none. Assume start_addr < end_addr. */
53440 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53441@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53442 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53443 }
53444
53445-#ifdef CONFIG_MMU
53446-pgprot_t vm_get_page_prot(unsigned long vm_flags);
53447-#else
53448-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53449-{
53450- return __pgprot(0);
53451-}
53452-#endif
53453-
53454 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53455 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53456 unsigned long pfn, unsigned long size, pgprot_t);
53457@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53458 extern int sysctl_memory_failure_early_kill;
53459 extern int sysctl_memory_failure_recovery;
53460 extern void shake_page(struct page *p, int access);
53461-extern atomic_long_t mce_bad_pages;
53462+extern atomic_long_unchecked_t mce_bad_pages;
53463 extern int soft_offline_page(struct page *page, int flags);
53464
53465 extern void dump_page(struct page *page);
53466@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53467 unsigned int pages_per_huge_page);
53468 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53469
53470+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53471+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53472+#else
53473+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53474+#endif
53475+
53476 #endif /* __KERNEL__ */
53477 #endif /* _LINUX_MM_H */
53478diff -urNp linux-3.0.3/include/linux/mm_types.h linux-3.0.3/include/linux/mm_types.h
53479--- linux-3.0.3/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53480+++ linux-3.0.3/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53481@@ -184,6 +184,8 @@ struct vm_area_struct {
53482 #ifdef CONFIG_NUMA
53483 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53484 #endif
53485+
53486+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53487 };
53488
53489 struct core_thread {
53490@@ -316,6 +318,24 @@ struct mm_struct {
53491 #ifdef CONFIG_CPUMASK_OFFSTACK
53492 struct cpumask cpumask_allocation;
53493 #endif
53494+
53495+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53496+ unsigned long pax_flags;
53497+#endif
53498+
53499+#ifdef CONFIG_PAX_DLRESOLVE
53500+ unsigned long call_dl_resolve;
53501+#endif
53502+
53503+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53504+ unsigned long call_syscall;
53505+#endif
53506+
53507+#ifdef CONFIG_PAX_ASLR
53508+ unsigned long delta_mmap; /* randomized offset */
53509+ unsigned long delta_stack; /* randomized offset */
53510+#endif
53511+
53512 };
53513
53514 static inline void mm_init_cpumask(struct mm_struct *mm)
53515diff -urNp linux-3.0.3/include/linux/mmu_notifier.h linux-3.0.3/include/linux/mmu_notifier.h
53516--- linux-3.0.3/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53517+++ linux-3.0.3/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53518@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53519 */
53520 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53521 ({ \
53522- pte_t __pte; \
53523+ pte_t ___pte; \
53524 struct vm_area_struct *___vma = __vma; \
53525 unsigned long ___address = __address; \
53526- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53527+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53528 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53529- __pte; \
53530+ ___pte; \
53531 })
53532
53533 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53534diff -urNp linux-3.0.3/include/linux/mmzone.h linux-3.0.3/include/linux/mmzone.h
53535--- linux-3.0.3/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53536+++ linux-3.0.3/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53537@@ -350,7 +350,7 @@ struct zone {
53538 unsigned long flags; /* zone flags, see below */
53539
53540 /* Zone statistics */
53541- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53542+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53543
53544 /*
53545 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53546diff -urNp linux-3.0.3/include/linux/mod_devicetable.h linux-3.0.3/include/linux/mod_devicetable.h
53547--- linux-3.0.3/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53548+++ linux-3.0.3/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53549@@ -12,7 +12,7 @@
53550 typedef unsigned long kernel_ulong_t;
53551 #endif
53552
53553-#define PCI_ANY_ID (~0)
53554+#define PCI_ANY_ID ((__u16)~0)
53555
53556 struct pci_device_id {
53557 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53558@@ -131,7 +131,7 @@ struct usb_device_id {
53559 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53560 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53561
53562-#define HID_ANY_ID (~0)
53563+#define HID_ANY_ID (~0U)
53564
53565 struct hid_device_id {
53566 __u16 bus;
53567diff -urNp linux-3.0.3/include/linux/module.h linux-3.0.3/include/linux/module.h
53568--- linux-3.0.3/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53569+++ linux-3.0.3/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53570@@ -16,6 +16,7 @@
53571 #include <linux/kobject.h>
53572 #include <linux/moduleparam.h>
53573 #include <linux/tracepoint.h>
53574+#include <linux/fs.h>
53575
53576 #include <linux/percpu.h>
53577 #include <asm/module.h>
53578@@ -325,19 +326,16 @@ struct module
53579 int (*init)(void);
53580
53581 /* If this is non-NULL, vfree after init() returns */
53582- void *module_init;
53583+ void *module_init_rx, *module_init_rw;
53584
53585 /* Here is the actual code + data, vfree'd on unload. */
53586- void *module_core;
53587+ void *module_core_rx, *module_core_rw;
53588
53589 /* Here are the sizes of the init and core sections */
53590- unsigned int init_size, core_size;
53591+ unsigned int init_size_rw, core_size_rw;
53592
53593 /* The size of the executable code in each section. */
53594- unsigned int init_text_size, core_text_size;
53595-
53596- /* Size of RO sections of the module (text+rodata) */
53597- unsigned int init_ro_size, core_ro_size;
53598+ unsigned int init_size_rx, core_size_rx;
53599
53600 /* Arch-specific module values */
53601 struct mod_arch_specific arch;
53602@@ -393,6 +391,10 @@ struct module
53603 #ifdef CONFIG_EVENT_TRACING
53604 struct ftrace_event_call **trace_events;
53605 unsigned int num_trace_events;
53606+ struct file_operations trace_id;
53607+ struct file_operations trace_enable;
53608+ struct file_operations trace_format;
53609+ struct file_operations trace_filter;
53610 #endif
53611 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53612 unsigned int num_ftrace_callsites;
53613@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53614 bool is_module_percpu_address(unsigned long addr);
53615 bool is_module_text_address(unsigned long addr);
53616
53617+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53618+{
53619+
53620+#ifdef CONFIG_PAX_KERNEXEC
53621+ if (ktla_ktva(addr) >= (unsigned long)start &&
53622+ ktla_ktva(addr) < (unsigned long)start + size)
53623+ return 1;
53624+#endif
53625+
53626+ return ((void *)addr >= start && (void *)addr < start + size);
53627+}
53628+
53629+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53630+{
53631+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53632+}
53633+
53634+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53635+{
53636+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53637+}
53638+
53639+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53640+{
53641+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53642+}
53643+
53644+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53645+{
53646+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53647+}
53648+
53649 static inline int within_module_core(unsigned long addr, struct module *mod)
53650 {
53651- return (unsigned long)mod->module_core <= addr &&
53652- addr < (unsigned long)mod->module_core + mod->core_size;
53653+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53654 }
53655
53656 static inline int within_module_init(unsigned long addr, struct module *mod)
53657 {
53658- return (unsigned long)mod->module_init <= addr &&
53659- addr < (unsigned long)mod->module_init + mod->init_size;
53660+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53661 }
53662
53663 /* Search for module by name: must hold module_mutex. */
53664diff -urNp linux-3.0.3/include/linux/moduleloader.h linux-3.0.3/include/linux/moduleloader.h
53665--- linux-3.0.3/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53666+++ linux-3.0.3/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53667@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53668 sections. Returns NULL on failure. */
53669 void *module_alloc(unsigned long size);
53670
53671+#ifdef CONFIG_PAX_KERNEXEC
53672+void *module_alloc_exec(unsigned long size);
53673+#else
53674+#define module_alloc_exec(x) module_alloc(x)
53675+#endif
53676+
53677 /* Free memory returned from module_alloc. */
53678 void module_free(struct module *mod, void *module_region);
53679
53680+#ifdef CONFIG_PAX_KERNEXEC
53681+void module_free_exec(struct module *mod, void *module_region);
53682+#else
53683+#define module_free_exec(x, y) module_free((x), (y))
53684+#endif
53685+
53686 /* Apply the given relocation to the (simplified) ELF. Return -error
53687 or 0. */
53688 int apply_relocate(Elf_Shdr *sechdrs,
53689diff -urNp linux-3.0.3/include/linux/moduleparam.h linux-3.0.3/include/linux/moduleparam.h
53690--- linux-3.0.3/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53691+++ linux-3.0.3/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53692@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53693 * @len is usually just sizeof(string).
53694 */
53695 #define module_param_string(name, string, len, perm) \
53696- static const struct kparam_string __param_string_##name \
53697+ static const struct kparam_string __param_string_##name __used \
53698 = { len, string }; \
53699 __module_param_call(MODULE_PARAM_PREFIX, name, \
53700 &param_ops_string, \
53701@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53702 * module_param_named() for why this might be necessary.
53703 */
53704 #define module_param_array_named(name, array, type, nump, perm) \
53705- static const struct kparam_array __param_arr_##name \
53706+ static const struct kparam_array __param_arr_##name __used \
53707 = { .max = ARRAY_SIZE(array), .num = nump, \
53708 .ops = &param_ops_##type, \
53709 .elemsize = sizeof(array[0]), .elem = array }; \
53710diff -urNp linux-3.0.3/include/linux/namei.h linux-3.0.3/include/linux/namei.h
53711--- linux-3.0.3/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53712+++ linux-3.0.3/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53713@@ -24,7 +24,7 @@ struct nameidata {
53714 unsigned seq;
53715 int last_type;
53716 unsigned depth;
53717- char *saved_names[MAX_NESTED_LINKS + 1];
53718+ const char *saved_names[MAX_NESTED_LINKS + 1];
53719
53720 /* Intent data */
53721 union {
53722@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53723 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53724 extern void unlock_rename(struct dentry *, struct dentry *);
53725
53726-static inline void nd_set_link(struct nameidata *nd, char *path)
53727+static inline void nd_set_link(struct nameidata *nd, const char *path)
53728 {
53729 nd->saved_names[nd->depth] = path;
53730 }
53731
53732-static inline char *nd_get_link(struct nameidata *nd)
53733+static inline const char *nd_get_link(const struct nameidata *nd)
53734 {
53735 return nd->saved_names[nd->depth];
53736 }
53737diff -urNp linux-3.0.3/include/linux/netdevice.h linux-3.0.3/include/linux/netdevice.h
53738--- linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53739+++ linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53740@@ -979,6 +979,7 @@ struct net_device_ops {
53741 int (*ndo_set_features)(struct net_device *dev,
53742 u32 features);
53743 };
53744+typedef struct net_device_ops __no_const net_device_ops_no_const;
53745
53746 /*
53747 * The DEVICE structure.
53748diff -urNp linux-3.0.3/include/linux/netfilter/xt_gradm.h linux-3.0.3/include/linux/netfilter/xt_gradm.h
53749--- linux-3.0.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53750+++ linux-3.0.3/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53751@@ -0,0 +1,9 @@
53752+#ifndef _LINUX_NETFILTER_XT_GRADM_H
53753+#define _LINUX_NETFILTER_XT_GRADM_H 1
53754+
53755+struct xt_gradm_mtinfo {
53756+ __u16 flags;
53757+ __u16 invflags;
53758+};
53759+
53760+#endif
53761diff -urNp linux-3.0.3/include/linux/oprofile.h linux-3.0.3/include/linux/oprofile.h
53762--- linux-3.0.3/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53763+++ linux-3.0.3/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53764@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53765 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53766 char const * name, ulong * val);
53767
53768-/** Create a file for read-only access to an atomic_t. */
53769+/** Create a file for read-only access to an atomic_unchecked_t. */
53770 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53771- char const * name, atomic_t * val);
53772+ char const * name, atomic_unchecked_t * val);
53773
53774 /** create a directory */
53775 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53776diff -urNp linux-3.0.3/include/linux/padata.h linux-3.0.3/include/linux/padata.h
53777--- linux-3.0.3/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53778+++ linux-3.0.3/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53779@@ -129,7 +129,7 @@ struct parallel_data {
53780 struct padata_instance *pinst;
53781 struct padata_parallel_queue __percpu *pqueue;
53782 struct padata_serial_queue __percpu *squeue;
53783- atomic_t seq_nr;
53784+ atomic_unchecked_t seq_nr;
53785 atomic_t reorder_objects;
53786 atomic_t refcnt;
53787 unsigned int max_seq_nr;
53788diff -urNp linux-3.0.3/include/linux/perf_event.h linux-3.0.3/include/linux/perf_event.h
53789--- linux-3.0.3/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53790+++ linux-3.0.3/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53791@@ -761,8 +761,8 @@ struct perf_event {
53792
53793 enum perf_event_active_state state;
53794 unsigned int attach_state;
53795- local64_t count;
53796- atomic64_t child_count;
53797+ local64_t count; /* PaX: fix it one day */
53798+ atomic64_unchecked_t child_count;
53799
53800 /*
53801 * These are the total time in nanoseconds that the event
53802@@ -813,8 +813,8 @@ struct perf_event {
53803 * These accumulate total time (in nanoseconds) that children
53804 * events have been enabled and running, respectively.
53805 */
53806- atomic64_t child_total_time_enabled;
53807- atomic64_t child_total_time_running;
53808+ atomic64_unchecked_t child_total_time_enabled;
53809+ atomic64_unchecked_t child_total_time_running;
53810
53811 /*
53812 * Protect attach/detach and child_list:
53813diff -urNp linux-3.0.3/include/linux/pipe_fs_i.h linux-3.0.3/include/linux/pipe_fs_i.h
53814--- linux-3.0.3/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53815+++ linux-3.0.3/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53816@@ -46,9 +46,9 @@ struct pipe_buffer {
53817 struct pipe_inode_info {
53818 wait_queue_head_t wait;
53819 unsigned int nrbufs, curbuf, buffers;
53820- unsigned int readers;
53821- unsigned int writers;
53822- unsigned int waiting_writers;
53823+ atomic_t readers;
53824+ atomic_t writers;
53825+ atomic_t waiting_writers;
53826 unsigned int r_counter;
53827 unsigned int w_counter;
53828 struct page *tmp_page;
53829diff -urNp linux-3.0.3/include/linux/pm_runtime.h linux-3.0.3/include/linux/pm_runtime.h
53830--- linux-3.0.3/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53831+++ linux-3.0.3/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53832@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53833
53834 static inline void pm_runtime_mark_last_busy(struct device *dev)
53835 {
53836- ACCESS_ONCE(dev->power.last_busy) = jiffies;
53837+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53838 }
53839
53840 #else /* !CONFIG_PM_RUNTIME */
53841diff -urNp linux-3.0.3/include/linux/poison.h linux-3.0.3/include/linux/poison.h
53842--- linux-3.0.3/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53843+++ linux-3.0.3/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53844@@ -19,8 +19,8 @@
53845 * under normal circumstances, used to verify that nobody uses
53846 * non-initialized list entries.
53847 */
53848-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53849-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53850+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53851+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53852
53853 /********** include/linux/timer.h **********/
53854 /*
53855diff -urNp linux-3.0.3/include/linux/preempt.h linux-3.0.3/include/linux/preempt.h
53856--- linux-3.0.3/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53857+++ linux-3.0.3/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53858@@ -115,7 +115,7 @@ struct preempt_ops {
53859 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53860 void (*sched_out)(struct preempt_notifier *notifier,
53861 struct task_struct *next);
53862-};
53863+} __no_const;
53864
53865 /**
53866 * preempt_notifier - key for installing preemption notifiers
53867diff -urNp linux-3.0.3/include/linux/proc_fs.h linux-3.0.3/include/linux/proc_fs.h
53868--- linux-3.0.3/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53869+++ linux-3.0.3/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53870@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53871 return proc_create_data(name, mode, parent, proc_fops, NULL);
53872 }
53873
53874+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53875+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53876+{
53877+#ifdef CONFIG_GRKERNSEC_PROC_USER
53878+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53879+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53880+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53881+#else
53882+ return proc_create_data(name, mode, parent, proc_fops, NULL);
53883+#endif
53884+}
53885+
53886+
53887 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53888 mode_t mode, struct proc_dir_entry *base,
53889 read_proc_t *read_proc, void * data)
53890@@ -258,7 +271,7 @@ union proc_op {
53891 int (*proc_show)(struct seq_file *m,
53892 struct pid_namespace *ns, struct pid *pid,
53893 struct task_struct *task);
53894-};
53895+} __no_const;
53896
53897 struct ctl_table_header;
53898 struct ctl_table;
53899diff -urNp linux-3.0.3/include/linux/ptrace.h linux-3.0.3/include/linux/ptrace.h
53900--- linux-3.0.3/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53901+++ linux-3.0.3/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53902@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53903 extern void exit_ptrace(struct task_struct *tracer);
53904 #define PTRACE_MODE_READ 1
53905 #define PTRACE_MODE_ATTACH 2
53906-/* Returns 0 on success, -errno on denial. */
53907-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53908 /* Returns true on success, false on denial. */
53909 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53910+/* Returns true on success, false on denial. */
53911+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53912
53913 static inline int ptrace_reparented(struct task_struct *child)
53914 {
53915diff -urNp linux-3.0.3/include/linux/random.h linux-3.0.3/include/linux/random.h
53916--- linux-3.0.3/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53917+++ linux-3.0.3/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53918@@ -69,12 +69,17 @@ void srandom32(u32 seed);
53919
53920 u32 prandom32(struct rnd_state *);
53921
53922+static inline unsigned long pax_get_random_long(void)
53923+{
53924+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53925+}
53926+
53927 /*
53928 * Handle minimum values for seeds
53929 */
53930 static inline u32 __seed(u32 x, u32 m)
53931 {
53932- return (x < m) ? x + m : x;
53933+ return (x <= m) ? x + m + 1 : x;
53934 }
53935
53936 /**
53937diff -urNp linux-3.0.3/include/linux/reboot.h linux-3.0.3/include/linux/reboot.h
53938--- linux-3.0.3/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53939+++ linux-3.0.3/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53940@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53941 * Architecture-specific implementations of sys_reboot commands.
53942 */
53943
53944-extern void machine_restart(char *cmd);
53945-extern void machine_halt(void);
53946-extern void machine_power_off(void);
53947+extern void machine_restart(char *cmd) __noreturn;
53948+extern void machine_halt(void) __noreturn;
53949+extern void machine_power_off(void) __noreturn;
53950
53951 extern void machine_shutdown(void);
53952 struct pt_regs;
53953@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53954 */
53955
53956 extern void kernel_restart_prepare(char *cmd);
53957-extern void kernel_restart(char *cmd);
53958-extern void kernel_halt(void);
53959-extern void kernel_power_off(void);
53960+extern void kernel_restart(char *cmd) __noreturn;
53961+extern void kernel_halt(void) __noreturn;
53962+extern void kernel_power_off(void) __noreturn;
53963
53964 extern int C_A_D; /* for sysctl */
53965 void ctrl_alt_del(void);
53966@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53967 * Emergency restart, callable from an interrupt handler.
53968 */
53969
53970-extern void emergency_restart(void);
53971+extern void emergency_restart(void) __noreturn;
53972 #include <asm/emergency-restart.h>
53973
53974 #endif
53975diff -urNp linux-3.0.3/include/linux/reiserfs_fs.h linux-3.0.3/include/linux/reiserfs_fs.h
53976--- linux-3.0.3/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53977+++ linux-3.0.3/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53978@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53979 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
53980
53981 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
53982-#define get_generation(s) atomic_read (&fs_generation(s))
53983+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
53984 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
53985 #define __fs_changed(gen,s) (gen != get_generation (s))
53986 #define fs_changed(gen,s) \
53987diff -urNp linux-3.0.3/include/linux/reiserfs_fs_sb.h linux-3.0.3/include/linux/reiserfs_fs_sb.h
53988--- linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
53989+++ linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
53990@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
53991 /* Comment? -Hans */
53992 wait_queue_head_t s_wait;
53993 /* To be obsoleted soon by per buffer seals.. -Hans */
53994- atomic_t s_generation_counter; // increased by one every time the
53995+ atomic_unchecked_t s_generation_counter; // increased by one every time the
53996 // tree gets re-balanced
53997 unsigned long s_properties; /* File system properties. Currently holds
53998 on-disk FS format */
53999diff -urNp linux-3.0.3/include/linux/relay.h linux-3.0.3/include/linux/relay.h
54000--- linux-3.0.3/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
54001+++ linux-3.0.3/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
54002@@ -159,7 +159,7 @@ struct rchan_callbacks
54003 * The callback should return 0 if successful, negative if not.
54004 */
54005 int (*remove_buf_file)(struct dentry *dentry);
54006-};
54007+} __no_const;
54008
54009 /*
54010 * CONFIG_RELAY kernel API, kernel/relay.c
54011diff -urNp linux-3.0.3/include/linux/rfkill.h linux-3.0.3/include/linux/rfkill.h
54012--- linux-3.0.3/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
54013+++ linux-3.0.3/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
54014@@ -147,6 +147,7 @@ struct rfkill_ops {
54015 void (*query)(struct rfkill *rfkill, void *data);
54016 int (*set_block)(void *data, bool blocked);
54017 };
54018+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54019
54020 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54021 /**
54022diff -urNp linux-3.0.3/include/linux/rmap.h linux-3.0.3/include/linux/rmap.h
54023--- linux-3.0.3/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
54024+++ linux-3.0.3/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
54025@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54026 void anon_vma_init(void); /* create anon_vma_cachep */
54027 int anon_vma_prepare(struct vm_area_struct *);
54028 void unlink_anon_vmas(struct vm_area_struct *);
54029-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54030-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54031+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54032+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54033 void __anon_vma_link(struct vm_area_struct *);
54034
54035 static inline void anon_vma_merge(struct vm_area_struct *vma,
54036diff -urNp linux-3.0.3/include/linux/sched.h linux-3.0.3/include/linux/sched.h
54037--- linux-3.0.3/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
54038+++ linux-3.0.3/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
54039@@ -100,6 +100,7 @@ struct bio_list;
54040 struct fs_struct;
54041 struct perf_event_context;
54042 struct blk_plug;
54043+struct linux_binprm;
54044
54045 /*
54046 * List of flags we want to share for kernel threads,
54047@@ -380,10 +381,13 @@ struct user_namespace;
54048 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54049
54050 extern int sysctl_max_map_count;
54051+extern unsigned long sysctl_heap_stack_gap;
54052
54053 #include <linux/aio.h>
54054
54055 #ifdef CONFIG_MMU
54056+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54057+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54058 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54059 extern unsigned long
54060 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54061@@ -629,6 +633,17 @@ struct signal_struct {
54062 #ifdef CONFIG_TASKSTATS
54063 struct taskstats *stats;
54064 #endif
54065+
54066+#ifdef CONFIG_GRKERNSEC
54067+ u32 curr_ip;
54068+ u32 saved_ip;
54069+ u32 gr_saddr;
54070+ u32 gr_daddr;
54071+ u16 gr_sport;
54072+ u16 gr_dport;
54073+ u8 used_accept:1;
54074+#endif
54075+
54076 #ifdef CONFIG_AUDIT
54077 unsigned audit_tty;
54078 struct tty_audit_buf *tty_audit_buf;
54079@@ -710,6 +725,11 @@ struct user_struct {
54080 struct key *session_keyring; /* UID's default session keyring */
54081 #endif
54082
54083+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54084+ unsigned int banned;
54085+ unsigned long ban_expires;
54086+#endif
54087+
54088 /* Hash table maintenance information */
54089 struct hlist_node uidhash_node;
54090 uid_t uid;
54091@@ -1340,8 +1360,8 @@ struct task_struct {
54092 struct list_head thread_group;
54093
54094 struct completion *vfork_done; /* for vfork() */
54095- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54096- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54097+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54098+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54099
54100 cputime_t utime, stime, utimescaled, stimescaled;
54101 cputime_t gtime;
54102@@ -1357,13 +1377,6 @@ struct task_struct {
54103 struct task_cputime cputime_expires;
54104 struct list_head cpu_timers[3];
54105
54106-/* process credentials */
54107- const struct cred __rcu *real_cred; /* objective and real subjective task
54108- * credentials (COW) */
54109- const struct cred __rcu *cred; /* effective (overridable) subjective task
54110- * credentials (COW) */
54111- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54112-
54113 char comm[TASK_COMM_LEN]; /* executable name excluding path
54114 - access with [gs]et_task_comm (which lock
54115 it with task_lock())
54116@@ -1380,8 +1393,16 @@ struct task_struct {
54117 #endif
54118 /* CPU-specific state of this task */
54119 struct thread_struct thread;
54120+/* thread_info moved to task_struct */
54121+#ifdef CONFIG_X86
54122+ struct thread_info tinfo;
54123+#endif
54124 /* filesystem information */
54125 struct fs_struct *fs;
54126+
54127+ const struct cred __rcu *cred; /* effective (overridable) subjective task
54128+ * credentials (COW) */
54129+
54130 /* open file information */
54131 struct files_struct *files;
54132 /* namespaces */
54133@@ -1428,6 +1449,11 @@ struct task_struct {
54134 struct rt_mutex_waiter *pi_blocked_on;
54135 #endif
54136
54137+/* process credentials */
54138+ const struct cred __rcu *real_cred; /* objective and real subjective task
54139+ * credentials (COW) */
54140+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54141+
54142 #ifdef CONFIG_DEBUG_MUTEXES
54143 /* mutex deadlock detection */
54144 struct mutex_waiter *blocked_on;
54145@@ -1538,6 +1564,21 @@ struct task_struct {
54146 unsigned long default_timer_slack_ns;
54147
54148 struct list_head *scm_work_list;
54149+
54150+#ifdef CONFIG_GRKERNSEC
54151+ /* grsecurity */
54152+ struct dentry *gr_chroot_dentry;
54153+ struct acl_subject_label *acl;
54154+ struct acl_role_label *role;
54155+ struct file *exec_file;
54156+ u16 acl_role_id;
54157+ /* is this the task that authenticated to the special role */
54158+ u8 acl_sp_role;
54159+ u8 is_writable;
54160+ u8 brute;
54161+ u8 gr_is_chrooted;
54162+#endif
54163+
54164 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54165 /* Index of current stored address in ret_stack */
54166 int curr_ret_stack;
54167@@ -1572,6 +1613,57 @@ struct task_struct {
54168 #endif
54169 };
54170
54171+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54172+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54173+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54174+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54175+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54176+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54177+
54178+#ifdef CONFIG_PAX_SOFTMODE
54179+extern int pax_softmode;
54180+#endif
54181+
54182+extern int pax_check_flags(unsigned long *);
54183+
54184+/* if tsk != current then task_lock must be held on it */
54185+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54186+static inline unsigned long pax_get_flags(struct task_struct *tsk)
54187+{
54188+ if (likely(tsk->mm))
54189+ return tsk->mm->pax_flags;
54190+ else
54191+ return 0UL;
54192+}
54193+
54194+/* if tsk != current then task_lock must be held on it */
54195+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54196+{
54197+ if (likely(tsk->mm)) {
54198+ tsk->mm->pax_flags = flags;
54199+ return 0;
54200+ }
54201+ return -EINVAL;
54202+}
54203+#endif
54204+
54205+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54206+extern void pax_set_initial_flags(struct linux_binprm *bprm);
54207+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54208+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54209+#endif
54210+
54211+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54212+extern void pax_report_insns(void *pc, void *sp);
54213+extern void pax_report_refcount_overflow(struct pt_regs *regs);
54214+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54215+
54216+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54217+extern void pax_track_stack(void);
54218+#else
54219+static inline void pax_track_stack(void) {}
54220+#endif
54221+
54222 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54223 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54224
54225@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
54226 #define PF_DUMPCORE 0x00000200 /* dumped core */
54227 #define PF_SIGNALED 0x00000400 /* killed by a signal */
54228 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
54229+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
54230 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
54231 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
54232 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
54233@@ -2056,7 +2149,9 @@ void yield(void);
54234 extern struct exec_domain default_exec_domain;
54235
54236 union thread_union {
54237+#ifndef CONFIG_X86
54238 struct thread_info thread_info;
54239+#endif
54240 unsigned long stack[THREAD_SIZE/sizeof(long)];
54241 };
54242
54243@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
54244 */
54245
54246 extern struct task_struct *find_task_by_vpid(pid_t nr);
54247+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54248 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54249 struct pid_namespace *ns);
54250
54251@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
54252 extern void exit_itimers(struct signal_struct *);
54253 extern void flush_itimer_signals(void);
54254
54255-extern NORET_TYPE void do_group_exit(int);
54256+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54257
54258 extern void daemonize(const char *, ...);
54259 extern int allow_signal(int);
54260@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
54261
54262 #endif
54263
54264-static inline int object_is_on_stack(void *obj)
54265+static inline int object_starts_on_stack(void *obj)
54266 {
54267- void *stack = task_stack_page(current);
54268+ const void *stack = task_stack_page(current);
54269
54270 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54271 }
54272
54273+#ifdef CONFIG_PAX_USERCOPY
54274+extern int object_is_on_stack(const void *obj, unsigned long len);
54275+#endif
54276+
54277 extern void thread_info_cache_init(void);
54278
54279 #ifdef CONFIG_DEBUG_STACK_USAGE
54280diff -urNp linux-3.0.3/include/linux/screen_info.h linux-3.0.3/include/linux/screen_info.h
54281--- linux-3.0.3/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54282+++ linux-3.0.3/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54283@@ -43,7 +43,8 @@ struct screen_info {
54284 __u16 pages; /* 0x32 */
54285 __u16 vesa_attributes; /* 0x34 */
54286 __u32 capabilities; /* 0x36 */
54287- __u8 _reserved[6]; /* 0x3a */
54288+ __u16 vesapm_size; /* 0x3a */
54289+ __u8 _reserved[4]; /* 0x3c */
54290 } __attribute__((packed));
54291
54292 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54293diff -urNp linux-3.0.3/include/linux/security.h linux-3.0.3/include/linux/security.h
54294--- linux-3.0.3/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54295+++ linux-3.0.3/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54296@@ -36,6 +36,7 @@
54297 #include <linux/key.h>
54298 #include <linux/xfrm.h>
54299 #include <linux/slab.h>
54300+#include <linux/grsecurity.h>
54301 #include <net/flow.h>
54302
54303 /* Maximum number of letters for an LSM name string */
54304diff -urNp linux-3.0.3/include/linux/seq_file.h linux-3.0.3/include/linux/seq_file.h
54305--- linux-3.0.3/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54306+++ linux-3.0.3/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54307@@ -32,6 +32,7 @@ struct seq_operations {
54308 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54309 int (*show) (struct seq_file *m, void *v);
54310 };
54311+typedef struct seq_operations __no_const seq_operations_no_const;
54312
54313 #define SEQ_SKIP 1
54314
54315diff -urNp linux-3.0.3/include/linux/shmem_fs.h linux-3.0.3/include/linux/shmem_fs.h
54316--- linux-3.0.3/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54317+++ linux-3.0.3/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54318@@ -10,7 +10,7 @@
54319
54320 #define SHMEM_NR_DIRECT 16
54321
54322-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54323+#define SHMEM_SYMLINK_INLINE_LEN 64
54324
54325 struct shmem_inode_info {
54326 spinlock_t lock;
54327diff -urNp linux-3.0.3/include/linux/shm.h linux-3.0.3/include/linux/shm.h
54328--- linux-3.0.3/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54329+++ linux-3.0.3/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54330@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54331 pid_t shm_cprid;
54332 pid_t shm_lprid;
54333 struct user_struct *mlock_user;
54334+#ifdef CONFIG_GRKERNSEC
54335+ time_t shm_createtime;
54336+ pid_t shm_lapid;
54337+#endif
54338 };
54339
54340 /* shm_mode upper byte flags */
54341diff -urNp linux-3.0.3/include/linux/skbuff.h linux-3.0.3/include/linux/skbuff.h
54342--- linux-3.0.3/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54343+++ linux-3.0.3/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54344@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54345 */
54346 static inline int skb_queue_empty(const struct sk_buff_head *list)
54347 {
54348- return list->next == (struct sk_buff *)list;
54349+ return list->next == (const struct sk_buff *)list;
54350 }
54351
54352 /**
54353@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54354 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54355 const struct sk_buff *skb)
54356 {
54357- return skb->next == (struct sk_buff *)list;
54358+ return skb->next == (const struct sk_buff *)list;
54359 }
54360
54361 /**
54362@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54363 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54364 const struct sk_buff *skb)
54365 {
54366- return skb->prev == (struct sk_buff *)list;
54367+ return skb->prev == (const struct sk_buff *)list;
54368 }
54369
54370 /**
54371@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54372 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54373 */
54374 #ifndef NET_SKB_PAD
54375-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54376+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54377 #endif
54378
54379 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54380diff -urNp linux-3.0.3/include/linux/slab_def.h linux-3.0.3/include/linux/slab_def.h
54381--- linux-3.0.3/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54382+++ linux-3.0.3/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54383@@ -96,10 +96,10 @@ struct kmem_cache {
54384 unsigned long node_allocs;
54385 unsigned long node_frees;
54386 unsigned long node_overflow;
54387- atomic_t allochit;
54388- atomic_t allocmiss;
54389- atomic_t freehit;
54390- atomic_t freemiss;
54391+ atomic_unchecked_t allochit;
54392+ atomic_unchecked_t allocmiss;
54393+ atomic_unchecked_t freehit;
54394+ atomic_unchecked_t freemiss;
54395
54396 /*
54397 * If debugging is enabled, then the allocator can add additional
54398diff -urNp linux-3.0.3/include/linux/slab.h linux-3.0.3/include/linux/slab.h
54399--- linux-3.0.3/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54400+++ linux-3.0.3/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54401@@ -11,12 +11,20 @@
54402
54403 #include <linux/gfp.h>
54404 #include <linux/types.h>
54405+#include <linux/err.h>
54406
54407 /*
54408 * Flags to pass to kmem_cache_create().
54409 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54410 */
54411 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54412+
54413+#ifdef CONFIG_PAX_USERCOPY
54414+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54415+#else
54416+#define SLAB_USERCOPY 0x00000000UL
54417+#endif
54418+
54419 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54420 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54421 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54422@@ -87,10 +95,13 @@
54423 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54424 * Both make kfree a no-op.
54425 */
54426-#define ZERO_SIZE_PTR ((void *)16)
54427+#define ZERO_SIZE_PTR \
54428+({ \
54429+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54430+ (void *)(-MAX_ERRNO-1L); \
54431+})
54432
54433-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54434- (unsigned long)ZERO_SIZE_PTR)
54435+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54436
54437 /*
54438 * struct kmem_cache related prototypes
54439@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54440 void kfree(const void *);
54441 void kzfree(const void *);
54442 size_t ksize(const void *);
54443+void check_object_size(const void *ptr, unsigned long n, bool to);
54444
54445 /*
54446 * Allocator specific definitions. These are mainly used to establish optimized
54447@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54448
54449 void __init kmem_cache_init_late(void);
54450
54451+#define kmalloc(x, y) \
54452+({ \
54453+ void *___retval; \
54454+ intoverflow_t ___x = (intoverflow_t)x; \
54455+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54456+ ___retval = NULL; \
54457+ else \
54458+ ___retval = kmalloc((size_t)___x, (y)); \
54459+ ___retval; \
54460+})
54461+
54462+#define kmalloc_node(x, y, z) \
54463+({ \
54464+ void *___retval; \
54465+ intoverflow_t ___x = (intoverflow_t)x; \
54466+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54467+ ___retval = NULL; \
54468+ else \
54469+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
54470+ ___retval; \
54471+})
54472+
54473+#define kzalloc(x, y) \
54474+({ \
54475+ void *___retval; \
54476+ intoverflow_t ___x = (intoverflow_t)x; \
54477+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54478+ ___retval = NULL; \
54479+ else \
54480+ ___retval = kzalloc((size_t)___x, (y)); \
54481+ ___retval; \
54482+})
54483+
54484+#define __krealloc(x, y, z) \
54485+({ \
54486+ void *___retval; \
54487+ intoverflow_t ___y = (intoverflow_t)y; \
54488+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54489+ ___retval = NULL; \
54490+ else \
54491+ ___retval = __krealloc((x), (size_t)___y, (z)); \
54492+ ___retval; \
54493+})
54494+
54495+#define krealloc(x, y, z) \
54496+({ \
54497+ void *___retval; \
54498+ intoverflow_t ___y = (intoverflow_t)y; \
54499+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54500+ ___retval = NULL; \
54501+ else \
54502+ ___retval = krealloc((x), (size_t)___y, (z)); \
54503+ ___retval; \
54504+})
54505+
54506 #endif /* _LINUX_SLAB_H */
54507diff -urNp linux-3.0.3/include/linux/slub_def.h linux-3.0.3/include/linux/slub_def.h
54508--- linux-3.0.3/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54509+++ linux-3.0.3/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54510@@ -82,7 +82,7 @@ struct kmem_cache {
54511 struct kmem_cache_order_objects max;
54512 struct kmem_cache_order_objects min;
54513 gfp_t allocflags; /* gfp flags to use on each alloc */
54514- int refcount; /* Refcount for slab cache destroy */
54515+ atomic_t refcount; /* Refcount for slab cache destroy */
54516 void (*ctor)(void *);
54517 int inuse; /* Offset to metadata */
54518 int align; /* Alignment */
54519@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54520 }
54521
54522 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54523-void *__kmalloc(size_t size, gfp_t flags);
54524+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54525
54526 static __always_inline void *
54527 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54528diff -urNp linux-3.0.3/include/linux/sonet.h linux-3.0.3/include/linux/sonet.h
54529--- linux-3.0.3/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54530+++ linux-3.0.3/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54531@@ -61,7 +61,7 @@ struct sonet_stats {
54532 #include <asm/atomic.h>
54533
54534 struct k_sonet_stats {
54535-#define __HANDLE_ITEM(i) atomic_t i
54536+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54537 __SONET_ITEMS
54538 #undef __HANDLE_ITEM
54539 };
54540diff -urNp linux-3.0.3/include/linux/sunrpc/clnt.h linux-3.0.3/include/linux/sunrpc/clnt.h
54541--- linux-3.0.3/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54542+++ linux-3.0.3/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54543@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54544 {
54545 switch (sap->sa_family) {
54546 case AF_INET:
54547- return ntohs(((struct sockaddr_in *)sap)->sin_port);
54548+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54549 case AF_INET6:
54550- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54551+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54552 }
54553 return 0;
54554 }
54555@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54556 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54557 const struct sockaddr *src)
54558 {
54559- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54560+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54561 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54562
54563 dsin->sin_family = ssin->sin_family;
54564@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54565 if (sa->sa_family != AF_INET6)
54566 return 0;
54567
54568- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54569+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54570 }
54571
54572 #endif /* __KERNEL__ */
54573diff -urNp linux-3.0.3/include/linux/sunrpc/svc_rdma.h linux-3.0.3/include/linux/sunrpc/svc_rdma.h
54574--- linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54575+++ linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54576@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54577 extern unsigned int svcrdma_max_requests;
54578 extern unsigned int svcrdma_max_req_size;
54579
54580-extern atomic_t rdma_stat_recv;
54581-extern atomic_t rdma_stat_read;
54582-extern atomic_t rdma_stat_write;
54583-extern atomic_t rdma_stat_sq_starve;
54584-extern atomic_t rdma_stat_rq_starve;
54585-extern atomic_t rdma_stat_rq_poll;
54586-extern atomic_t rdma_stat_rq_prod;
54587-extern atomic_t rdma_stat_sq_poll;
54588-extern atomic_t rdma_stat_sq_prod;
54589+extern atomic_unchecked_t rdma_stat_recv;
54590+extern atomic_unchecked_t rdma_stat_read;
54591+extern atomic_unchecked_t rdma_stat_write;
54592+extern atomic_unchecked_t rdma_stat_sq_starve;
54593+extern atomic_unchecked_t rdma_stat_rq_starve;
54594+extern atomic_unchecked_t rdma_stat_rq_poll;
54595+extern atomic_unchecked_t rdma_stat_rq_prod;
54596+extern atomic_unchecked_t rdma_stat_sq_poll;
54597+extern atomic_unchecked_t rdma_stat_sq_prod;
54598
54599 #define RPCRDMA_VERSION 1
54600
54601diff -urNp linux-3.0.3/include/linux/sysctl.h linux-3.0.3/include/linux/sysctl.h
54602--- linux-3.0.3/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54603+++ linux-3.0.3/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54604@@ -155,7 +155,11 @@ enum
54605 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54606 };
54607
54608-
54609+#ifdef CONFIG_PAX_SOFTMODE
54610+enum {
54611+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54612+};
54613+#endif
54614
54615 /* CTL_VM names: */
54616 enum
54617@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54618
54619 extern int proc_dostring(struct ctl_table *, int,
54620 void __user *, size_t *, loff_t *);
54621+extern int proc_dostring_modpriv(struct ctl_table *, int,
54622+ void __user *, size_t *, loff_t *);
54623 extern int proc_dointvec(struct ctl_table *, int,
54624 void __user *, size_t *, loff_t *);
54625 extern int proc_dointvec_minmax(struct ctl_table *, int,
54626diff -urNp linux-3.0.3/include/linux/tty_ldisc.h linux-3.0.3/include/linux/tty_ldisc.h
54627--- linux-3.0.3/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54628+++ linux-3.0.3/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54629@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54630
54631 struct module *owner;
54632
54633- int refcount;
54634+ atomic_t refcount;
54635 };
54636
54637 struct tty_ldisc {
54638diff -urNp linux-3.0.3/include/linux/types.h linux-3.0.3/include/linux/types.h
54639--- linux-3.0.3/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54640+++ linux-3.0.3/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54641@@ -213,10 +213,26 @@ typedef struct {
54642 int counter;
54643 } atomic_t;
54644
54645+#ifdef CONFIG_PAX_REFCOUNT
54646+typedef struct {
54647+ int counter;
54648+} atomic_unchecked_t;
54649+#else
54650+typedef atomic_t atomic_unchecked_t;
54651+#endif
54652+
54653 #ifdef CONFIG_64BIT
54654 typedef struct {
54655 long counter;
54656 } atomic64_t;
54657+
54658+#ifdef CONFIG_PAX_REFCOUNT
54659+typedef struct {
54660+ long counter;
54661+} atomic64_unchecked_t;
54662+#else
54663+typedef atomic64_t atomic64_unchecked_t;
54664+#endif
54665 #endif
54666
54667 struct list_head {
54668diff -urNp linux-3.0.3/include/linux/uaccess.h linux-3.0.3/include/linux/uaccess.h
54669--- linux-3.0.3/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54670+++ linux-3.0.3/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54671@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54672 long ret; \
54673 mm_segment_t old_fs = get_fs(); \
54674 \
54675- set_fs(KERNEL_DS); \
54676 pagefault_disable(); \
54677+ set_fs(KERNEL_DS); \
54678 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54679- pagefault_enable(); \
54680 set_fs(old_fs); \
54681+ pagefault_enable(); \
54682 ret; \
54683 })
54684
54685diff -urNp linux-3.0.3/include/linux/unaligned/access_ok.h linux-3.0.3/include/linux/unaligned/access_ok.h
54686--- linux-3.0.3/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54687+++ linux-3.0.3/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54688@@ -6,32 +6,32 @@
54689
54690 static inline u16 get_unaligned_le16(const void *p)
54691 {
54692- return le16_to_cpup((__le16 *)p);
54693+ return le16_to_cpup((const __le16 *)p);
54694 }
54695
54696 static inline u32 get_unaligned_le32(const void *p)
54697 {
54698- return le32_to_cpup((__le32 *)p);
54699+ return le32_to_cpup((const __le32 *)p);
54700 }
54701
54702 static inline u64 get_unaligned_le64(const void *p)
54703 {
54704- return le64_to_cpup((__le64 *)p);
54705+ return le64_to_cpup((const __le64 *)p);
54706 }
54707
54708 static inline u16 get_unaligned_be16(const void *p)
54709 {
54710- return be16_to_cpup((__be16 *)p);
54711+ return be16_to_cpup((const __be16 *)p);
54712 }
54713
54714 static inline u32 get_unaligned_be32(const void *p)
54715 {
54716- return be32_to_cpup((__be32 *)p);
54717+ return be32_to_cpup((const __be32 *)p);
54718 }
54719
54720 static inline u64 get_unaligned_be64(const void *p)
54721 {
54722- return be64_to_cpup((__be64 *)p);
54723+ return be64_to_cpup((const __be64 *)p);
54724 }
54725
54726 static inline void put_unaligned_le16(u16 val, void *p)
54727diff -urNp linux-3.0.3/include/linux/vmalloc.h linux-3.0.3/include/linux/vmalloc.h
54728--- linux-3.0.3/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54729+++ linux-3.0.3/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54730@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54731 #define VM_MAP 0x00000004 /* vmap()ed pages */
54732 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54733 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54734+
54735+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54736+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54737+#endif
54738+
54739 /* bits [20..32] reserved for arch specific ioremap internals */
54740
54741 /*
54742@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54743 # endif
54744 #endif
54745
54746+#define vmalloc(x) \
54747+({ \
54748+ void *___retval; \
54749+ intoverflow_t ___x = (intoverflow_t)x; \
54750+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54751+ ___retval = NULL; \
54752+ else \
54753+ ___retval = vmalloc((unsigned long)___x); \
54754+ ___retval; \
54755+})
54756+
54757+#define vzalloc(x) \
54758+({ \
54759+ void *___retval; \
54760+ intoverflow_t ___x = (intoverflow_t)x; \
54761+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54762+ ___retval = NULL; \
54763+ else \
54764+ ___retval = vzalloc((unsigned long)___x); \
54765+ ___retval; \
54766+})
54767+
54768+#define __vmalloc(x, y, z) \
54769+({ \
54770+ void *___retval; \
54771+ intoverflow_t ___x = (intoverflow_t)x; \
54772+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54773+ ___retval = NULL; \
54774+ else \
54775+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54776+ ___retval; \
54777+})
54778+
54779+#define vmalloc_user(x) \
54780+({ \
54781+ void *___retval; \
54782+ intoverflow_t ___x = (intoverflow_t)x; \
54783+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54784+ ___retval = NULL; \
54785+ else \
54786+ ___retval = vmalloc_user((unsigned long)___x); \
54787+ ___retval; \
54788+})
54789+
54790+#define vmalloc_exec(x) \
54791+({ \
54792+ void *___retval; \
54793+ intoverflow_t ___x = (intoverflow_t)x; \
54794+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54795+ ___retval = NULL; \
54796+ else \
54797+ ___retval = vmalloc_exec((unsigned long)___x); \
54798+ ___retval; \
54799+})
54800+
54801+#define vmalloc_node(x, y) \
54802+({ \
54803+ void *___retval; \
54804+ intoverflow_t ___x = (intoverflow_t)x; \
54805+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54806+ ___retval = NULL; \
54807+ else \
54808+ ___retval = vmalloc_node((unsigned long)___x, (y));\
54809+ ___retval; \
54810+})
54811+
54812+#define vzalloc_node(x, y) \
54813+({ \
54814+ void *___retval; \
54815+ intoverflow_t ___x = (intoverflow_t)x; \
54816+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54817+ ___retval = NULL; \
54818+ else \
54819+ ___retval = vzalloc_node((unsigned long)___x, (y));\
54820+ ___retval; \
54821+})
54822+
54823+#define vmalloc_32(x) \
54824+({ \
54825+ void *___retval; \
54826+ intoverflow_t ___x = (intoverflow_t)x; \
54827+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54828+ ___retval = NULL; \
54829+ else \
54830+ ___retval = vmalloc_32((unsigned long)___x); \
54831+ ___retval; \
54832+})
54833+
54834+#define vmalloc_32_user(x) \
54835+({ \
54836+void *___retval; \
54837+ intoverflow_t ___x = (intoverflow_t)x; \
54838+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54839+ ___retval = NULL; \
54840+ else \
54841+ ___retval = vmalloc_32_user((unsigned long)___x);\
54842+ ___retval; \
54843+})
54844+
54845 #endif /* _LINUX_VMALLOC_H */
54846diff -urNp linux-3.0.3/include/linux/vmstat.h linux-3.0.3/include/linux/vmstat.h
54847--- linux-3.0.3/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54848+++ linux-3.0.3/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54849@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54850 /*
54851 * Zone based page accounting with per cpu differentials.
54852 */
54853-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54854+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54855
54856 static inline void zone_page_state_add(long x, struct zone *zone,
54857 enum zone_stat_item item)
54858 {
54859- atomic_long_add(x, &zone->vm_stat[item]);
54860- atomic_long_add(x, &vm_stat[item]);
54861+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54862+ atomic_long_add_unchecked(x, &vm_stat[item]);
54863 }
54864
54865 static inline unsigned long global_page_state(enum zone_stat_item item)
54866 {
54867- long x = atomic_long_read(&vm_stat[item]);
54868+ long x = atomic_long_read_unchecked(&vm_stat[item]);
54869 #ifdef CONFIG_SMP
54870 if (x < 0)
54871 x = 0;
54872@@ -109,7 +109,7 @@ static inline unsigned long global_page_
54873 static inline unsigned long zone_page_state(struct zone *zone,
54874 enum zone_stat_item item)
54875 {
54876- long x = atomic_long_read(&zone->vm_stat[item]);
54877+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54878 #ifdef CONFIG_SMP
54879 if (x < 0)
54880 x = 0;
54881@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54882 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54883 enum zone_stat_item item)
54884 {
54885- long x = atomic_long_read(&zone->vm_stat[item]);
54886+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54887
54888 #ifdef CONFIG_SMP
54889 int cpu;
54890@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54891
54892 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54893 {
54894- atomic_long_inc(&zone->vm_stat[item]);
54895- atomic_long_inc(&vm_stat[item]);
54896+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
54897+ atomic_long_inc_unchecked(&vm_stat[item]);
54898 }
54899
54900 static inline void __inc_zone_page_state(struct page *page,
54901@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54902
54903 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54904 {
54905- atomic_long_dec(&zone->vm_stat[item]);
54906- atomic_long_dec(&vm_stat[item]);
54907+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
54908+ atomic_long_dec_unchecked(&vm_stat[item]);
54909 }
54910
54911 static inline void __dec_zone_page_state(struct page *page,
54912diff -urNp linux-3.0.3/include/media/saa7146_vv.h linux-3.0.3/include/media/saa7146_vv.h
54913--- linux-3.0.3/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
54914+++ linux-3.0.3/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
54915@@ -163,7 +163,7 @@ struct saa7146_ext_vv
54916 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54917
54918 /* the extension can override this */
54919- struct v4l2_ioctl_ops ops;
54920+ v4l2_ioctl_ops_no_const ops;
54921 /* pointer to the saa7146 core ops */
54922 const struct v4l2_ioctl_ops *core_ops;
54923
54924diff -urNp linux-3.0.3/include/media/v4l2-ioctl.h linux-3.0.3/include/media/v4l2-ioctl.h
54925--- linux-3.0.3/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
54926+++ linux-3.0.3/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
54927@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54928 long (*vidioc_default) (struct file *file, void *fh,
54929 bool valid_prio, int cmd, void *arg);
54930 };
54931+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54932
54933
54934 /* v4l debugging and diagnostics */
54935diff -urNp linux-3.0.3/include/net/caif/cfctrl.h linux-3.0.3/include/net/caif/cfctrl.h
54936--- linux-3.0.3/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54937+++ linux-3.0.3/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54938@@ -52,7 +52,7 @@ struct cfctrl_rsp {
54939 void (*radioset_rsp)(void);
54940 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54941 struct cflayer *client_layer);
54942-};
54943+} __no_const;
54944
54945 /* Link Setup Parameters for CAIF-Links. */
54946 struct cfctrl_link_param {
54947@@ -101,8 +101,8 @@ struct cfctrl_request_info {
54948 struct cfctrl {
54949 struct cfsrvl serv;
54950 struct cfctrl_rsp res;
54951- atomic_t req_seq_no;
54952- atomic_t rsp_seq_no;
54953+ atomic_unchecked_t req_seq_no;
54954+ atomic_unchecked_t rsp_seq_no;
54955 struct list_head list;
54956 /* Protects from simultaneous access to first_req list */
54957 spinlock_t info_list_lock;
54958diff -urNp linux-3.0.3/include/net/flow.h linux-3.0.3/include/net/flow.h
54959--- linux-3.0.3/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54960+++ linux-3.0.3/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54961@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54962 u8 dir, flow_resolve_t resolver, void *ctx);
54963
54964 extern void flow_cache_flush(void);
54965-extern atomic_t flow_cache_genid;
54966+extern atomic_unchecked_t flow_cache_genid;
54967
54968 #endif
54969diff -urNp linux-3.0.3/include/net/inetpeer.h linux-3.0.3/include/net/inetpeer.h
54970--- linux-3.0.3/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54971+++ linux-3.0.3/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54972@@ -43,8 +43,8 @@ struct inet_peer {
54973 */
54974 union {
54975 struct {
54976- atomic_t rid; /* Frag reception counter */
54977- atomic_t ip_id_count; /* IP ID for the next packet */
54978+ atomic_unchecked_t rid; /* Frag reception counter */
54979+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
54980 __u32 tcp_ts;
54981 __u32 tcp_ts_stamp;
54982 u32 metrics[RTAX_MAX];
54983@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
54984 {
54985 more++;
54986 inet_peer_refcheck(p);
54987- return atomic_add_return(more, &p->ip_id_count) - more;
54988+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
54989 }
54990
54991 #endif /* _NET_INETPEER_H */
54992diff -urNp linux-3.0.3/include/net/ip_fib.h linux-3.0.3/include/net/ip_fib.h
54993--- linux-3.0.3/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
54994+++ linux-3.0.3/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
54995@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
54996
54997 #define FIB_RES_SADDR(net, res) \
54998 ((FIB_RES_NH(res).nh_saddr_genid == \
54999- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55000+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55001 FIB_RES_NH(res).nh_saddr : \
55002 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55003 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55004diff -urNp linux-3.0.3/include/net/ip_vs.h linux-3.0.3/include/net/ip_vs.h
55005--- linux-3.0.3/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
55006+++ linux-3.0.3/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
55007@@ -509,7 +509,7 @@ struct ip_vs_conn {
55008 struct ip_vs_conn *control; /* Master control connection */
55009 atomic_t n_control; /* Number of controlled ones */
55010 struct ip_vs_dest *dest; /* real server */
55011- atomic_t in_pkts; /* incoming packet counter */
55012+ atomic_unchecked_t in_pkts; /* incoming packet counter */
55013
55014 /* packet transmitter for different forwarding methods. If it
55015 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55016@@ -647,7 +647,7 @@ struct ip_vs_dest {
55017 __be16 port; /* port number of the server */
55018 union nf_inet_addr addr; /* IP address of the server */
55019 volatile unsigned flags; /* dest status flags */
55020- atomic_t conn_flags; /* flags to copy to conn */
55021+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
55022 atomic_t weight; /* server weight */
55023
55024 atomic_t refcnt; /* reference counter */
55025diff -urNp linux-3.0.3/include/net/irda/ircomm_core.h linux-3.0.3/include/net/irda/ircomm_core.h
55026--- linux-3.0.3/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
55027+++ linux-3.0.3/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
55028@@ -51,7 +51,7 @@ typedef struct {
55029 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55030 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55031 struct ircomm_info *);
55032-} call_t;
55033+} __no_const call_t;
55034
55035 struct ircomm_cb {
55036 irda_queue_t queue;
55037diff -urNp linux-3.0.3/include/net/irda/ircomm_tty.h linux-3.0.3/include/net/irda/ircomm_tty.h
55038--- linux-3.0.3/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
55039+++ linux-3.0.3/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
55040@@ -35,6 +35,7 @@
55041 #include <linux/termios.h>
55042 #include <linux/timer.h>
55043 #include <linux/tty.h> /* struct tty_struct */
55044+#include <asm/local.h>
55045
55046 #include <net/irda/irias_object.h>
55047 #include <net/irda/ircomm_core.h>
55048@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55049 unsigned short close_delay;
55050 unsigned short closing_wait; /* time to wait before closing */
55051
55052- int open_count;
55053- int blocked_open; /* # of blocked opens */
55054+ local_t open_count;
55055+ local_t blocked_open; /* # of blocked opens */
55056
55057 /* Protect concurent access to :
55058 * o self->open_count
55059diff -urNp linux-3.0.3/include/net/iucv/af_iucv.h linux-3.0.3/include/net/iucv/af_iucv.h
55060--- linux-3.0.3/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
55061+++ linux-3.0.3/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
55062@@ -87,7 +87,7 @@ struct iucv_sock {
55063 struct iucv_sock_list {
55064 struct hlist_head head;
55065 rwlock_t lock;
55066- atomic_t autobind_name;
55067+ atomic_unchecked_t autobind_name;
55068 };
55069
55070 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55071diff -urNp linux-3.0.3/include/net/lapb.h linux-3.0.3/include/net/lapb.h
55072--- linux-3.0.3/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
55073+++ linux-3.0.3/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
55074@@ -95,7 +95,7 @@ struct lapb_cb {
55075 struct sk_buff_head write_queue;
55076 struct sk_buff_head ack_queue;
55077 unsigned char window;
55078- struct lapb_register_struct callbacks;
55079+ struct lapb_register_struct *callbacks;
55080
55081 /* FRMR control information */
55082 struct lapb_frame frmr_data;
55083diff -urNp linux-3.0.3/include/net/neighbour.h linux-3.0.3/include/net/neighbour.h
55084--- linux-3.0.3/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
55085+++ linux-3.0.3/include/net/neighbour.h 2011-08-23 21:47:56.000000000 -0400
55086@@ -117,7 +117,7 @@ struct neighbour {
55087 };
55088
55089 struct neigh_ops {
55090- int family;
55091+ const int family;
55092 void (*solicit)(struct neighbour *, struct sk_buff*);
55093 void (*error_report)(struct neighbour *, struct sk_buff*);
55094 int (*output)(struct sk_buff*);
55095diff -urNp linux-3.0.3/include/net/netlink.h linux-3.0.3/include/net/netlink.h
55096--- linux-3.0.3/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
55097+++ linux-3.0.3/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
55098@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55099 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55100 {
55101 if (mark)
55102- skb_trim(skb, (unsigned char *) mark - skb->data);
55103+ skb_trim(skb, (const unsigned char *) mark - skb->data);
55104 }
55105
55106 /**
55107diff -urNp linux-3.0.3/include/net/netns/ipv4.h linux-3.0.3/include/net/netns/ipv4.h
55108--- linux-3.0.3/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
55109+++ linux-3.0.3/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
55110@@ -56,8 +56,8 @@ struct netns_ipv4 {
55111
55112 unsigned int sysctl_ping_group_range[2];
55113
55114- atomic_t rt_genid;
55115- atomic_t dev_addr_genid;
55116+ atomic_unchecked_t rt_genid;
55117+ atomic_unchecked_t dev_addr_genid;
55118
55119 #ifdef CONFIG_IP_MROUTE
55120 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55121diff -urNp linux-3.0.3/include/net/sctp/sctp.h linux-3.0.3/include/net/sctp/sctp.h
55122--- linux-3.0.3/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
55123+++ linux-3.0.3/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
55124@@ -315,9 +315,9 @@ do { \
55125
55126 #else /* SCTP_DEBUG */
55127
55128-#define SCTP_DEBUG_PRINTK(whatever...)
55129-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55130-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55131+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55132+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55133+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55134 #define SCTP_ENABLE_DEBUG
55135 #define SCTP_DISABLE_DEBUG
55136 #define SCTP_ASSERT(expr, str, func)
55137diff -urNp linux-3.0.3/include/net/sock.h linux-3.0.3/include/net/sock.h
55138--- linux-3.0.3/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
55139+++ linux-3.0.3/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
55140@@ -277,7 +277,7 @@ struct sock {
55141 #ifdef CONFIG_RPS
55142 __u32 sk_rxhash;
55143 #endif
55144- atomic_t sk_drops;
55145+ atomic_unchecked_t sk_drops;
55146 int sk_rcvbuf;
55147
55148 struct sk_filter __rcu *sk_filter;
55149@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
55150 }
55151
55152 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
55153- char __user *from, char *to,
55154+ char __user *from, unsigned char *to,
55155 int copy, int offset)
55156 {
55157 if (skb->ip_summed == CHECKSUM_NONE) {
55158diff -urNp linux-3.0.3/include/net/tcp.h linux-3.0.3/include/net/tcp.h
55159--- linux-3.0.3/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
55160+++ linux-3.0.3/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
55161@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55162 struct tcp_seq_afinfo {
55163 char *name;
55164 sa_family_t family;
55165- struct file_operations seq_fops;
55166- struct seq_operations seq_ops;
55167+ file_operations_no_const seq_fops;
55168+ seq_operations_no_const seq_ops;
55169 };
55170
55171 struct tcp_iter_state {
55172diff -urNp linux-3.0.3/include/net/udp.h linux-3.0.3/include/net/udp.h
55173--- linux-3.0.3/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
55174+++ linux-3.0.3/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
55175@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55176 char *name;
55177 sa_family_t family;
55178 struct udp_table *udp_table;
55179- struct file_operations seq_fops;
55180- struct seq_operations seq_ops;
55181+ file_operations_no_const seq_fops;
55182+ seq_operations_no_const seq_ops;
55183 };
55184
55185 struct udp_iter_state {
55186diff -urNp linux-3.0.3/include/net/xfrm.h linux-3.0.3/include/net/xfrm.h
55187--- linux-3.0.3/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
55188+++ linux-3.0.3/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
55189@@ -505,7 +505,7 @@ struct xfrm_policy {
55190 struct timer_list timer;
55191
55192 struct flow_cache_object flo;
55193- atomic_t genid;
55194+ atomic_unchecked_t genid;
55195 u32 priority;
55196 u32 index;
55197 struct xfrm_mark mark;
55198diff -urNp linux-3.0.3/include/rdma/iw_cm.h linux-3.0.3/include/rdma/iw_cm.h
55199--- linux-3.0.3/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
55200+++ linux-3.0.3/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
55201@@ -120,7 +120,7 @@ struct iw_cm_verbs {
55202 int backlog);
55203
55204 int (*destroy_listen)(struct iw_cm_id *cm_id);
55205-};
55206+} __no_const;
55207
55208 /**
55209 * iw_create_cm_id - Create an IW CM identifier.
55210diff -urNp linux-3.0.3/include/scsi/libfc.h linux-3.0.3/include/scsi/libfc.h
55211--- linux-3.0.3/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
55212+++ linux-3.0.3/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
55213@@ -750,6 +750,7 @@ struct libfc_function_template {
55214 */
55215 void (*disc_stop_final) (struct fc_lport *);
55216 };
55217+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55218
55219 /**
55220 * struct fc_disc - Discovery context
55221@@ -853,7 +854,7 @@ struct fc_lport {
55222 struct fc_vport *vport;
55223
55224 /* Operational Information */
55225- struct libfc_function_template tt;
55226+ libfc_function_template_no_const tt;
55227 u8 link_up;
55228 u8 qfull;
55229 enum fc_lport_state state;
55230diff -urNp linux-3.0.3/include/scsi/scsi_device.h linux-3.0.3/include/scsi/scsi_device.h
55231--- linux-3.0.3/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
55232+++ linux-3.0.3/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
55233@@ -161,9 +161,9 @@ struct scsi_device {
55234 unsigned int max_device_blocked; /* what device_blocked counts down from */
55235 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55236
55237- atomic_t iorequest_cnt;
55238- atomic_t iodone_cnt;
55239- atomic_t ioerr_cnt;
55240+ atomic_unchecked_t iorequest_cnt;
55241+ atomic_unchecked_t iodone_cnt;
55242+ atomic_unchecked_t ioerr_cnt;
55243
55244 struct device sdev_gendev,
55245 sdev_dev;
55246diff -urNp linux-3.0.3/include/scsi/scsi_transport_fc.h linux-3.0.3/include/scsi/scsi_transport_fc.h
55247--- linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
55248+++ linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-08-23 21:47:56.000000000 -0400
55249@@ -666,9 +666,9 @@ struct fc_function_template {
55250 int (*bsg_timeout)(struct fc_bsg_job *);
55251
55252 /* allocation lengths for host-specific data */
55253- u32 dd_fcrport_size;
55254- u32 dd_fcvport_size;
55255- u32 dd_bsg_size;
55256+ const u32 dd_fcrport_size;
55257+ const u32 dd_fcvport_size;
55258+ const u32 dd_bsg_size;
55259
55260 /*
55261 * The driver sets these to tell the transport class it
55262@@ -678,39 +678,39 @@ struct fc_function_template {
55263 */
55264
55265 /* remote port fixed attributes */
55266- unsigned long show_rport_maxframe_size:1;
55267- unsigned long show_rport_supported_classes:1;
55268- unsigned long show_rport_dev_loss_tmo:1;
55269+ const unsigned long show_rport_maxframe_size:1;
55270+ const unsigned long show_rport_supported_classes:1;
55271+ const unsigned long show_rport_dev_loss_tmo:1;
55272
55273 /*
55274 * target dynamic attributes
55275 * These should all be "1" if the driver uses the remote port
55276 * add/delete functions (so attributes reflect rport values).
55277 */
55278- unsigned long show_starget_node_name:1;
55279- unsigned long show_starget_port_name:1;
55280- unsigned long show_starget_port_id:1;
55281+ const unsigned long show_starget_node_name:1;
55282+ const unsigned long show_starget_port_name:1;
55283+ const unsigned long show_starget_port_id:1;
55284
55285 /* host fixed attributes */
55286- unsigned long show_host_node_name:1;
55287- unsigned long show_host_port_name:1;
55288- unsigned long show_host_permanent_port_name:1;
55289- unsigned long show_host_supported_classes:1;
55290- unsigned long show_host_supported_fc4s:1;
55291- unsigned long show_host_supported_speeds:1;
55292- unsigned long show_host_maxframe_size:1;
55293- unsigned long show_host_serial_number:1;
55294+ const unsigned long show_host_node_name:1;
55295+ const unsigned long show_host_port_name:1;
55296+ const unsigned long show_host_permanent_port_name:1;
55297+ const unsigned long show_host_supported_classes:1;
55298+ const unsigned long show_host_supported_fc4s:1;
55299+ const unsigned long show_host_supported_speeds:1;
55300+ const unsigned long show_host_maxframe_size:1;
55301+ const unsigned long show_host_serial_number:1;
55302 /* host dynamic attributes */
55303- unsigned long show_host_port_id:1;
55304- unsigned long show_host_port_type:1;
55305- unsigned long show_host_port_state:1;
55306- unsigned long show_host_active_fc4s:1;
55307- unsigned long show_host_speed:1;
55308- unsigned long show_host_fabric_name:1;
55309- unsigned long show_host_symbolic_name:1;
55310- unsigned long show_host_system_hostname:1;
55311+ const unsigned long show_host_port_id:1;
55312+ const unsigned long show_host_port_type:1;
55313+ const unsigned long show_host_port_state:1;
55314+ const unsigned long show_host_active_fc4s:1;
55315+ const unsigned long show_host_speed:1;
55316+ const unsigned long show_host_fabric_name:1;
55317+ const unsigned long show_host_symbolic_name:1;
55318+ const unsigned long show_host_system_hostname:1;
55319
55320- unsigned long disable_target_scan:1;
55321+ const unsigned long disable_target_scan:1;
55322 };
55323
55324
55325diff -urNp linux-3.0.3/include/sound/ak4xxx-adda.h linux-3.0.3/include/sound/ak4xxx-adda.h
55326--- linux-3.0.3/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55327+++ linux-3.0.3/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55328@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55329 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55330 unsigned char val);
55331 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55332-};
55333+} __no_const;
55334
55335 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55336
55337diff -urNp linux-3.0.3/include/sound/hwdep.h linux-3.0.3/include/sound/hwdep.h
55338--- linux-3.0.3/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55339+++ linux-3.0.3/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55340@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55341 struct snd_hwdep_dsp_status *status);
55342 int (*dsp_load)(struct snd_hwdep *hw,
55343 struct snd_hwdep_dsp_image *image);
55344-};
55345+} __no_const;
55346
55347 struct snd_hwdep {
55348 struct snd_card *card;
55349diff -urNp linux-3.0.3/include/sound/info.h linux-3.0.3/include/sound/info.h
55350--- linux-3.0.3/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55351+++ linux-3.0.3/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55352@@ -44,7 +44,7 @@ struct snd_info_entry_text {
55353 struct snd_info_buffer *buffer);
55354 void (*write)(struct snd_info_entry *entry,
55355 struct snd_info_buffer *buffer);
55356-};
55357+} __no_const;
55358
55359 struct snd_info_entry_ops {
55360 int (*open)(struct snd_info_entry *entry,
55361diff -urNp linux-3.0.3/include/sound/pcm.h linux-3.0.3/include/sound/pcm.h
55362--- linux-3.0.3/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55363+++ linux-3.0.3/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55364@@ -81,6 +81,7 @@ struct snd_pcm_ops {
55365 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55366 int (*ack)(struct snd_pcm_substream *substream);
55367 };
55368+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55369
55370 /*
55371 *
55372diff -urNp linux-3.0.3/include/sound/sb16_csp.h linux-3.0.3/include/sound/sb16_csp.h
55373--- linux-3.0.3/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55374+++ linux-3.0.3/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55375@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55376 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55377 int (*csp_stop) (struct snd_sb_csp * p);
55378 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55379-};
55380+} __no_const;
55381
55382 /*
55383 * CSP private data
55384diff -urNp linux-3.0.3/include/sound/soc.h linux-3.0.3/include/sound/soc.h
55385--- linux-3.0.3/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55386+++ linux-3.0.3/include/sound/soc.h 2011-08-23 21:47:56.000000000 -0400
55387@@ -635,7 +635,7 @@ struct snd_soc_platform_driver {
55388 struct snd_soc_dai *);
55389
55390 /* platform stream ops */
55391- struct snd_pcm_ops *ops;
55392+ struct snd_pcm_ops * const ops;
55393 };
55394
55395 struct snd_soc_platform {
55396diff -urNp linux-3.0.3/include/sound/ymfpci.h linux-3.0.3/include/sound/ymfpci.h
55397--- linux-3.0.3/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55398+++ linux-3.0.3/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55399@@ -358,7 +358,7 @@ struct snd_ymfpci {
55400 spinlock_t reg_lock;
55401 spinlock_t voice_lock;
55402 wait_queue_head_t interrupt_sleep;
55403- atomic_t interrupt_sleep_count;
55404+ atomic_unchecked_t interrupt_sleep_count;
55405 struct snd_info_entry *proc_entry;
55406 const struct firmware *dsp_microcode;
55407 const struct firmware *controller_microcode;
55408diff -urNp linux-3.0.3/include/target/target_core_base.h linux-3.0.3/include/target/target_core_base.h
55409--- linux-3.0.3/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55410+++ linux-3.0.3/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55411@@ -364,7 +364,7 @@ struct t10_reservation_ops {
55412 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55413 int (*t10_pr_register)(struct se_cmd *);
55414 int (*t10_pr_clear)(struct se_cmd *);
55415-};
55416+} __no_const;
55417
55418 struct t10_reservation_template {
55419 /* Reservation effects all target ports */
55420@@ -432,8 +432,8 @@ struct se_transport_task {
55421 atomic_t t_task_cdbs_left;
55422 atomic_t t_task_cdbs_ex_left;
55423 atomic_t t_task_cdbs_timeout_left;
55424- atomic_t t_task_cdbs_sent;
55425- atomic_t t_transport_aborted;
55426+ atomic_unchecked_t t_task_cdbs_sent;
55427+ atomic_unchecked_t t_transport_aborted;
55428 atomic_t t_transport_active;
55429 atomic_t t_transport_complete;
55430 atomic_t t_transport_queue_active;
55431@@ -774,7 +774,7 @@ struct se_device {
55432 atomic_t active_cmds;
55433 atomic_t simple_cmds;
55434 atomic_t depth_left;
55435- atomic_t dev_ordered_id;
55436+ atomic_unchecked_t dev_ordered_id;
55437 atomic_t dev_tur_active;
55438 atomic_t execute_tasks;
55439 atomic_t dev_status_thr_count;
55440diff -urNp linux-3.0.3/include/trace/events/irq.h linux-3.0.3/include/trace/events/irq.h
55441--- linux-3.0.3/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55442+++ linux-3.0.3/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55443@@ -36,7 +36,7 @@ struct softirq_action;
55444 */
55445 TRACE_EVENT(irq_handler_entry,
55446
55447- TP_PROTO(int irq, struct irqaction *action),
55448+ TP_PROTO(int irq, const struct irqaction *action),
55449
55450 TP_ARGS(irq, action),
55451
55452@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55453 */
55454 TRACE_EVENT(irq_handler_exit,
55455
55456- TP_PROTO(int irq, struct irqaction *action, int ret),
55457+ TP_PROTO(int irq, const struct irqaction *action, int ret),
55458
55459 TP_ARGS(irq, action, ret),
55460
55461diff -urNp linux-3.0.3/include/video/udlfb.h linux-3.0.3/include/video/udlfb.h
55462--- linux-3.0.3/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55463+++ linux-3.0.3/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55464@@ -51,10 +51,10 @@ struct dlfb_data {
55465 int base8;
55466 u32 pseudo_palette[256];
55467 /* blit-only rendering path metrics, exposed through sysfs */
55468- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55469- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55470- atomic_t bytes_sent; /* to usb, after compression including overhead */
55471- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55472+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55473+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55474+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55475+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55476 };
55477
55478 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55479diff -urNp linux-3.0.3/include/video/uvesafb.h linux-3.0.3/include/video/uvesafb.h
55480--- linux-3.0.3/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55481+++ linux-3.0.3/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55482@@ -177,6 +177,7 @@ struct uvesafb_par {
55483 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55484 u8 pmi_setpal; /* PMI for palette changes */
55485 u16 *pmi_base; /* protected mode interface location */
55486+ u8 *pmi_code; /* protected mode code location */
55487 void *pmi_start;
55488 void *pmi_pal;
55489 u8 *vbe_state_orig; /*
55490diff -urNp linux-3.0.3/init/do_mounts.c linux-3.0.3/init/do_mounts.c
55491--- linux-3.0.3/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55492+++ linux-3.0.3/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55493@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55494
55495 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55496 {
55497- int err = sys_mount(name, "/root", fs, flags, data);
55498+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55499 if (err)
55500 return err;
55501
55502@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55503 va_start(args, fmt);
55504 vsprintf(buf, fmt, args);
55505 va_end(args);
55506- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55507+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55508 if (fd >= 0) {
55509 sys_ioctl(fd, FDEJECT, 0);
55510 sys_close(fd);
55511 }
55512 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55513- fd = sys_open("/dev/console", O_RDWR, 0);
55514+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55515 if (fd >= 0) {
55516 sys_ioctl(fd, TCGETS, (long)&termios);
55517 termios.c_lflag &= ~ICANON;
55518 sys_ioctl(fd, TCSETSF, (long)&termios);
55519- sys_read(fd, &c, 1);
55520+ sys_read(fd, (char __user *)&c, 1);
55521 termios.c_lflag |= ICANON;
55522 sys_ioctl(fd, TCSETSF, (long)&termios);
55523 sys_close(fd);
55524@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55525 mount_root();
55526 out:
55527 devtmpfs_mount("dev");
55528- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55529+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55530 sys_chroot((const char __user __force *)".");
55531 }
55532diff -urNp linux-3.0.3/init/do_mounts.h linux-3.0.3/init/do_mounts.h
55533--- linux-3.0.3/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55534+++ linux-3.0.3/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55535@@ -15,15 +15,15 @@ extern int root_mountflags;
55536
55537 static inline int create_dev(char *name, dev_t dev)
55538 {
55539- sys_unlink(name);
55540- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55541+ sys_unlink((__force char __user *)name);
55542+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55543 }
55544
55545 #if BITS_PER_LONG == 32
55546 static inline u32 bstat(char *name)
55547 {
55548 struct stat64 stat;
55549- if (sys_stat64(name, &stat) != 0)
55550+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55551 return 0;
55552 if (!S_ISBLK(stat.st_mode))
55553 return 0;
55554diff -urNp linux-3.0.3/init/do_mounts_initrd.c linux-3.0.3/init/do_mounts_initrd.c
55555--- linux-3.0.3/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55556+++ linux-3.0.3/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55557@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55558 create_dev("/dev/root.old", Root_RAM0);
55559 /* mount initrd on rootfs' /root */
55560 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55561- sys_mkdir("/old", 0700);
55562- root_fd = sys_open("/", 0, 0);
55563- old_fd = sys_open("/old", 0, 0);
55564+ sys_mkdir((__force const char __user *)"/old", 0700);
55565+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
55566+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55567 /* move initrd over / and chdir/chroot in initrd root */
55568- sys_chdir("/root");
55569- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55570- sys_chroot(".");
55571+ sys_chdir((__force const char __user *)"/root");
55572+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55573+ sys_chroot((__force const char __user *)".");
55574
55575 /*
55576 * In case that a resume from disk is carried out by linuxrc or one of
55577@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55578
55579 /* move initrd to rootfs' /old */
55580 sys_fchdir(old_fd);
55581- sys_mount("/", ".", NULL, MS_MOVE, NULL);
55582+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55583 /* switch root and cwd back to / of rootfs */
55584 sys_fchdir(root_fd);
55585- sys_chroot(".");
55586+ sys_chroot((__force const char __user *)".");
55587 sys_close(old_fd);
55588 sys_close(root_fd);
55589
55590 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55591- sys_chdir("/old");
55592+ sys_chdir((__force const char __user *)"/old");
55593 return;
55594 }
55595
55596@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55597 mount_root();
55598
55599 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55600- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55601+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55602 if (!error)
55603 printk("okay\n");
55604 else {
55605- int fd = sys_open("/dev/root.old", O_RDWR, 0);
55606+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55607 if (error == -ENOENT)
55608 printk("/initrd does not exist. Ignored.\n");
55609 else
55610 printk("failed\n");
55611 printk(KERN_NOTICE "Unmounting old root\n");
55612- sys_umount("/old", MNT_DETACH);
55613+ sys_umount((__force char __user *)"/old", MNT_DETACH);
55614 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55615 if (fd < 0) {
55616 error = fd;
55617@@ -116,11 +116,11 @@ int __init initrd_load(void)
55618 * mounted in the normal path.
55619 */
55620 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55621- sys_unlink("/initrd.image");
55622+ sys_unlink((__force const char __user *)"/initrd.image");
55623 handle_initrd();
55624 return 1;
55625 }
55626 }
55627- sys_unlink("/initrd.image");
55628+ sys_unlink((__force const char __user *)"/initrd.image");
55629 return 0;
55630 }
55631diff -urNp linux-3.0.3/init/do_mounts_md.c linux-3.0.3/init/do_mounts_md.c
55632--- linux-3.0.3/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55633+++ linux-3.0.3/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55634@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55635 partitioned ? "_d" : "", minor,
55636 md_setup_args[ent].device_names);
55637
55638- fd = sys_open(name, 0, 0);
55639+ fd = sys_open((__force char __user *)name, 0, 0);
55640 if (fd < 0) {
55641 printk(KERN_ERR "md: open failed - cannot start "
55642 "array %s\n", name);
55643@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55644 * array without it
55645 */
55646 sys_close(fd);
55647- fd = sys_open(name, 0, 0);
55648+ fd = sys_open((__force char __user *)name, 0, 0);
55649 sys_ioctl(fd, BLKRRPART, 0);
55650 }
55651 sys_close(fd);
55652diff -urNp linux-3.0.3/init/initramfs.c linux-3.0.3/init/initramfs.c
55653--- linux-3.0.3/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55654+++ linux-3.0.3/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55655@@ -74,7 +74,7 @@ static void __init free_hash(void)
55656 }
55657 }
55658
55659-static long __init do_utime(char __user *filename, time_t mtime)
55660+static long __init do_utime(__force char __user *filename, time_t mtime)
55661 {
55662 struct timespec t[2];
55663
55664@@ -109,7 +109,7 @@ static void __init dir_utime(void)
55665 struct dir_entry *de, *tmp;
55666 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55667 list_del(&de->list);
55668- do_utime(de->name, de->mtime);
55669+ do_utime((__force char __user *)de->name, de->mtime);
55670 kfree(de->name);
55671 kfree(de);
55672 }
55673@@ -271,7 +271,7 @@ static int __init maybe_link(void)
55674 if (nlink >= 2) {
55675 char *old = find_link(major, minor, ino, mode, collected);
55676 if (old)
55677- return (sys_link(old, collected) < 0) ? -1 : 1;
55678+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55679 }
55680 return 0;
55681 }
55682@@ -280,11 +280,11 @@ static void __init clean_path(char *path
55683 {
55684 struct stat st;
55685
55686- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55687+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55688 if (S_ISDIR(st.st_mode))
55689- sys_rmdir(path);
55690+ sys_rmdir((__force char __user *)path);
55691 else
55692- sys_unlink(path);
55693+ sys_unlink((__force char __user *)path);
55694 }
55695 }
55696
55697@@ -305,7 +305,7 @@ static int __init do_name(void)
55698 int openflags = O_WRONLY|O_CREAT;
55699 if (ml != 1)
55700 openflags |= O_TRUNC;
55701- wfd = sys_open(collected, openflags, mode);
55702+ wfd = sys_open((__force char __user *)collected, openflags, mode);
55703
55704 if (wfd >= 0) {
55705 sys_fchown(wfd, uid, gid);
55706@@ -317,17 +317,17 @@ static int __init do_name(void)
55707 }
55708 }
55709 } else if (S_ISDIR(mode)) {
55710- sys_mkdir(collected, mode);
55711- sys_chown(collected, uid, gid);
55712- sys_chmod(collected, mode);
55713+ sys_mkdir((__force char __user *)collected, mode);
55714+ sys_chown((__force char __user *)collected, uid, gid);
55715+ sys_chmod((__force char __user *)collected, mode);
55716 dir_add(collected, mtime);
55717 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55718 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55719 if (maybe_link() == 0) {
55720- sys_mknod(collected, mode, rdev);
55721- sys_chown(collected, uid, gid);
55722- sys_chmod(collected, mode);
55723- do_utime(collected, mtime);
55724+ sys_mknod((__force char __user *)collected, mode, rdev);
55725+ sys_chown((__force char __user *)collected, uid, gid);
55726+ sys_chmod((__force char __user *)collected, mode);
55727+ do_utime((__force char __user *)collected, mtime);
55728 }
55729 }
55730 return 0;
55731@@ -336,15 +336,15 @@ static int __init do_name(void)
55732 static int __init do_copy(void)
55733 {
55734 if (count >= body_len) {
55735- sys_write(wfd, victim, body_len);
55736+ sys_write(wfd, (__force char __user *)victim, body_len);
55737 sys_close(wfd);
55738- do_utime(vcollected, mtime);
55739+ do_utime((__force char __user *)vcollected, mtime);
55740 kfree(vcollected);
55741 eat(body_len);
55742 state = SkipIt;
55743 return 0;
55744 } else {
55745- sys_write(wfd, victim, count);
55746+ sys_write(wfd, (__force char __user *)victim, count);
55747 body_len -= count;
55748 eat(count);
55749 return 1;
55750@@ -355,9 +355,9 @@ static int __init do_symlink(void)
55751 {
55752 collected[N_ALIGN(name_len) + body_len] = '\0';
55753 clean_path(collected, 0);
55754- sys_symlink(collected + N_ALIGN(name_len), collected);
55755- sys_lchown(collected, uid, gid);
55756- do_utime(collected, mtime);
55757+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55758+ sys_lchown((__force char __user *)collected, uid, gid);
55759+ do_utime((__force char __user *)collected, mtime);
55760 state = SkipIt;
55761 next_state = Reset;
55762 return 0;
55763diff -urNp linux-3.0.3/init/Kconfig linux-3.0.3/init/Kconfig
55764--- linux-3.0.3/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55765+++ linux-3.0.3/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55766@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55767
55768 config COMPAT_BRK
55769 bool "Disable heap randomization"
55770- default y
55771+ default n
55772 help
55773 Randomizing heap placement makes heap exploits harder, but it
55774 also breaks ancient binaries (including anything libc5 based).
55775diff -urNp linux-3.0.3/init/main.c linux-3.0.3/init/main.c
55776--- linux-3.0.3/init/main.c 2011-07-21 22:17:23.000000000 -0400
55777+++ linux-3.0.3/init/main.c 2011-08-23 21:48:14.000000000 -0400
55778@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55779 extern void tc_init(void);
55780 #endif
55781
55782+extern void grsecurity_init(void);
55783+
55784 /*
55785 * Debug helper: via this flag we know that we are in 'early bootup code'
55786 * where only the boot processor is running with IRQ disabled. This means
55787@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55788
55789 __setup("reset_devices", set_reset_devices);
55790
55791+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55792+extern char pax_enter_kernel_user[];
55793+extern char pax_exit_kernel_user[];
55794+extern pgdval_t clone_pgd_mask;
55795+#endif
55796+
55797+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55798+static int __init setup_pax_nouderef(char *str)
55799+{
55800+#ifdef CONFIG_X86_32
55801+ unsigned int cpu;
55802+ struct desc_struct *gdt;
55803+
55804+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
55805+ gdt = get_cpu_gdt_table(cpu);
55806+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55807+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55808+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55809+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55810+ }
55811+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55812+#else
55813+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55814+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55815+ clone_pgd_mask = ~(pgdval_t)0UL;
55816+#endif
55817+
55818+ return 0;
55819+}
55820+early_param("pax_nouderef", setup_pax_nouderef);
55821+#endif
55822+
55823+#ifdef CONFIG_PAX_SOFTMODE
55824+int pax_softmode;
55825+
55826+static int __init setup_pax_softmode(char *str)
55827+{
55828+ get_option(&str, &pax_softmode);
55829+ return 1;
55830+}
55831+__setup("pax_softmode=", setup_pax_softmode);
55832+#endif
55833+
55834 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55835 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55836 static const char *panic_later, *panic_param;
55837@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55838 {
55839 int count = preempt_count();
55840 int ret;
55841+ const char *msg1 = "", *msg2 = "";
55842
55843 if (initcall_debug)
55844 ret = do_one_initcall_debug(fn);
55845@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55846 sprintf(msgbuf, "error code %d ", ret);
55847
55848 if (preempt_count() != count) {
55849- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55850+ msg1 = " preemption imbalance";
55851 preempt_count() = count;
55852 }
55853 if (irqs_disabled()) {
55854- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55855+ msg2 = " disabled interrupts";
55856 local_irq_enable();
55857 }
55858- if (msgbuf[0]) {
55859- printk("initcall %pF returned with %s\n", fn, msgbuf);
55860+ if (msgbuf[0] || *msg1 || *msg2) {
55861+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55862 }
55863
55864 return ret;
55865@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55866 do_basic_setup();
55867
55868 /* Open the /dev/console on the rootfs, this should never fail */
55869- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55870+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55871 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55872
55873 (void) sys_dup(0);
55874@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55875 if (!ramdisk_execute_command)
55876 ramdisk_execute_command = "/init";
55877
55878- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55879+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55880 ramdisk_execute_command = NULL;
55881 prepare_namespace();
55882 }
55883
55884+ grsecurity_init();
55885+
55886 /*
55887 * Ok, we have completed the initial bootup, and
55888 * we're essentially up and running. Get rid of the
55889diff -urNp linux-3.0.3/ipc/mqueue.c linux-3.0.3/ipc/mqueue.c
55890--- linux-3.0.3/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55891+++ linux-3.0.3/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55892@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55893 mq_bytes = (mq_msg_tblsz +
55894 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55895
55896+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55897 spin_lock(&mq_lock);
55898 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55899 u->mq_bytes + mq_bytes >
55900diff -urNp linux-3.0.3/ipc/msg.c linux-3.0.3/ipc/msg.c
55901--- linux-3.0.3/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55902+++ linux-3.0.3/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55903@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55904 return security_msg_queue_associate(msq, msgflg);
55905 }
55906
55907+static struct ipc_ops msg_ops = {
55908+ .getnew = newque,
55909+ .associate = msg_security,
55910+ .more_checks = NULL
55911+};
55912+
55913 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55914 {
55915 struct ipc_namespace *ns;
55916- struct ipc_ops msg_ops;
55917 struct ipc_params msg_params;
55918
55919 ns = current->nsproxy->ipc_ns;
55920
55921- msg_ops.getnew = newque;
55922- msg_ops.associate = msg_security;
55923- msg_ops.more_checks = NULL;
55924-
55925 msg_params.key = key;
55926 msg_params.flg = msgflg;
55927
55928diff -urNp linux-3.0.3/ipc/sem.c linux-3.0.3/ipc/sem.c
55929--- linux-3.0.3/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55930+++ linux-3.0.3/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55931@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55932 return 0;
55933 }
55934
55935+static struct ipc_ops sem_ops = {
55936+ .getnew = newary,
55937+ .associate = sem_security,
55938+ .more_checks = sem_more_checks
55939+};
55940+
55941 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55942 {
55943 struct ipc_namespace *ns;
55944- struct ipc_ops sem_ops;
55945 struct ipc_params sem_params;
55946
55947 ns = current->nsproxy->ipc_ns;
55948@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55949 if (nsems < 0 || nsems > ns->sc_semmsl)
55950 return -EINVAL;
55951
55952- sem_ops.getnew = newary;
55953- sem_ops.associate = sem_security;
55954- sem_ops.more_checks = sem_more_checks;
55955-
55956 sem_params.key = key;
55957 sem_params.flg = semflg;
55958 sem_params.u.nsems = nsems;
55959@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55960 int nsems;
55961 struct list_head tasks;
55962
55963+ pax_track_stack();
55964+
55965 sma = sem_lock_check(ns, semid);
55966 if (IS_ERR(sma))
55967 return PTR_ERR(sma);
55968@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55969 struct ipc_namespace *ns;
55970 struct list_head tasks;
55971
55972+ pax_track_stack();
55973+
55974 ns = current->nsproxy->ipc_ns;
55975
55976 if (nsops < 1 || semid < 0)
55977diff -urNp linux-3.0.3/ipc/shm.c linux-3.0.3/ipc/shm.c
55978--- linux-3.0.3/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55979+++ linux-3.0.3/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55980@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55981 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55982 #endif
55983
55984+#ifdef CONFIG_GRKERNSEC
55985+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55986+ const time_t shm_createtime, const uid_t cuid,
55987+ const int shmid);
55988+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55989+ const time_t shm_createtime);
55990+#endif
55991+
55992 void shm_init_ns(struct ipc_namespace *ns)
55993 {
55994 ns->shm_ctlmax = SHMMAX;
55995@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55996 shp->shm_lprid = 0;
55997 shp->shm_atim = shp->shm_dtim = 0;
55998 shp->shm_ctim = get_seconds();
55999+#ifdef CONFIG_GRKERNSEC
56000+ {
56001+ struct timespec timeval;
56002+ do_posix_clock_monotonic_gettime(&timeval);
56003+
56004+ shp->shm_createtime = timeval.tv_sec;
56005+ }
56006+#endif
56007 shp->shm_segsz = size;
56008 shp->shm_nattch = 0;
56009 shp->shm_file = file;
56010@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56011 return 0;
56012 }
56013
56014+static struct ipc_ops shm_ops = {
56015+ .getnew = newseg,
56016+ .associate = shm_security,
56017+ .more_checks = shm_more_checks
56018+};
56019+
56020 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56021 {
56022 struct ipc_namespace *ns;
56023- struct ipc_ops shm_ops;
56024 struct ipc_params shm_params;
56025
56026 ns = current->nsproxy->ipc_ns;
56027
56028- shm_ops.getnew = newseg;
56029- shm_ops.associate = shm_security;
56030- shm_ops.more_checks = shm_more_checks;
56031-
56032 shm_params.key = key;
56033 shm_params.flg = shmflg;
56034 shm_params.u.size = size;
56035@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56036 case SHM_LOCK:
56037 case SHM_UNLOCK:
56038 {
56039- struct file *uninitialized_var(shm_file);
56040-
56041 lru_add_drain_all(); /* drain pagevecs to lru lists */
56042
56043 shp = shm_lock_check(ns, shmid);
56044@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56045 if (err)
56046 goto out_unlock;
56047
56048+#ifdef CONFIG_GRKERNSEC
56049+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56050+ shp->shm_perm.cuid, shmid) ||
56051+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56052+ err = -EACCES;
56053+ goto out_unlock;
56054+ }
56055+#endif
56056+
56057 path = shp->shm_file->f_path;
56058 path_get(&path);
56059 shp->shm_nattch++;
56060+#ifdef CONFIG_GRKERNSEC
56061+ shp->shm_lapid = current->pid;
56062+#endif
56063 size = i_size_read(path.dentry->d_inode);
56064 shm_unlock(shp);
56065
56066diff -urNp linux-3.0.3/kernel/acct.c linux-3.0.3/kernel/acct.c
56067--- linux-3.0.3/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
56068+++ linux-3.0.3/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
56069@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56070 */
56071 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56072 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56073- file->f_op->write(file, (char *)&ac,
56074+ file->f_op->write(file, (__force char __user *)&ac,
56075 sizeof(acct_t), &file->f_pos);
56076 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56077 set_fs(fs);
56078diff -urNp linux-3.0.3/kernel/audit.c linux-3.0.3/kernel/audit.c
56079--- linux-3.0.3/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
56080+++ linux-3.0.3/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
56081@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56082 3) suppressed due to audit_rate_limit
56083 4) suppressed due to audit_backlog_limit
56084 */
56085-static atomic_t audit_lost = ATOMIC_INIT(0);
56086+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56087
56088 /* The netlink socket. */
56089 static struct sock *audit_sock;
56090@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56091 unsigned long now;
56092 int print;
56093
56094- atomic_inc(&audit_lost);
56095+ atomic_inc_unchecked(&audit_lost);
56096
56097 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56098
56099@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56100 printk(KERN_WARNING
56101 "audit: audit_lost=%d audit_rate_limit=%d "
56102 "audit_backlog_limit=%d\n",
56103- atomic_read(&audit_lost),
56104+ atomic_read_unchecked(&audit_lost),
56105 audit_rate_limit,
56106 audit_backlog_limit);
56107 audit_panic(message);
56108@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56109 status_set.pid = audit_pid;
56110 status_set.rate_limit = audit_rate_limit;
56111 status_set.backlog_limit = audit_backlog_limit;
56112- status_set.lost = atomic_read(&audit_lost);
56113+ status_set.lost = atomic_read_unchecked(&audit_lost);
56114 status_set.backlog = skb_queue_len(&audit_skb_queue);
56115 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56116 &status_set, sizeof(status_set));
56117diff -urNp linux-3.0.3/kernel/auditsc.c linux-3.0.3/kernel/auditsc.c
56118--- linux-3.0.3/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
56119+++ linux-3.0.3/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
56120@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
56121 }
56122
56123 /* global counter which is incremented every time something logs in */
56124-static atomic_t session_id = ATOMIC_INIT(0);
56125+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56126
56127 /**
56128 * audit_set_loginuid - set a task's audit_context loginuid
56129@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
56130 */
56131 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56132 {
56133- unsigned int sessionid = atomic_inc_return(&session_id);
56134+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56135 struct audit_context *context = task->audit_context;
56136
56137 if (context && context->in_syscall) {
56138diff -urNp linux-3.0.3/kernel/capability.c linux-3.0.3/kernel/capability.c
56139--- linux-3.0.3/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
56140+++ linux-3.0.3/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
56141@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56142 * before modification is attempted and the application
56143 * fails.
56144 */
56145+ if (tocopy > ARRAY_SIZE(kdata))
56146+ return -EFAULT;
56147+
56148 if (copy_to_user(dataptr, kdata, tocopy
56149 * sizeof(struct __user_cap_data_struct))) {
56150 return -EFAULT;
56151@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
56152 BUG();
56153 }
56154
56155- if (security_capable(ns, current_cred(), cap) == 0) {
56156+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56157 current->flags |= PF_SUPERPRIV;
56158 return true;
56159 }
56160@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
56161 }
56162 EXPORT_SYMBOL(ns_capable);
56163
56164+bool ns_capable_nolog(struct user_namespace *ns, int cap)
56165+{
56166+ if (unlikely(!cap_valid(cap))) {
56167+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56168+ BUG();
56169+ }
56170+
56171+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56172+ current->flags |= PF_SUPERPRIV;
56173+ return true;
56174+ }
56175+ return false;
56176+}
56177+EXPORT_SYMBOL(ns_capable_nolog);
56178+
56179+bool capable_nolog(int cap)
56180+{
56181+ return ns_capable_nolog(&init_user_ns, cap);
56182+}
56183+EXPORT_SYMBOL(capable_nolog);
56184+
56185 /**
56186 * task_ns_capable - Determine whether current task has a superior
56187 * capability targeted at a specific task's user namespace.
56188@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
56189 }
56190 EXPORT_SYMBOL(task_ns_capable);
56191
56192+bool task_ns_capable_nolog(struct task_struct *t, int cap)
56193+{
56194+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56195+}
56196+EXPORT_SYMBOL(task_ns_capable_nolog);
56197+
56198 /**
56199 * nsown_capable - Check superior capability to one's own user_ns
56200 * @cap: The capability in question
56201diff -urNp linux-3.0.3/kernel/cgroup.c linux-3.0.3/kernel/cgroup.c
56202--- linux-3.0.3/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
56203+++ linux-3.0.3/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
56204@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
56205 struct hlist_head *hhead;
56206 struct cg_cgroup_link *link;
56207
56208+ pax_track_stack();
56209+
56210 /* First see if we already have a cgroup group that matches
56211 * the desired set */
56212 read_lock(&css_set_lock);
56213diff -urNp linux-3.0.3/kernel/compat.c linux-3.0.3/kernel/compat.c
56214--- linux-3.0.3/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
56215+++ linux-3.0.3/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
56216@@ -13,6 +13,7 @@
56217
56218 #include <linux/linkage.h>
56219 #include <linux/compat.h>
56220+#include <linux/module.h>
56221 #include <linux/errno.h>
56222 #include <linux/time.h>
56223 #include <linux/signal.h>
56224diff -urNp linux-3.0.3/kernel/configs.c linux-3.0.3/kernel/configs.c
56225--- linux-3.0.3/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
56226+++ linux-3.0.3/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
56227@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56228 struct proc_dir_entry *entry;
56229
56230 /* create the current config file */
56231+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56232+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56233+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56234+ &ikconfig_file_ops);
56235+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56236+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56237+ &ikconfig_file_ops);
56238+#endif
56239+#else
56240 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56241 &ikconfig_file_ops);
56242+#endif
56243+
56244 if (!entry)
56245 return -ENOMEM;
56246
56247diff -urNp linux-3.0.3/kernel/cred.c linux-3.0.3/kernel/cred.c
56248--- linux-3.0.3/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
56249+++ linux-3.0.3/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
56250@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56251 */
56252 void __put_cred(struct cred *cred)
56253 {
56254+ pax_track_stack();
56255+
56256 kdebug("__put_cred(%p{%d,%d})", cred,
56257 atomic_read(&cred->usage),
56258 read_cred_subscribers(cred));
56259@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56260 {
56261 struct cred *cred;
56262
56263+ pax_track_stack();
56264+
56265 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56266 atomic_read(&tsk->cred->usage),
56267 read_cred_subscribers(tsk->cred));
56268@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56269 {
56270 const struct cred *cred;
56271
56272+ pax_track_stack();
56273+
56274 rcu_read_lock();
56275
56276 do {
56277@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56278 {
56279 struct cred *new;
56280
56281+ pax_track_stack();
56282+
56283 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56284 if (!new)
56285 return NULL;
56286@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56287 const struct cred *old;
56288 struct cred *new;
56289
56290+ pax_track_stack();
56291+
56292 validate_process_creds();
56293
56294 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56295@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56296 struct thread_group_cred *tgcred = NULL;
56297 struct cred *new;
56298
56299+ pax_track_stack();
56300+
56301 #ifdef CONFIG_KEYS
56302 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56303 if (!tgcred)
56304@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56305 struct cred *new;
56306 int ret;
56307
56308+ pax_track_stack();
56309+
56310 if (
56311 #ifdef CONFIG_KEYS
56312 !p->cred->thread_keyring &&
56313@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56314 struct task_struct *task = current;
56315 const struct cred *old = task->real_cred;
56316
56317+ pax_track_stack();
56318+
56319 kdebug("commit_creds(%p{%d,%d})", new,
56320 atomic_read(&new->usage),
56321 read_cred_subscribers(new));
56322@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56323
56324 get_cred(new); /* we will require a ref for the subj creds too */
56325
56326+ gr_set_role_label(task, new->uid, new->gid);
56327+
56328 /* dumpability changes */
56329 if (old->euid != new->euid ||
56330 old->egid != new->egid ||
56331@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
56332 key_fsgid_changed(task);
56333
56334 /* do it
56335- * - What if a process setreuid()'s and this brings the
56336- * new uid over his NPROC rlimit? We can check this now
56337- * cheaply with the new uid cache, so if it matters
56338- * we should be checking for it. -DaveM
56339+ * RLIMIT_NPROC limits on user->processes have already been checked
56340+ * in set_user().
56341 */
56342 alter_cred_subscribers(new, 2);
56343 if (new->user != old->user)
56344@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
56345 */
56346 void abort_creds(struct cred *new)
56347 {
56348+ pax_track_stack();
56349+
56350 kdebug("abort_creds(%p{%d,%d})", new,
56351 atomic_read(&new->usage),
56352 read_cred_subscribers(new));
56353@@ -574,6 +592,8 @@ const struct cred *override_creds(const
56354 {
56355 const struct cred *old = current->cred;
56356
56357+ pax_track_stack();
56358+
56359 kdebug("override_creds(%p{%d,%d})", new,
56360 atomic_read(&new->usage),
56361 read_cred_subscribers(new));
56362@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
56363 {
56364 const struct cred *override = current->cred;
56365
56366+ pax_track_stack();
56367+
56368 kdebug("revert_creds(%p{%d,%d})", old,
56369 atomic_read(&old->usage),
56370 read_cred_subscribers(old));
56371@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
56372 const struct cred *old;
56373 struct cred *new;
56374
56375+ pax_track_stack();
56376+
56377 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56378 if (!new)
56379 return NULL;
56380@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56381 */
56382 int set_security_override(struct cred *new, u32 secid)
56383 {
56384+ pax_track_stack();
56385+
56386 return security_kernel_act_as(new, secid);
56387 }
56388 EXPORT_SYMBOL(set_security_override);
56389@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
56390 u32 secid;
56391 int ret;
56392
56393+ pax_track_stack();
56394+
56395 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56396 if (ret < 0)
56397 return ret;
56398diff -urNp linux-3.0.3/kernel/debug/debug_core.c linux-3.0.3/kernel/debug/debug_core.c
56399--- linux-3.0.3/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56400+++ linux-3.0.3/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56401@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56402 */
56403 static atomic_t masters_in_kgdb;
56404 static atomic_t slaves_in_kgdb;
56405-static atomic_t kgdb_break_tasklet_var;
56406+static atomic_unchecked_t kgdb_break_tasklet_var;
56407 atomic_t kgdb_setting_breakpoint;
56408
56409 struct task_struct *kgdb_usethread;
56410@@ -129,7 +129,7 @@ int kgdb_single_step;
56411 static pid_t kgdb_sstep_pid;
56412
56413 /* to keep track of the CPU which is doing the single stepping*/
56414-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56415+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56416
56417 /*
56418 * If you are debugging a problem where roundup (the collection of
56419@@ -542,7 +542,7 @@ return_normal:
56420 * kernel will only try for the value of sstep_tries before
56421 * giving up and continuing on.
56422 */
56423- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56424+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56425 (kgdb_info[cpu].task &&
56426 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56427 atomic_set(&kgdb_active, -1);
56428@@ -636,8 +636,8 @@ cpu_master_loop:
56429 }
56430
56431 kgdb_restore:
56432- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56433- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56434+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56435+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56436 if (kgdb_info[sstep_cpu].task)
56437 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56438 else
56439@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56440 static void kgdb_tasklet_bpt(unsigned long ing)
56441 {
56442 kgdb_breakpoint();
56443- atomic_set(&kgdb_break_tasklet_var, 0);
56444+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56445 }
56446
56447 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56448
56449 void kgdb_schedule_breakpoint(void)
56450 {
56451- if (atomic_read(&kgdb_break_tasklet_var) ||
56452+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56453 atomic_read(&kgdb_active) != -1 ||
56454 atomic_read(&kgdb_setting_breakpoint))
56455 return;
56456- atomic_inc(&kgdb_break_tasklet_var);
56457+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
56458 tasklet_schedule(&kgdb_tasklet_breakpoint);
56459 }
56460 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56461diff -urNp linux-3.0.3/kernel/debug/kdb/kdb_main.c linux-3.0.3/kernel/debug/kdb/kdb_main.c
56462--- linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56463+++ linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56464@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56465 list_for_each_entry(mod, kdb_modules, list) {
56466
56467 kdb_printf("%-20s%8u 0x%p ", mod->name,
56468- mod->core_size, (void *)mod);
56469+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
56470 #ifdef CONFIG_MODULE_UNLOAD
56471 kdb_printf("%4d ", module_refcount(mod));
56472 #endif
56473@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56474 kdb_printf(" (Loading)");
56475 else
56476 kdb_printf(" (Live)");
56477- kdb_printf(" 0x%p", mod->module_core);
56478+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56479
56480 #ifdef CONFIG_MODULE_UNLOAD
56481 {
56482diff -urNp linux-3.0.3/kernel/events/core.c linux-3.0.3/kernel/events/core.c
56483--- linux-3.0.3/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56484+++ linux-3.0.3/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56485@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56486 return 0;
56487 }
56488
56489-static atomic64_t perf_event_id;
56490+static atomic64_unchecked_t perf_event_id;
56491
56492 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56493 enum event_type_t event_type);
56494@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56495
56496 static inline u64 perf_event_count(struct perf_event *event)
56497 {
56498- return local64_read(&event->count) + atomic64_read(&event->child_count);
56499+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56500 }
56501
56502 static u64 perf_event_read(struct perf_event *event)
56503@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56504 mutex_lock(&event->child_mutex);
56505 total += perf_event_read(event);
56506 *enabled += event->total_time_enabled +
56507- atomic64_read(&event->child_total_time_enabled);
56508+ atomic64_read_unchecked(&event->child_total_time_enabled);
56509 *running += event->total_time_running +
56510- atomic64_read(&event->child_total_time_running);
56511+ atomic64_read_unchecked(&event->child_total_time_running);
56512
56513 list_for_each_entry(child, &event->child_list, child_list) {
56514 total += perf_event_read(child);
56515@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56516 userpg->offset -= local64_read(&event->hw.prev_count);
56517
56518 userpg->time_enabled = event->total_time_enabled +
56519- atomic64_read(&event->child_total_time_enabled);
56520+ atomic64_read_unchecked(&event->child_total_time_enabled);
56521
56522 userpg->time_running = event->total_time_running +
56523- atomic64_read(&event->child_total_time_running);
56524+ atomic64_read_unchecked(&event->child_total_time_running);
56525
56526 barrier();
56527 ++userpg->lock;
56528@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56529 values[n++] = perf_event_count(event);
56530 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56531 values[n++] = enabled +
56532- atomic64_read(&event->child_total_time_enabled);
56533+ atomic64_read_unchecked(&event->child_total_time_enabled);
56534 }
56535 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56536 values[n++] = running +
56537- atomic64_read(&event->child_total_time_running);
56538+ atomic64_read_unchecked(&event->child_total_time_running);
56539 }
56540 if (read_format & PERF_FORMAT_ID)
56541 values[n++] = primary_event_id(event);
56542@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56543 event->parent = parent_event;
56544
56545 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56546- event->id = atomic64_inc_return(&perf_event_id);
56547+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
56548
56549 event->state = PERF_EVENT_STATE_INACTIVE;
56550
56551@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56552 /*
56553 * Add back the child's count to the parent's count:
56554 */
56555- atomic64_add(child_val, &parent_event->child_count);
56556- atomic64_add(child_event->total_time_enabled,
56557+ atomic64_add_unchecked(child_val, &parent_event->child_count);
56558+ atomic64_add_unchecked(child_event->total_time_enabled,
56559 &parent_event->child_total_time_enabled);
56560- atomic64_add(child_event->total_time_running,
56561+ atomic64_add_unchecked(child_event->total_time_running,
56562 &parent_event->child_total_time_running);
56563
56564 /*
56565diff -urNp linux-3.0.3/kernel/exit.c linux-3.0.3/kernel/exit.c
56566--- linux-3.0.3/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56567+++ linux-3.0.3/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56568@@ -57,6 +57,10 @@
56569 #include <asm/pgtable.h>
56570 #include <asm/mmu_context.h>
56571
56572+#ifdef CONFIG_GRKERNSEC
56573+extern rwlock_t grsec_exec_file_lock;
56574+#endif
56575+
56576 static void exit_mm(struct task_struct * tsk);
56577
56578 static void __unhash_process(struct task_struct *p, bool group_dead)
56579@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56580 struct task_struct *leader;
56581 int zap_leader;
56582 repeat:
56583+#ifdef CONFIG_NET
56584+ gr_del_task_from_ip_table(p);
56585+#endif
56586+
56587 tracehook_prepare_release_task(p);
56588 /* don't need to get the RCU readlock here - the process is dead and
56589 * can't be modifying its own credentials. But shut RCU-lockdep up */
56590@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56591 {
56592 write_lock_irq(&tasklist_lock);
56593
56594+#ifdef CONFIG_GRKERNSEC
56595+ write_lock(&grsec_exec_file_lock);
56596+ if (current->exec_file) {
56597+ fput(current->exec_file);
56598+ current->exec_file = NULL;
56599+ }
56600+ write_unlock(&grsec_exec_file_lock);
56601+#endif
56602+
56603 ptrace_unlink(current);
56604 /* Reparent to init */
56605 current->real_parent = current->parent = kthreadd_task;
56606 list_move_tail(&current->sibling, &current->real_parent->children);
56607
56608+ gr_set_kernel_label(current);
56609+
56610 /* Set the exit signal to SIGCHLD so we signal init on exit */
56611 current->exit_signal = SIGCHLD;
56612
56613@@ -394,7 +413,7 @@ int allow_signal(int sig)
56614 * know it'll be handled, so that they don't get converted to
56615 * SIGKILL or just silently dropped.
56616 */
56617- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56618+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56619 recalc_sigpending();
56620 spin_unlock_irq(&current->sighand->siglock);
56621 return 0;
56622@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56623 vsnprintf(current->comm, sizeof(current->comm), name, args);
56624 va_end(args);
56625
56626+#ifdef CONFIG_GRKERNSEC
56627+ write_lock(&grsec_exec_file_lock);
56628+ if (current->exec_file) {
56629+ fput(current->exec_file);
56630+ current->exec_file = NULL;
56631+ }
56632+ write_unlock(&grsec_exec_file_lock);
56633+#endif
56634+
56635+ gr_set_kernel_label(current);
56636+
56637 /*
56638 * If we were started as result of loading a module, close all of the
56639 * user space pages. We don't need them, and if we didn't close them
56640@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56641 struct task_struct *tsk = current;
56642 int group_dead;
56643
56644- profile_task_exit(tsk);
56645-
56646- WARN_ON(atomic_read(&tsk->fs_excl));
56647- WARN_ON(blk_needs_flush_plug(tsk));
56648-
56649 if (unlikely(in_interrupt()))
56650 panic("Aiee, killing interrupt handler!");
56651- if (unlikely(!tsk->pid))
56652- panic("Attempted to kill the idle task!");
56653
56654 /*
56655 * If do_exit is called because this processes oopsed, it's possible
56656@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56657 */
56658 set_fs(USER_DS);
56659
56660+ profile_task_exit(tsk);
56661+
56662+ WARN_ON(atomic_read(&tsk->fs_excl));
56663+ WARN_ON(blk_needs_flush_plug(tsk));
56664+
56665+ if (unlikely(!tsk->pid))
56666+ panic("Attempted to kill the idle task!");
56667+
56668 tracehook_report_exit(&code);
56669
56670 validate_creds_for_do_exit(tsk);
56671@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56672 tsk->exit_code = code;
56673 taskstats_exit(tsk, group_dead);
56674
56675+ gr_acl_handle_psacct(tsk, code);
56676+ gr_acl_handle_exit();
56677+
56678 exit_mm(tsk);
56679
56680 if (group_dead)
56681diff -urNp linux-3.0.3/kernel/fork.c linux-3.0.3/kernel/fork.c
56682--- linux-3.0.3/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56683+++ linux-3.0.3/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
56684@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56685 *stackend = STACK_END_MAGIC; /* for overflow detection */
56686
56687 #ifdef CONFIG_CC_STACKPROTECTOR
56688- tsk->stack_canary = get_random_int();
56689+ tsk->stack_canary = pax_get_random_long();
56690 #endif
56691
56692 /* One for us, one for whoever does the "release_task()" (usually parent) */
56693@@ -308,13 +308,77 @@ out:
56694 }
56695
56696 #ifdef CONFIG_MMU
56697+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56698+{
56699+ struct vm_area_struct *tmp;
56700+ unsigned long charge;
56701+ struct mempolicy *pol;
56702+ struct file *file;
56703+
56704+ charge = 0;
56705+ if (mpnt->vm_flags & VM_ACCOUNT) {
56706+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56707+ if (security_vm_enough_memory(len))
56708+ goto fail_nomem;
56709+ charge = len;
56710+ }
56711+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56712+ if (!tmp)
56713+ goto fail_nomem;
56714+ *tmp = *mpnt;
56715+ tmp->vm_mm = mm;
56716+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
56717+ pol = mpol_dup(vma_policy(mpnt));
56718+ if (IS_ERR(pol))
56719+ goto fail_nomem_policy;
56720+ vma_set_policy(tmp, pol);
56721+ if (anon_vma_fork(tmp, mpnt))
56722+ goto fail_nomem_anon_vma_fork;
56723+ tmp->vm_flags &= ~VM_LOCKED;
56724+ tmp->vm_next = tmp->vm_prev = NULL;
56725+ tmp->vm_mirror = NULL;
56726+ file = tmp->vm_file;
56727+ if (file) {
56728+ struct inode *inode = file->f_path.dentry->d_inode;
56729+ struct address_space *mapping = file->f_mapping;
56730+
56731+ get_file(file);
56732+ if (tmp->vm_flags & VM_DENYWRITE)
56733+ atomic_dec(&inode->i_writecount);
56734+ mutex_lock(&mapping->i_mmap_mutex);
56735+ if (tmp->vm_flags & VM_SHARED)
56736+ mapping->i_mmap_writable++;
56737+ flush_dcache_mmap_lock(mapping);
56738+ /* insert tmp into the share list, just after mpnt */
56739+ vma_prio_tree_add(tmp, mpnt);
56740+ flush_dcache_mmap_unlock(mapping);
56741+ mutex_unlock(&mapping->i_mmap_mutex);
56742+ }
56743+
56744+ /*
56745+ * Clear hugetlb-related page reserves for children. This only
56746+ * affects MAP_PRIVATE mappings. Faults generated by the child
56747+ * are not guaranteed to succeed, even if read-only
56748+ */
56749+ if (is_vm_hugetlb_page(tmp))
56750+ reset_vma_resv_huge_pages(tmp);
56751+
56752+ return tmp;
56753+
56754+fail_nomem_anon_vma_fork:
56755+ mpol_put(pol);
56756+fail_nomem_policy:
56757+ kmem_cache_free(vm_area_cachep, tmp);
56758+fail_nomem:
56759+ vm_unacct_memory(charge);
56760+ return NULL;
56761+}
56762+
56763 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56764 {
56765 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56766 struct rb_node **rb_link, *rb_parent;
56767 int retval;
56768- unsigned long charge;
56769- struct mempolicy *pol;
56770
56771 down_write(&oldmm->mmap_sem);
56772 flush_cache_dup_mm(oldmm);
56773@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56774 mm->locked_vm = 0;
56775 mm->mmap = NULL;
56776 mm->mmap_cache = NULL;
56777- mm->free_area_cache = oldmm->mmap_base;
56778- mm->cached_hole_size = ~0UL;
56779+ mm->free_area_cache = oldmm->free_area_cache;
56780+ mm->cached_hole_size = oldmm->cached_hole_size;
56781 mm->map_count = 0;
56782 cpumask_clear(mm_cpumask(mm));
56783 mm->mm_rb = RB_ROOT;
56784@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56785
56786 prev = NULL;
56787 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56788- struct file *file;
56789-
56790 if (mpnt->vm_flags & VM_DONTCOPY) {
56791 long pages = vma_pages(mpnt);
56792 mm->total_vm -= pages;
56793@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56794 -pages);
56795 continue;
56796 }
56797- charge = 0;
56798- if (mpnt->vm_flags & VM_ACCOUNT) {
56799- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56800- if (security_vm_enough_memory(len))
56801- goto fail_nomem;
56802- charge = len;
56803- }
56804- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56805- if (!tmp)
56806- goto fail_nomem;
56807- *tmp = *mpnt;
56808- INIT_LIST_HEAD(&tmp->anon_vma_chain);
56809- pol = mpol_dup(vma_policy(mpnt));
56810- retval = PTR_ERR(pol);
56811- if (IS_ERR(pol))
56812- goto fail_nomem_policy;
56813- vma_set_policy(tmp, pol);
56814- tmp->vm_mm = mm;
56815- if (anon_vma_fork(tmp, mpnt))
56816- goto fail_nomem_anon_vma_fork;
56817- tmp->vm_flags &= ~VM_LOCKED;
56818- tmp->vm_next = tmp->vm_prev = NULL;
56819- file = tmp->vm_file;
56820- if (file) {
56821- struct inode *inode = file->f_path.dentry->d_inode;
56822- struct address_space *mapping = file->f_mapping;
56823-
56824- get_file(file);
56825- if (tmp->vm_flags & VM_DENYWRITE)
56826- atomic_dec(&inode->i_writecount);
56827- mutex_lock(&mapping->i_mmap_mutex);
56828- if (tmp->vm_flags & VM_SHARED)
56829- mapping->i_mmap_writable++;
56830- flush_dcache_mmap_lock(mapping);
56831- /* insert tmp into the share list, just after mpnt */
56832- vma_prio_tree_add(tmp, mpnt);
56833- flush_dcache_mmap_unlock(mapping);
56834- mutex_unlock(&mapping->i_mmap_mutex);
56835+ tmp = dup_vma(mm, mpnt);
56836+ if (!tmp) {
56837+ retval = -ENOMEM;
56838+ goto out;
56839 }
56840
56841 /*
56842- * Clear hugetlb-related page reserves for children. This only
56843- * affects MAP_PRIVATE mappings. Faults generated by the child
56844- * are not guaranteed to succeed, even if read-only
56845- */
56846- if (is_vm_hugetlb_page(tmp))
56847- reset_vma_resv_huge_pages(tmp);
56848-
56849- /*
56850 * Link in the new vma and copy the page table entries.
56851 */
56852 *pprev = tmp;
56853@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56854 if (retval)
56855 goto out;
56856 }
56857+
56858+#ifdef CONFIG_PAX_SEGMEXEC
56859+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56860+ struct vm_area_struct *mpnt_m;
56861+
56862+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56863+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56864+
56865+ if (!mpnt->vm_mirror)
56866+ continue;
56867+
56868+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56869+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56870+ mpnt->vm_mirror = mpnt_m;
56871+ } else {
56872+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56873+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56874+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56875+ mpnt->vm_mirror->vm_mirror = mpnt;
56876+ }
56877+ }
56878+ BUG_ON(mpnt_m);
56879+ }
56880+#endif
56881+
56882 /* a new mm has just been created */
56883 arch_dup_mmap(oldmm, mm);
56884 retval = 0;
56885@@ -429,14 +474,6 @@ out:
56886 flush_tlb_mm(oldmm);
56887 up_write(&oldmm->mmap_sem);
56888 return retval;
56889-fail_nomem_anon_vma_fork:
56890- mpol_put(pol);
56891-fail_nomem_policy:
56892- kmem_cache_free(vm_area_cachep, tmp);
56893-fail_nomem:
56894- retval = -ENOMEM;
56895- vm_unacct_memory(charge);
56896- goto out;
56897 }
56898
56899 static inline int mm_alloc_pgd(struct mm_struct * mm)
56900@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56901 spin_unlock(&fs->lock);
56902 return -EAGAIN;
56903 }
56904- fs->users++;
56905+ atomic_inc(&fs->users);
56906 spin_unlock(&fs->lock);
56907 return 0;
56908 }
56909 tsk->fs = copy_fs_struct(fs);
56910 if (!tsk->fs)
56911 return -ENOMEM;
56912+ gr_set_chroot_entries(tsk, &tsk->fs->root);
56913 return 0;
56914 }
56915
56916@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
56917 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56918 #endif
56919 retval = -EAGAIN;
56920+
56921+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56922+
56923 if (atomic_read(&p->real_cred->user->processes) >=
56924 task_rlimit(p, RLIMIT_NPROC)) {
56925- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56926- p->real_cred->user != INIT_USER)
56927+ if (p->real_cred->user != INIT_USER &&
56928+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56929 goto bad_fork_free;
56930 }
56931+ current->flags &= ~PF_NPROC_EXCEEDED;
56932
56933 retval = copy_creds(p, clone_flags);
56934 if (retval < 0)
56935@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
56936 if (clone_flags & CLONE_THREAD)
56937 p->tgid = current->tgid;
56938
56939+ gr_copy_label(p);
56940+
56941 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56942 /*
56943 * Clear TID on mm_release()?
56944@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
56945 bad_fork_free:
56946 free_task(p);
56947 fork_out:
56948+ gr_log_forkfail(retval);
56949+
56950 return ERR_PTR(retval);
56951 }
56952
56953@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
56954 if (clone_flags & CLONE_PARENT_SETTID)
56955 put_user(nr, parent_tidptr);
56956
56957+ gr_handle_brute_check();
56958+
56959 if (clone_flags & CLONE_VFORK) {
56960 p->vfork_done = &vfork;
56961 init_completion(&vfork);
56962@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
56963 return 0;
56964
56965 /* don't need lock here; in the worst case we'll do useless copy */
56966- if (fs->users == 1)
56967+ if (atomic_read(&fs->users) == 1)
56968 return 0;
56969
56970 *new_fsp = copy_fs_struct(fs);
56971@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56972 fs = current->fs;
56973 spin_lock(&fs->lock);
56974 current->fs = new_fs;
56975- if (--fs->users)
56976+ gr_set_chroot_entries(current, &current->fs->root);
56977+ if (atomic_dec_return(&fs->users))
56978 new_fs = NULL;
56979 else
56980 new_fs = fs;
56981diff -urNp linux-3.0.3/kernel/futex.c linux-3.0.3/kernel/futex.c
56982--- linux-3.0.3/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56983+++ linux-3.0.3/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56984@@ -54,6 +54,7 @@
56985 #include <linux/mount.h>
56986 #include <linux/pagemap.h>
56987 #include <linux/syscalls.h>
56988+#include <linux/ptrace.h>
56989 #include <linux/signal.h>
56990 #include <linux/module.h>
56991 #include <linux/magic.h>
56992@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56993 struct page *page, *page_head;
56994 int err, ro = 0;
56995
56996+#ifdef CONFIG_PAX_SEGMEXEC
56997+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56998+ return -EFAULT;
56999+#endif
57000+
57001 /*
57002 * The futex address must be "naturally" aligned.
57003 */
57004@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
57005 struct futex_q q = futex_q_init;
57006 int ret;
57007
57008+ pax_track_stack();
57009+
57010 if (!bitset)
57011 return -EINVAL;
57012 q.bitset = bitset;
57013@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
57014 struct futex_q q = futex_q_init;
57015 int res, ret;
57016
57017+ pax_track_stack();
57018+
57019 if (!bitset)
57020 return -EINVAL;
57021
57022@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57023 {
57024 struct robust_list_head __user *head;
57025 unsigned long ret;
57026+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57027 const struct cred *cred = current_cred(), *pcred;
57028+#endif
57029
57030 if (!futex_cmpxchg_enabled)
57031 return -ENOSYS;
57032@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57033 if (!p)
57034 goto err_unlock;
57035 ret = -EPERM;
57036+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57037+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57038+ goto err_unlock;
57039+#else
57040 pcred = __task_cred(p);
57041 /* If victim is in different user_ns, then uids are not
57042 comparable, so we must have CAP_SYS_PTRACE */
57043@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57044 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57045 goto err_unlock;
57046 ok:
57047+#endif
57048 head = p->robust_list;
57049 rcu_read_unlock();
57050 }
57051@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
57052 {
57053 u32 curval;
57054 int i;
57055+ mm_segment_t oldfs;
57056
57057 /*
57058 * This will fail and we want it. Some arch implementations do
57059@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
57060 * implementation, the non-functional ones will return
57061 * -ENOSYS.
57062 */
57063+ oldfs = get_fs();
57064+ set_fs(USER_DS);
57065 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57066 futex_cmpxchg_enabled = 1;
57067+ set_fs(oldfs);
57068
57069 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57070 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57071diff -urNp linux-3.0.3/kernel/futex_compat.c linux-3.0.3/kernel/futex_compat.c
57072--- linux-3.0.3/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
57073+++ linux-3.0.3/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
57074@@ -10,6 +10,7 @@
57075 #include <linux/compat.h>
57076 #include <linux/nsproxy.h>
57077 #include <linux/futex.h>
57078+#include <linux/ptrace.h>
57079
57080 #include <asm/uaccess.h>
57081
57082@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57083 {
57084 struct compat_robust_list_head __user *head;
57085 unsigned long ret;
57086- const struct cred *cred = current_cred(), *pcred;
57087+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57088+ const struct cred *cred = current_cred();
57089+ const struct cred *pcred;
57090+#endif
57091
57092 if (!futex_cmpxchg_enabled)
57093 return -ENOSYS;
57094@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57095 if (!p)
57096 goto err_unlock;
57097 ret = -EPERM;
57098+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57099+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57100+ goto err_unlock;
57101+#else
57102 pcred = __task_cred(p);
57103 /* If victim is in different user_ns, then uids are not
57104 comparable, so we must have CAP_SYS_PTRACE */
57105@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57106 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57107 goto err_unlock;
57108 ok:
57109+#endif
57110 head = p->compat_robust_list;
57111 rcu_read_unlock();
57112 }
57113diff -urNp linux-3.0.3/kernel/gcov/base.c linux-3.0.3/kernel/gcov/base.c
57114--- linux-3.0.3/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
57115+++ linux-3.0.3/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
57116@@ -102,11 +102,6 @@ void gcov_enable_events(void)
57117 }
57118
57119 #ifdef CONFIG_MODULES
57120-static inline int within(void *addr, void *start, unsigned long size)
57121-{
57122- return ((addr >= start) && (addr < start + size));
57123-}
57124-
57125 /* Update list and generate events when modules are unloaded. */
57126 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57127 void *data)
57128@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57129 prev = NULL;
57130 /* Remove entries located in module from linked list. */
57131 for (info = gcov_info_head; info; info = info->next) {
57132- if (within(info, mod->module_core, mod->core_size)) {
57133+ if (within_module_core_rw((unsigned long)info, mod)) {
57134 if (prev)
57135 prev->next = info->next;
57136 else
57137diff -urNp linux-3.0.3/kernel/hrtimer.c linux-3.0.3/kernel/hrtimer.c
57138--- linux-3.0.3/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
57139+++ linux-3.0.3/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
57140@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
57141 local_irq_restore(flags);
57142 }
57143
57144-static void run_hrtimer_softirq(struct softirq_action *h)
57145+static void run_hrtimer_softirq(void)
57146 {
57147 hrtimer_peek_ahead_timers();
57148 }
57149diff -urNp linux-3.0.3/kernel/jump_label.c linux-3.0.3/kernel/jump_label.c
57150--- linux-3.0.3/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
57151+++ linux-3.0.3/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
57152@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
57153
57154 size = (((unsigned long)stop - (unsigned long)start)
57155 / sizeof(struct jump_entry));
57156+ pax_open_kernel();
57157 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57158+ pax_close_kernel();
57159 }
57160
57161 static void jump_label_update(struct jump_label_key *key, int enable);
57162@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
57163 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
57164 struct jump_entry *iter;
57165
57166+ pax_open_kernel();
57167 for (iter = iter_start; iter < iter_stop; iter++) {
57168 if (within_module_init(iter->code, mod))
57169 iter->code = 0;
57170 }
57171+ pax_close_kernel();
57172 }
57173
57174 static int
57175diff -urNp linux-3.0.3/kernel/kallsyms.c linux-3.0.3/kernel/kallsyms.c
57176--- linux-3.0.3/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
57177+++ linux-3.0.3/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
57178@@ -11,6 +11,9 @@
57179 * Changed the compression method from stem compression to "table lookup"
57180 * compression (see scripts/kallsyms.c for a more complete description)
57181 */
57182+#ifdef CONFIG_GRKERNSEC_HIDESYM
57183+#define __INCLUDED_BY_HIDESYM 1
57184+#endif
57185 #include <linux/kallsyms.h>
57186 #include <linux/module.h>
57187 #include <linux/init.h>
57188@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57189
57190 static inline int is_kernel_inittext(unsigned long addr)
57191 {
57192+ if (system_state != SYSTEM_BOOTING)
57193+ return 0;
57194+
57195 if (addr >= (unsigned long)_sinittext
57196 && addr <= (unsigned long)_einittext)
57197 return 1;
57198 return 0;
57199 }
57200
57201+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57202+#ifdef CONFIG_MODULES
57203+static inline int is_module_text(unsigned long addr)
57204+{
57205+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57206+ return 1;
57207+
57208+ addr = ktla_ktva(addr);
57209+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57210+}
57211+#else
57212+static inline int is_module_text(unsigned long addr)
57213+{
57214+ return 0;
57215+}
57216+#endif
57217+#endif
57218+
57219 static inline int is_kernel_text(unsigned long addr)
57220 {
57221 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57222@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57223
57224 static inline int is_kernel(unsigned long addr)
57225 {
57226+
57227+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57228+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
57229+ return 1;
57230+
57231+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57232+#else
57233 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57234+#endif
57235+
57236 return 1;
57237 return in_gate_area_no_mm(addr);
57238 }
57239
57240 static int is_ksym_addr(unsigned long addr)
57241 {
57242+
57243+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57244+ if (is_module_text(addr))
57245+ return 0;
57246+#endif
57247+
57248 if (all_var)
57249 return is_kernel(addr);
57250
57251@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57252
57253 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57254 {
57255- iter->name[0] = '\0';
57256 iter->nameoff = get_symbol_offset(new_pos);
57257 iter->pos = new_pos;
57258 }
57259@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57260 {
57261 struct kallsym_iter *iter = m->private;
57262
57263+#ifdef CONFIG_GRKERNSEC_HIDESYM
57264+ if (current_uid())
57265+ return 0;
57266+#endif
57267+
57268 /* Some debugging symbols have no name. Ignore them. */
57269 if (!iter->name[0])
57270 return 0;
57271@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57272 struct kallsym_iter *iter;
57273 int ret;
57274
57275- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57276+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57277 if (!iter)
57278 return -ENOMEM;
57279 reset_iter(iter, 0);
57280diff -urNp linux-3.0.3/kernel/kmod.c linux-3.0.3/kernel/kmod.c
57281--- linux-3.0.3/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
57282+++ linux-3.0.3/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
57283@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57284 * If module auto-loading support is disabled then this function
57285 * becomes a no-operation.
57286 */
57287-int __request_module(bool wait, const char *fmt, ...)
57288+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57289 {
57290- va_list args;
57291 char module_name[MODULE_NAME_LEN];
57292 unsigned int max_modprobes;
57293 int ret;
57294- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57295+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57296 static char *envp[] = { "HOME=/",
57297 "TERM=linux",
57298 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57299@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
57300 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57301 static int kmod_loop_msg;
57302
57303- va_start(args, fmt);
57304- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57305- va_end(args);
57306+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57307 if (ret >= MODULE_NAME_LEN)
57308 return -ENAMETOOLONG;
57309
57310@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57311 if (ret)
57312 return ret;
57313
57314+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57315+ if (!current_uid()) {
57316+ /* hack to workaround consolekit/udisks stupidity */
57317+ read_lock(&tasklist_lock);
57318+ if (!strcmp(current->comm, "mount") &&
57319+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57320+ read_unlock(&tasklist_lock);
57321+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57322+ return -EPERM;
57323+ }
57324+ read_unlock(&tasklist_lock);
57325+ }
57326+#endif
57327+
57328 /* If modprobe needs a service that is in a module, we get a recursive
57329 * loop. Limit the number of running kmod threads to max_threads/2 or
57330 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57331@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57332 atomic_dec(&kmod_concurrent);
57333 return ret;
57334 }
57335+
57336+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57337+{
57338+ va_list args;
57339+ int ret;
57340+
57341+ va_start(args, fmt);
57342+ ret = ____request_module(wait, module_param, fmt, args);
57343+ va_end(args);
57344+
57345+ return ret;
57346+}
57347+
57348+int __request_module(bool wait, const char *fmt, ...)
57349+{
57350+ va_list args;
57351+ int ret;
57352+
57353+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57354+ if (current_uid()) {
57355+ char module_param[MODULE_NAME_LEN];
57356+
57357+ memset(module_param, 0, sizeof(module_param));
57358+
57359+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57360+
57361+ va_start(args, fmt);
57362+ ret = ____request_module(wait, module_param, fmt, args);
57363+ va_end(args);
57364+
57365+ return ret;
57366+ }
57367+#endif
57368+
57369+ va_start(args, fmt);
57370+ ret = ____request_module(wait, NULL, fmt, args);
57371+ va_end(args);
57372+
57373+ return ret;
57374+}
57375+
57376 EXPORT_SYMBOL(__request_module);
57377 #endif /* CONFIG_MODULES */
57378
57379diff -urNp linux-3.0.3/kernel/kprobes.c linux-3.0.3/kernel/kprobes.c
57380--- linux-3.0.3/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57381+++ linux-3.0.3/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57382@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57383 * kernel image and loaded module images reside. This is required
57384 * so x86_64 can correctly handle the %rip-relative fixups.
57385 */
57386- kip->insns = module_alloc(PAGE_SIZE);
57387+ kip->insns = module_alloc_exec(PAGE_SIZE);
57388 if (!kip->insns) {
57389 kfree(kip);
57390 return NULL;
57391@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57392 */
57393 if (!list_is_singular(&kip->list)) {
57394 list_del(&kip->list);
57395- module_free(NULL, kip->insns);
57396+ module_free_exec(NULL, kip->insns);
57397 kfree(kip);
57398 }
57399 return 1;
57400@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57401 {
57402 int i, err = 0;
57403 unsigned long offset = 0, size = 0;
57404- char *modname, namebuf[128];
57405+ char *modname, namebuf[KSYM_NAME_LEN];
57406 const char *symbol_name;
57407 void *addr;
57408 struct kprobe_blackpoint *kb;
57409@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57410 const char *sym = NULL;
57411 unsigned int i = *(loff_t *) v;
57412 unsigned long offset = 0;
57413- char *modname, namebuf[128];
57414+ char *modname, namebuf[KSYM_NAME_LEN];
57415
57416 head = &kprobe_table[i];
57417 preempt_disable();
57418diff -urNp linux-3.0.3/kernel/lockdep.c linux-3.0.3/kernel/lockdep.c
57419--- linux-3.0.3/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57420+++ linux-3.0.3/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57421@@ -583,6 +583,10 @@ static int static_obj(void *obj)
57422 end = (unsigned long) &_end,
57423 addr = (unsigned long) obj;
57424
57425+#ifdef CONFIG_PAX_KERNEXEC
57426+ start = ktla_ktva(start);
57427+#endif
57428+
57429 /*
57430 * static variable?
57431 */
57432@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57433 if (!static_obj(lock->key)) {
57434 debug_locks_off();
57435 printk("INFO: trying to register non-static key.\n");
57436+ printk("lock:%pS key:%pS.\n", lock, lock->key);
57437 printk("the code is fine but needs lockdep annotation.\n");
57438 printk("turning off the locking correctness validator.\n");
57439 dump_stack();
57440@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57441 if (!class)
57442 return 0;
57443 }
57444- atomic_inc((atomic_t *)&class->ops);
57445+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57446 if (very_verbose(class)) {
57447 printk("\nacquire class [%p] %s", class->key, class->name);
57448 if (class->name_version > 1)
57449diff -urNp linux-3.0.3/kernel/lockdep_proc.c linux-3.0.3/kernel/lockdep_proc.c
57450--- linux-3.0.3/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57451+++ linux-3.0.3/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57452@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57453
57454 static void print_name(struct seq_file *m, struct lock_class *class)
57455 {
57456- char str[128];
57457+ char str[KSYM_NAME_LEN];
57458 const char *name = class->name;
57459
57460 if (!name) {
57461diff -urNp linux-3.0.3/kernel/module.c linux-3.0.3/kernel/module.c
57462--- linux-3.0.3/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57463+++ linux-3.0.3/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57464@@ -58,6 +58,7 @@
57465 #include <linux/jump_label.h>
57466 #include <linux/pfn.h>
57467 #include <linux/bsearch.h>
57468+#include <linux/grsecurity.h>
57469
57470 #define CREATE_TRACE_POINTS
57471 #include <trace/events/module.h>
57472@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57473
57474 /* Bounds of module allocation, for speeding __module_address.
57475 * Protected by module_mutex. */
57476-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57477+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57478+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57479
57480 int register_module_notifier(struct notifier_block * nb)
57481 {
57482@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57483 return true;
57484
57485 list_for_each_entry_rcu(mod, &modules, list) {
57486- struct symsearch arr[] = {
57487+ struct symsearch modarr[] = {
57488 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57489 NOT_GPL_ONLY, false },
57490 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57491@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57492 #endif
57493 };
57494
57495- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57496+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57497 return true;
57498 }
57499 return false;
57500@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57501 static int percpu_modalloc(struct module *mod,
57502 unsigned long size, unsigned long align)
57503 {
57504- if (align > PAGE_SIZE) {
57505+ if (align-1 >= PAGE_SIZE) {
57506 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57507 mod->name, align, PAGE_SIZE);
57508 align = PAGE_SIZE;
57509@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57510 */
57511 #ifdef CONFIG_SYSFS
57512
57513-#ifdef CONFIG_KALLSYMS
57514+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57515 static inline bool sect_empty(const Elf_Shdr *sect)
57516 {
57517 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57518@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57519
57520 static void unset_module_core_ro_nx(struct module *mod)
57521 {
57522- set_page_attributes(mod->module_core + mod->core_text_size,
57523- mod->module_core + mod->core_size,
57524+ set_page_attributes(mod->module_core_rw,
57525+ mod->module_core_rw + mod->core_size_rw,
57526 set_memory_x);
57527- set_page_attributes(mod->module_core,
57528- mod->module_core + mod->core_ro_size,
57529+ set_page_attributes(mod->module_core_rx,
57530+ mod->module_core_rx + mod->core_size_rx,
57531 set_memory_rw);
57532 }
57533
57534 static void unset_module_init_ro_nx(struct module *mod)
57535 {
57536- set_page_attributes(mod->module_init + mod->init_text_size,
57537- mod->module_init + mod->init_size,
57538+ set_page_attributes(mod->module_init_rw,
57539+ mod->module_init_rw + mod->init_size_rw,
57540 set_memory_x);
57541- set_page_attributes(mod->module_init,
57542- mod->module_init + mod->init_ro_size,
57543+ set_page_attributes(mod->module_init_rx,
57544+ mod->module_init_rx + mod->init_size_rx,
57545 set_memory_rw);
57546 }
57547
57548@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57549
57550 mutex_lock(&module_mutex);
57551 list_for_each_entry_rcu(mod, &modules, list) {
57552- if ((mod->module_core) && (mod->core_text_size)) {
57553- set_page_attributes(mod->module_core,
57554- mod->module_core + mod->core_text_size,
57555+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57556+ set_page_attributes(mod->module_core_rx,
57557+ mod->module_core_rx + mod->core_size_rx,
57558 set_memory_rw);
57559 }
57560- if ((mod->module_init) && (mod->init_text_size)) {
57561- set_page_attributes(mod->module_init,
57562- mod->module_init + mod->init_text_size,
57563+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57564+ set_page_attributes(mod->module_init_rx,
57565+ mod->module_init_rx + mod->init_size_rx,
57566 set_memory_rw);
57567 }
57568 }
57569@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57570
57571 mutex_lock(&module_mutex);
57572 list_for_each_entry_rcu(mod, &modules, list) {
57573- if ((mod->module_core) && (mod->core_text_size)) {
57574- set_page_attributes(mod->module_core,
57575- mod->module_core + mod->core_text_size,
57576+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57577+ set_page_attributes(mod->module_core_rx,
57578+ mod->module_core_rx + mod->core_size_rx,
57579 set_memory_ro);
57580 }
57581- if ((mod->module_init) && (mod->init_text_size)) {
57582- set_page_attributes(mod->module_init,
57583- mod->module_init + mod->init_text_size,
57584+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57585+ set_page_attributes(mod->module_init_rx,
57586+ mod->module_init_rx + mod->init_size_rx,
57587 set_memory_ro);
57588 }
57589 }
57590@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57591
57592 /* This may be NULL, but that's OK */
57593 unset_module_init_ro_nx(mod);
57594- module_free(mod, mod->module_init);
57595+ module_free(mod, mod->module_init_rw);
57596+ module_free_exec(mod, mod->module_init_rx);
57597 kfree(mod->args);
57598 percpu_modfree(mod);
57599
57600 /* Free lock-classes: */
57601- lockdep_free_key_range(mod->module_core, mod->core_size);
57602+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57603+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57604
57605 /* Finally, free the core (containing the module structure) */
57606 unset_module_core_ro_nx(mod);
57607- module_free(mod, mod->module_core);
57608+ module_free_exec(mod, mod->module_core_rx);
57609+ module_free(mod, mod->module_core_rw);
57610
57611 #ifdef CONFIG_MPU
57612 update_protections(current->mm);
57613@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57614 unsigned int i;
57615 int ret = 0;
57616 const struct kernel_symbol *ksym;
57617+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57618+ int is_fs_load = 0;
57619+ int register_filesystem_found = 0;
57620+ char *p;
57621+
57622+ p = strstr(mod->args, "grsec_modharden_fs");
57623+ if (p) {
57624+ char *endptr = p + strlen("grsec_modharden_fs");
57625+ /* copy \0 as well */
57626+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57627+ is_fs_load = 1;
57628+ }
57629+#endif
57630
57631 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57632 const char *name = info->strtab + sym[i].st_name;
57633
57634+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57635+ /* it's a real shame this will never get ripped and copied
57636+ upstream! ;(
57637+ */
57638+ if (is_fs_load && !strcmp(name, "register_filesystem"))
57639+ register_filesystem_found = 1;
57640+#endif
57641+
57642 switch (sym[i].st_shndx) {
57643 case SHN_COMMON:
57644 /* We compiled with -fno-common. These are not
57645@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57646 ksym = resolve_symbol_wait(mod, info, name);
57647 /* Ok if resolved. */
57648 if (ksym && !IS_ERR(ksym)) {
57649+ pax_open_kernel();
57650 sym[i].st_value = ksym->value;
57651+ pax_close_kernel();
57652 break;
57653 }
57654
57655@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57656 secbase = (unsigned long)mod_percpu(mod);
57657 else
57658 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57659+ pax_open_kernel();
57660 sym[i].st_value += secbase;
57661+ pax_close_kernel();
57662 break;
57663 }
57664 }
57665
57666+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57667+ if (is_fs_load && !register_filesystem_found) {
57668+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57669+ ret = -EPERM;
57670+ }
57671+#endif
57672+
57673 return ret;
57674 }
57675
57676@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57677 || s->sh_entsize != ~0UL
57678 || strstarts(sname, ".init"))
57679 continue;
57680- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57681+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57682+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57683+ else
57684+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57685 DEBUGP("\t%s\n", name);
57686 }
57687- switch (m) {
57688- case 0: /* executable */
57689- mod->core_size = debug_align(mod->core_size);
57690- mod->core_text_size = mod->core_size;
57691- break;
57692- case 1: /* RO: text and ro-data */
57693- mod->core_size = debug_align(mod->core_size);
57694- mod->core_ro_size = mod->core_size;
57695- break;
57696- case 3: /* whole core */
57697- mod->core_size = debug_align(mod->core_size);
57698- break;
57699- }
57700 }
57701
57702 DEBUGP("Init section allocation order:\n");
57703@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57704 || s->sh_entsize != ~0UL
57705 || !strstarts(sname, ".init"))
57706 continue;
57707- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57708- | INIT_OFFSET_MASK);
57709+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57710+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57711+ else
57712+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57713+ s->sh_entsize |= INIT_OFFSET_MASK;
57714 DEBUGP("\t%s\n", sname);
57715 }
57716- switch (m) {
57717- case 0: /* executable */
57718- mod->init_size = debug_align(mod->init_size);
57719- mod->init_text_size = mod->init_size;
57720- break;
57721- case 1: /* RO: text and ro-data */
57722- mod->init_size = debug_align(mod->init_size);
57723- mod->init_ro_size = mod->init_size;
57724- break;
57725- case 3: /* whole init */
57726- mod->init_size = debug_align(mod->init_size);
57727- break;
57728- }
57729 }
57730 }
57731
57732@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57733
57734 /* Put symbol section at end of init part of module. */
57735 symsect->sh_flags |= SHF_ALLOC;
57736- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57737+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57738 info->index.sym) | INIT_OFFSET_MASK;
57739 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57740
57741@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57742 }
57743
57744 /* Append room for core symbols at end of core part. */
57745- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57746- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57747+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57748+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57749
57750 /* Put string table section at end of init part of module. */
57751 strsect->sh_flags |= SHF_ALLOC;
57752- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57753+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57754 info->index.str) | INIT_OFFSET_MASK;
57755 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57756
57757 /* Append room for core symbols' strings at end of core part. */
57758- info->stroffs = mod->core_size;
57759+ info->stroffs = mod->core_size_rx;
57760 __set_bit(0, info->strmap);
57761- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57762+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57763 }
57764
57765 static void add_kallsyms(struct module *mod, const struct load_info *info)
57766@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57767 /* Make sure we get permanent strtab: don't use info->strtab. */
57768 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57769
57770+ pax_open_kernel();
57771+
57772 /* Set types up while we still have access to sections. */
57773 for (i = 0; i < mod->num_symtab; i++)
57774 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57775
57776- mod->core_symtab = dst = mod->module_core + info->symoffs;
57777+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57778 src = mod->symtab;
57779 *dst = *src;
57780 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57781@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57782 }
57783 mod->core_num_syms = ndst;
57784
57785- mod->core_strtab = s = mod->module_core + info->stroffs;
57786+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57787 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57788 if (test_bit(i, info->strmap))
57789 *++s = mod->strtab[i];
57790+
57791+ pax_close_kernel();
57792 }
57793 #else
57794 static inline void layout_symtab(struct module *mod, struct load_info *info)
57795@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57796 ddebug_remove_module(debug->modname);
57797 }
57798
57799-static void *module_alloc_update_bounds(unsigned long size)
57800+static void *module_alloc_update_bounds_rw(unsigned long size)
57801 {
57802 void *ret = module_alloc(size);
57803
57804 if (ret) {
57805 mutex_lock(&module_mutex);
57806 /* Update module bounds. */
57807- if ((unsigned long)ret < module_addr_min)
57808- module_addr_min = (unsigned long)ret;
57809- if ((unsigned long)ret + size > module_addr_max)
57810- module_addr_max = (unsigned long)ret + size;
57811+ if ((unsigned long)ret < module_addr_min_rw)
57812+ module_addr_min_rw = (unsigned long)ret;
57813+ if ((unsigned long)ret + size > module_addr_max_rw)
57814+ module_addr_max_rw = (unsigned long)ret + size;
57815+ mutex_unlock(&module_mutex);
57816+ }
57817+ return ret;
57818+}
57819+
57820+static void *module_alloc_update_bounds_rx(unsigned long size)
57821+{
57822+ void *ret = module_alloc_exec(size);
57823+
57824+ if (ret) {
57825+ mutex_lock(&module_mutex);
57826+ /* Update module bounds. */
57827+ if ((unsigned long)ret < module_addr_min_rx)
57828+ module_addr_min_rx = (unsigned long)ret;
57829+ if ((unsigned long)ret + size > module_addr_max_rx)
57830+ module_addr_max_rx = (unsigned long)ret + size;
57831 mutex_unlock(&module_mutex);
57832 }
57833 return ret;
57834@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57835 void *ptr;
57836
57837 /* Do the allocs. */
57838- ptr = module_alloc_update_bounds(mod->core_size);
57839+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57840 /*
57841 * The pointer to this block is stored in the module structure
57842 * which is inside the block. Just mark it as not being a
57843@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57844 if (!ptr)
57845 return -ENOMEM;
57846
57847- memset(ptr, 0, mod->core_size);
57848- mod->module_core = ptr;
57849+ memset(ptr, 0, mod->core_size_rw);
57850+ mod->module_core_rw = ptr;
57851
57852- ptr = module_alloc_update_bounds(mod->init_size);
57853+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57854 /*
57855 * The pointer to this block is stored in the module structure
57856 * which is inside the block. This block doesn't need to be
57857 * scanned as it contains data and code that will be freed
57858 * after the module is initialized.
57859 */
57860- kmemleak_ignore(ptr);
57861- if (!ptr && mod->init_size) {
57862- module_free(mod, mod->module_core);
57863+ kmemleak_not_leak(ptr);
57864+ if (!ptr && mod->init_size_rw) {
57865+ module_free(mod, mod->module_core_rw);
57866 return -ENOMEM;
57867 }
57868- memset(ptr, 0, mod->init_size);
57869- mod->module_init = ptr;
57870+ memset(ptr, 0, mod->init_size_rw);
57871+ mod->module_init_rw = ptr;
57872+
57873+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57874+ kmemleak_not_leak(ptr);
57875+ if (!ptr) {
57876+ module_free(mod, mod->module_init_rw);
57877+ module_free(mod, mod->module_core_rw);
57878+ return -ENOMEM;
57879+ }
57880+
57881+ pax_open_kernel();
57882+ memset(ptr, 0, mod->core_size_rx);
57883+ pax_close_kernel();
57884+ mod->module_core_rx = ptr;
57885+
57886+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57887+ kmemleak_not_leak(ptr);
57888+ if (!ptr && mod->init_size_rx) {
57889+ module_free_exec(mod, mod->module_core_rx);
57890+ module_free(mod, mod->module_init_rw);
57891+ module_free(mod, mod->module_core_rw);
57892+ return -ENOMEM;
57893+ }
57894+
57895+ pax_open_kernel();
57896+ memset(ptr, 0, mod->init_size_rx);
57897+ pax_close_kernel();
57898+ mod->module_init_rx = ptr;
57899
57900 /* Transfer each section which specifies SHF_ALLOC */
57901 DEBUGP("final section addresses:\n");
57902@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57903 if (!(shdr->sh_flags & SHF_ALLOC))
57904 continue;
57905
57906- if (shdr->sh_entsize & INIT_OFFSET_MASK)
57907- dest = mod->module_init
57908- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57909- else
57910- dest = mod->module_core + shdr->sh_entsize;
57911+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57912+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57913+ dest = mod->module_init_rw
57914+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57915+ else
57916+ dest = mod->module_init_rx
57917+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57918+ } else {
57919+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57920+ dest = mod->module_core_rw + shdr->sh_entsize;
57921+ else
57922+ dest = mod->module_core_rx + shdr->sh_entsize;
57923+ }
57924+
57925+ if (shdr->sh_type != SHT_NOBITS) {
57926+
57927+#ifdef CONFIG_PAX_KERNEXEC
57928+#ifdef CONFIG_X86_64
57929+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57930+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57931+#endif
57932+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57933+ pax_open_kernel();
57934+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57935+ pax_close_kernel();
57936+ } else
57937+#endif
57938
57939- if (shdr->sh_type != SHT_NOBITS)
57940 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57941+ }
57942 /* Update sh_addr to point to copy in image. */
57943- shdr->sh_addr = (unsigned long)dest;
57944+
57945+#ifdef CONFIG_PAX_KERNEXEC
57946+ if (shdr->sh_flags & SHF_EXECINSTR)
57947+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
57948+ else
57949+#endif
57950+
57951+ shdr->sh_addr = (unsigned long)dest;
57952 DEBUGP("\t0x%lx %s\n",
57953 shdr->sh_addr, info->secstrings + shdr->sh_name);
57954 }
57955@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57956 * Do it before processing of module parameters, so the module
57957 * can provide parameter accessor functions of its own.
57958 */
57959- if (mod->module_init)
57960- flush_icache_range((unsigned long)mod->module_init,
57961- (unsigned long)mod->module_init
57962- + mod->init_size);
57963- flush_icache_range((unsigned long)mod->module_core,
57964- (unsigned long)mod->module_core + mod->core_size);
57965+ if (mod->module_init_rx)
57966+ flush_icache_range((unsigned long)mod->module_init_rx,
57967+ (unsigned long)mod->module_init_rx
57968+ + mod->init_size_rx);
57969+ flush_icache_range((unsigned long)mod->module_core_rx,
57970+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
57971
57972 set_fs(old_fs);
57973 }
57974@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57975 {
57976 kfree(info->strmap);
57977 percpu_modfree(mod);
57978- module_free(mod, mod->module_init);
57979- module_free(mod, mod->module_core);
57980+ module_free_exec(mod, mod->module_init_rx);
57981+ module_free_exec(mod, mod->module_core_rx);
57982+ module_free(mod, mod->module_init_rw);
57983+ module_free(mod, mod->module_core_rw);
57984 }
57985
57986 static int post_relocation(struct module *mod, const struct load_info *info)
57987@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57988 if (err)
57989 goto free_unload;
57990
57991+ /* Now copy in args */
57992+ mod->args = strndup_user(uargs, ~0UL >> 1);
57993+ if (IS_ERR(mod->args)) {
57994+ err = PTR_ERR(mod->args);
57995+ goto free_unload;
57996+ }
57997+
57998 /* Set up MODINFO_ATTR fields */
57999 setup_modinfo(mod, &info);
58000
58001+#ifdef CONFIG_GRKERNSEC_MODHARDEN
58002+ {
58003+ char *p, *p2;
58004+
58005+ if (strstr(mod->args, "grsec_modharden_netdev")) {
58006+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58007+ err = -EPERM;
58008+ goto free_modinfo;
58009+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58010+ p += strlen("grsec_modharden_normal");
58011+ p2 = strstr(p, "_");
58012+ if (p2) {
58013+ *p2 = '\0';
58014+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58015+ *p2 = '_';
58016+ }
58017+ err = -EPERM;
58018+ goto free_modinfo;
58019+ }
58020+ }
58021+#endif
58022+
58023 /* Fix up syms, so that st_value is a pointer to location. */
58024 err = simplify_symbols(mod, &info);
58025 if (err < 0)
58026@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
58027
58028 flush_module_icache(mod);
58029
58030- /* Now copy in args */
58031- mod->args = strndup_user(uargs, ~0UL >> 1);
58032- if (IS_ERR(mod->args)) {
58033- err = PTR_ERR(mod->args);
58034- goto free_arch_cleanup;
58035- }
58036-
58037 /* Mark state as coming so strong_try_module_get() ignores us. */
58038 mod->state = MODULE_STATE_COMING;
58039
58040@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
58041 unlock:
58042 mutex_unlock(&module_mutex);
58043 synchronize_sched();
58044- kfree(mod->args);
58045- free_arch_cleanup:
58046 module_arch_cleanup(mod);
58047 free_modinfo:
58048 free_modinfo(mod);
58049+ kfree(mod->args);
58050 free_unload:
58051 module_unload_free(mod);
58052 free_module:
58053@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
58054 MODULE_STATE_COMING, mod);
58055
58056 /* Set RO and NX regions for core */
58057- set_section_ro_nx(mod->module_core,
58058- mod->core_text_size,
58059- mod->core_ro_size,
58060- mod->core_size);
58061+ set_section_ro_nx(mod->module_core_rx,
58062+ mod->core_size_rx,
58063+ mod->core_size_rx,
58064+ mod->core_size_rx);
58065
58066 /* Set RO and NX regions for init */
58067- set_section_ro_nx(mod->module_init,
58068- mod->init_text_size,
58069- mod->init_ro_size,
58070- mod->init_size);
58071+ set_section_ro_nx(mod->module_init_rx,
58072+ mod->init_size_rx,
58073+ mod->init_size_rx,
58074+ mod->init_size_rx);
58075
58076 do_mod_ctors(mod);
58077 /* Start the module */
58078@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
58079 mod->strtab = mod->core_strtab;
58080 #endif
58081 unset_module_init_ro_nx(mod);
58082- module_free(mod, mod->module_init);
58083- mod->module_init = NULL;
58084- mod->init_size = 0;
58085- mod->init_ro_size = 0;
58086- mod->init_text_size = 0;
58087+ module_free(mod, mod->module_init_rw);
58088+ module_free_exec(mod, mod->module_init_rx);
58089+ mod->module_init_rw = NULL;
58090+ mod->module_init_rx = NULL;
58091+ mod->init_size_rw = 0;
58092+ mod->init_size_rx = 0;
58093 mutex_unlock(&module_mutex);
58094
58095 return 0;
58096@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
58097 unsigned long nextval;
58098
58099 /* At worse, next value is at end of module */
58100- if (within_module_init(addr, mod))
58101- nextval = (unsigned long)mod->module_init+mod->init_text_size;
58102+ if (within_module_init_rx(addr, mod))
58103+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58104+ else if (within_module_init_rw(addr, mod))
58105+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58106+ else if (within_module_core_rx(addr, mod))
58107+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58108+ else if (within_module_core_rw(addr, mod))
58109+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58110 else
58111- nextval = (unsigned long)mod->module_core+mod->core_text_size;
58112+ return NULL;
58113
58114 /* Scan for closest preceding symbol, and next symbol. (ELF
58115 starts real symbols at 1). */
58116@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
58117 char buf[8];
58118
58119 seq_printf(m, "%s %u",
58120- mod->name, mod->init_size + mod->core_size);
58121+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58122 print_unload_info(m, mod);
58123
58124 /* Informative for users. */
58125@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
58126 mod->state == MODULE_STATE_COMING ? "Loading":
58127 "Live");
58128 /* Used by oprofile and other similar tools. */
58129- seq_printf(m, " 0x%pK", mod->module_core);
58130+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58131
58132 /* Taints info */
58133 if (mod->taints)
58134@@ -3283,7 +3406,17 @@ static const struct file_operations proc
58135
58136 static int __init proc_modules_init(void)
58137 {
58138+#ifndef CONFIG_GRKERNSEC_HIDESYM
58139+#ifdef CONFIG_GRKERNSEC_PROC_USER
58140+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58141+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58142+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58143+#else
58144 proc_create("modules", 0, NULL, &proc_modules_operations);
58145+#endif
58146+#else
58147+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58148+#endif
58149 return 0;
58150 }
58151 module_init(proc_modules_init);
58152@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
58153 {
58154 struct module *mod;
58155
58156- if (addr < module_addr_min || addr > module_addr_max)
58157+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58158+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
58159 return NULL;
58160
58161 list_for_each_entry_rcu(mod, &modules, list)
58162- if (within_module_core(addr, mod)
58163- || within_module_init(addr, mod))
58164+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
58165 return mod;
58166 return NULL;
58167 }
58168@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
58169 */
58170 struct module *__module_text_address(unsigned long addr)
58171 {
58172- struct module *mod = __module_address(addr);
58173+ struct module *mod;
58174+
58175+#ifdef CONFIG_X86_32
58176+ addr = ktla_ktva(addr);
58177+#endif
58178+
58179+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58180+ return NULL;
58181+
58182+ mod = __module_address(addr);
58183+
58184 if (mod) {
58185 /* Make sure it's within the text section. */
58186- if (!within(addr, mod->module_init, mod->init_text_size)
58187- && !within(addr, mod->module_core, mod->core_text_size))
58188+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58189 mod = NULL;
58190 }
58191 return mod;
58192diff -urNp linux-3.0.3/kernel/mutex.c linux-3.0.3/kernel/mutex.c
58193--- linux-3.0.3/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
58194+++ linux-3.0.3/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
58195@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
58196 spin_lock_mutex(&lock->wait_lock, flags);
58197
58198 debug_mutex_lock_common(lock, &waiter);
58199- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58200+ debug_mutex_add_waiter(lock, &waiter, task);
58201
58202 /* add waiting tasks to the end of the waitqueue (FIFO): */
58203 list_add_tail(&waiter.list, &lock->wait_list);
58204@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
58205 * TASK_UNINTERRUPTIBLE case.)
58206 */
58207 if (unlikely(signal_pending_state(state, task))) {
58208- mutex_remove_waiter(lock, &waiter,
58209- task_thread_info(task));
58210+ mutex_remove_waiter(lock, &waiter, task);
58211 mutex_release(&lock->dep_map, 1, ip);
58212 spin_unlock_mutex(&lock->wait_lock, flags);
58213
58214@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
58215 done:
58216 lock_acquired(&lock->dep_map, ip);
58217 /* got the lock - rejoice! */
58218- mutex_remove_waiter(lock, &waiter, current_thread_info());
58219+ mutex_remove_waiter(lock, &waiter, task);
58220 mutex_set_owner(lock);
58221
58222 /* set it to 0 if there are no waiters left: */
58223diff -urNp linux-3.0.3/kernel/mutex-debug.c linux-3.0.3/kernel/mutex-debug.c
58224--- linux-3.0.3/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
58225+++ linux-3.0.3/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
58226@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58227 }
58228
58229 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58230- struct thread_info *ti)
58231+ struct task_struct *task)
58232 {
58233 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58234
58235 /* Mark the current thread as blocked on the lock: */
58236- ti->task->blocked_on = waiter;
58237+ task->blocked_on = waiter;
58238 }
58239
58240 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58241- struct thread_info *ti)
58242+ struct task_struct *task)
58243 {
58244 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58245- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58246- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58247- ti->task->blocked_on = NULL;
58248+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
58249+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58250+ task->blocked_on = NULL;
58251
58252 list_del_init(&waiter->list);
58253 waiter->task = NULL;
58254diff -urNp linux-3.0.3/kernel/mutex-debug.h linux-3.0.3/kernel/mutex-debug.h
58255--- linux-3.0.3/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
58256+++ linux-3.0.3/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
58257@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
58258 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58259 extern void debug_mutex_add_waiter(struct mutex *lock,
58260 struct mutex_waiter *waiter,
58261- struct thread_info *ti);
58262+ struct task_struct *task);
58263 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58264- struct thread_info *ti);
58265+ struct task_struct *task);
58266 extern void debug_mutex_unlock(struct mutex *lock);
58267 extern void debug_mutex_init(struct mutex *lock, const char *name,
58268 struct lock_class_key *key);
58269diff -urNp linux-3.0.3/kernel/padata.c linux-3.0.3/kernel/padata.c
58270--- linux-3.0.3/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
58271+++ linux-3.0.3/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
58272@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58273 padata->pd = pd;
58274 padata->cb_cpu = cb_cpu;
58275
58276- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58277- atomic_set(&pd->seq_nr, -1);
58278+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58279+ atomic_set_unchecked(&pd->seq_nr, -1);
58280
58281- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58282+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58283
58284 target_cpu = padata_cpu_hash(padata);
58285 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58286@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58287 padata_init_pqueues(pd);
58288 padata_init_squeues(pd);
58289 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58290- atomic_set(&pd->seq_nr, -1);
58291+ atomic_set_unchecked(&pd->seq_nr, -1);
58292 atomic_set(&pd->reorder_objects, 0);
58293 atomic_set(&pd->refcnt, 0);
58294 pd->pinst = pinst;
58295diff -urNp linux-3.0.3/kernel/panic.c linux-3.0.3/kernel/panic.c
58296--- linux-3.0.3/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
58297+++ linux-3.0.3/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
58298@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58299 const char *board;
58300
58301 printk(KERN_WARNING "------------[ cut here ]------------\n");
58302- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58303+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58304 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58305 if (board)
58306 printk(KERN_WARNING "Hardware name: %s\n", board);
58307@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58308 */
58309 void __stack_chk_fail(void)
58310 {
58311- panic("stack-protector: Kernel stack is corrupted in: %p\n",
58312+ dump_stack();
58313+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58314 __builtin_return_address(0));
58315 }
58316 EXPORT_SYMBOL(__stack_chk_fail);
58317diff -urNp linux-3.0.3/kernel/pid.c linux-3.0.3/kernel/pid.c
58318--- linux-3.0.3/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58319+++ linux-3.0.3/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58320@@ -33,6 +33,7 @@
58321 #include <linux/rculist.h>
58322 #include <linux/bootmem.h>
58323 #include <linux/hash.h>
58324+#include <linux/security.h>
58325 #include <linux/pid_namespace.h>
58326 #include <linux/init_task.h>
58327 #include <linux/syscalls.h>
58328@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58329
58330 int pid_max = PID_MAX_DEFAULT;
58331
58332-#define RESERVED_PIDS 300
58333+#define RESERVED_PIDS 500
58334
58335 int pid_max_min = RESERVED_PIDS + 1;
58336 int pid_max_max = PID_MAX_LIMIT;
58337@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58338 */
58339 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58340 {
58341+ struct task_struct *task;
58342+
58343 rcu_lockdep_assert(rcu_read_lock_held());
58344- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58345+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58346+
58347+ if (gr_pid_is_chrooted(task))
58348+ return NULL;
58349+
58350+ return task;
58351 }
58352
58353 struct task_struct *find_task_by_vpid(pid_t vnr)
58354@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58355 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58356 }
58357
58358+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58359+{
58360+ rcu_lockdep_assert(rcu_read_lock_held());
58361+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58362+}
58363+
58364 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58365 {
58366 struct pid *pid;
58367diff -urNp linux-3.0.3/kernel/posix-cpu-timers.c linux-3.0.3/kernel/posix-cpu-timers.c
58368--- linux-3.0.3/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58369+++ linux-3.0.3/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58370@@ -6,6 +6,7 @@
58371 #include <linux/posix-timers.h>
58372 #include <linux/errno.h>
58373 #include <linux/math64.h>
58374+#include <linux/security.h>
58375 #include <asm/uaccess.h>
58376 #include <linux/kernel_stat.h>
58377 #include <trace/events/timer.h>
58378@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58379
58380 static __init int init_posix_cpu_timers(void)
58381 {
58382- struct k_clock process = {
58383+ static struct k_clock process = {
58384 .clock_getres = process_cpu_clock_getres,
58385 .clock_get = process_cpu_clock_get,
58386 .timer_create = process_cpu_timer_create,
58387 .nsleep = process_cpu_nsleep,
58388 .nsleep_restart = process_cpu_nsleep_restart,
58389 };
58390- struct k_clock thread = {
58391+ static struct k_clock thread = {
58392 .clock_getres = thread_cpu_clock_getres,
58393 .clock_get = thread_cpu_clock_get,
58394 .timer_create = thread_cpu_timer_create,
58395diff -urNp linux-3.0.3/kernel/posix-timers.c linux-3.0.3/kernel/posix-timers.c
58396--- linux-3.0.3/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58397+++ linux-3.0.3/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58398@@ -43,6 +43,7 @@
58399 #include <linux/idr.h>
58400 #include <linux/posix-clock.h>
58401 #include <linux/posix-timers.h>
58402+#include <linux/grsecurity.h>
58403 #include <linux/syscalls.h>
58404 #include <linux/wait.h>
58405 #include <linux/workqueue.h>
58406@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58407 * which we beg off on and pass to do_sys_settimeofday().
58408 */
58409
58410-static struct k_clock posix_clocks[MAX_CLOCKS];
58411+static struct k_clock *posix_clocks[MAX_CLOCKS];
58412
58413 /*
58414 * These ones are defined below.
58415@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58416 */
58417 static __init int init_posix_timers(void)
58418 {
58419- struct k_clock clock_realtime = {
58420+ static struct k_clock clock_realtime = {
58421 .clock_getres = hrtimer_get_res,
58422 .clock_get = posix_clock_realtime_get,
58423 .clock_set = posix_clock_realtime_set,
58424@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58425 .timer_get = common_timer_get,
58426 .timer_del = common_timer_del,
58427 };
58428- struct k_clock clock_monotonic = {
58429+ static struct k_clock clock_monotonic = {
58430 .clock_getres = hrtimer_get_res,
58431 .clock_get = posix_ktime_get_ts,
58432 .nsleep = common_nsleep,
58433@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58434 .timer_get = common_timer_get,
58435 .timer_del = common_timer_del,
58436 };
58437- struct k_clock clock_monotonic_raw = {
58438+ static struct k_clock clock_monotonic_raw = {
58439 .clock_getres = hrtimer_get_res,
58440 .clock_get = posix_get_monotonic_raw,
58441 };
58442- struct k_clock clock_realtime_coarse = {
58443+ static struct k_clock clock_realtime_coarse = {
58444 .clock_getres = posix_get_coarse_res,
58445 .clock_get = posix_get_realtime_coarse,
58446 };
58447- struct k_clock clock_monotonic_coarse = {
58448+ static struct k_clock clock_monotonic_coarse = {
58449 .clock_getres = posix_get_coarse_res,
58450 .clock_get = posix_get_monotonic_coarse,
58451 };
58452- struct k_clock clock_boottime = {
58453+ static struct k_clock clock_boottime = {
58454 .clock_getres = hrtimer_get_res,
58455 .clock_get = posix_get_boottime,
58456 .nsleep = common_nsleep,
58457@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58458 .timer_del = common_timer_del,
58459 };
58460
58461+ pax_track_stack();
58462+
58463 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58464 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58465 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58466@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58467 return;
58468 }
58469
58470- posix_clocks[clock_id] = *new_clock;
58471+ posix_clocks[clock_id] = new_clock;
58472 }
58473 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58474
58475@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58476 return (id & CLOCKFD_MASK) == CLOCKFD ?
58477 &clock_posix_dynamic : &clock_posix_cpu;
58478
58479- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58480+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58481 return NULL;
58482- return &posix_clocks[id];
58483+ return posix_clocks[id];
58484 }
58485
58486 static int common_timer_create(struct k_itimer *new_timer)
58487@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58488 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58489 return -EFAULT;
58490
58491+ /* only the CLOCK_REALTIME clock can be set, all other clocks
58492+ have their clock_set fptr set to a nosettime dummy function
58493+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58494+ call common_clock_set, which calls do_sys_settimeofday, which
58495+ we hook
58496+ */
58497+
58498 return kc->clock_set(which_clock, &new_tp);
58499 }
58500
58501diff -urNp linux-3.0.3/kernel/power/poweroff.c linux-3.0.3/kernel/power/poweroff.c
58502--- linux-3.0.3/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58503+++ linux-3.0.3/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58504@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58505 .enable_mask = SYSRQ_ENABLE_BOOT,
58506 };
58507
58508-static int pm_sysrq_init(void)
58509+static int __init pm_sysrq_init(void)
58510 {
58511 register_sysrq_key('o', &sysrq_poweroff_op);
58512 return 0;
58513diff -urNp linux-3.0.3/kernel/power/process.c linux-3.0.3/kernel/power/process.c
58514--- linux-3.0.3/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58515+++ linux-3.0.3/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58516@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58517 u64 elapsed_csecs64;
58518 unsigned int elapsed_csecs;
58519 bool wakeup = false;
58520+ bool timedout = false;
58521
58522 do_gettimeofday(&start);
58523
58524@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58525
58526 while (true) {
58527 todo = 0;
58528+ if (time_after(jiffies, end_time))
58529+ timedout = true;
58530 read_lock(&tasklist_lock);
58531 do_each_thread(g, p) {
58532 if (frozen(p) || !freezable(p))
58533@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58534 * try_to_stop() after schedule() in ptrace/signal
58535 * stop sees TIF_FREEZE.
58536 */
58537- if (!task_is_stopped_or_traced(p) &&
58538- !freezer_should_skip(p))
58539+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58540 todo++;
58541+ if (timedout) {
58542+ printk(KERN_ERR "Task refusing to freeze:\n");
58543+ sched_show_task(p);
58544+ }
58545+ }
58546 } while_each_thread(g, p);
58547 read_unlock(&tasklist_lock);
58548
58549@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58550 todo += wq_busy;
58551 }
58552
58553- if (!todo || time_after(jiffies, end_time))
58554+ if (!todo || timedout)
58555 break;
58556
58557 if (pm_wakeup_pending()) {
58558diff -urNp linux-3.0.3/kernel/printk.c linux-3.0.3/kernel/printk.c
58559--- linux-3.0.3/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58560+++ linux-3.0.3/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58561@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58562 if (from_file && type != SYSLOG_ACTION_OPEN)
58563 return 0;
58564
58565+#ifdef CONFIG_GRKERNSEC_DMESG
58566+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58567+ return -EPERM;
58568+#endif
58569+
58570 if (syslog_action_restricted(type)) {
58571 if (capable(CAP_SYSLOG))
58572 return 0;
58573 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58574 if (capable(CAP_SYS_ADMIN)) {
58575- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58576+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58577 "but no CAP_SYSLOG (deprecated).\n");
58578 return 0;
58579 }
58580diff -urNp linux-3.0.3/kernel/profile.c linux-3.0.3/kernel/profile.c
58581--- linux-3.0.3/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58582+++ linux-3.0.3/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58583@@ -39,7 +39,7 @@ struct profile_hit {
58584 /* Oprofile timer tick hook */
58585 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58586
58587-static atomic_t *prof_buffer;
58588+static atomic_unchecked_t *prof_buffer;
58589 static unsigned long prof_len, prof_shift;
58590
58591 int prof_on __read_mostly;
58592@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58593 hits[i].pc = 0;
58594 continue;
58595 }
58596- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58597+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58598 hits[i].hits = hits[i].pc = 0;
58599 }
58600 }
58601@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58602 * Add the current hit(s) and flush the write-queue out
58603 * to the global buffer:
58604 */
58605- atomic_add(nr_hits, &prof_buffer[pc]);
58606+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58607 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58608- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58609+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58610 hits[i].pc = hits[i].hits = 0;
58611 }
58612 out:
58613@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58614 {
58615 unsigned long pc;
58616 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58617- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58618+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58619 }
58620 #endif /* !CONFIG_SMP */
58621
58622@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58623 return -EFAULT;
58624 buf++; p++; count--; read++;
58625 }
58626- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58627+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58628 if (copy_to_user(buf, (void *)pnt, count))
58629 return -EFAULT;
58630 read += count;
58631@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58632 }
58633 #endif
58634 profile_discard_flip_buffers();
58635- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58636+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58637 return count;
58638 }
58639
58640diff -urNp linux-3.0.3/kernel/ptrace.c linux-3.0.3/kernel/ptrace.c
58641--- linux-3.0.3/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58642+++ linux-3.0.3/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58643@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58644 return ret;
58645 }
58646
58647-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58648+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58649+ unsigned int log)
58650 {
58651 const struct cred *cred = current_cred(), *tcred;
58652
58653@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58654 cred->gid == tcred->sgid &&
58655 cred->gid == tcred->gid))
58656 goto ok;
58657- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58658+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58659+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58660 goto ok;
58661 rcu_read_unlock();
58662 return -EPERM;
58663@@ -167,7 +169,9 @@ ok:
58664 smp_rmb();
58665 if (task->mm)
58666 dumpable = get_dumpable(task->mm);
58667- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58668+ if (!dumpable &&
58669+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58670+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58671 return -EPERM;
58672
58673 return security_ptrace_access_check(task, mode);
58674@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58675 {
58676 int err;
58677 task_lock(task);
58678- err = __ptrace_may_access(task, mode);
58679+ err = __ptrace_may_access(task, mode, 0);
58680+ task_unlock(task);
58681+ return !err;
58682+}
58683+
58684+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58685+{
58686+ int err;
58687+ task_lock(task);
58688+ err = __ptrace_may_access(task, mode, 1);
58689 task_unlock(task);
58690 return !err;
58691 }
58692@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58693 goto out;
58694
58695 task_lock(task);
58696- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58697+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58698 task_unlock(task);
58699 if (retval)
58700 goto unlock_creds;
58701@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58702 goto unlock_tasklist;
58703
58704 task->ptrace = PT_PTRACED;
58705- if (task_ns_capable(task, CAP_SYS_PTRACE))
58706+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58707 task->ptrace |= PT_PTRACE_CAP;
58708
58709 __ptrace_link(task, current);
58710@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58711 {
58712 int copied = 0;
58713
58714+ pax_track_stack();
58715+
58716 while (len > 0) {
58717 char buf[128];
58718 int this_len, retval;
58719@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58720 break;
58721 return -EIO;
58722 }
58723- if (copy_to_user(dst, buf, retval))
58724+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58725 return -EFAULT;
58726 copied += retval;
58727 src += retval;
58728@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58729 {
58730 int copied = 0;
58731
58732+ pax_track_stack();
58733+
58734 while (len > 0) {
58735 char buf[128];
58736 int this_len, retval;
58737@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58738 {
58739 int ret = -EIO;
58740 siginfo_t siginfo;
58741- void __user *datavp = (void __user *) data;
58742+ void __user *datavp = (__force void __user *) data;
58743 unsigned long __user *datalp = datavp;
58744
58745+ pax_track_stack();
58746+
58747 switch (request) {
58748 case PTRACE_PEEKTEXT:
58749 case PTRACE_PEEKDATA:
58750@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58751 goto out;
58752 }
58753
58754+ if (gr_handle_ptrace(child, request)) {
58755+ ret = -EPERM;
58756+ goto out_put_task_struct;
58757+ }
58758+
58759 if (request == PTRACE_ATTACH) {
58760 ret = ptrace_attach(child);
58761 /*
58762 * Some architectures need to do book-keeping after
58763 * a ptrace attach.
58764 */
58765- if (!ret)
58766+ if (!ret) {
58767 arch_ptrace_attach(child);
58768+ gr_audit_ptrace(child);
58769+ }
58770 goto out_put_task_struct;
58771 }
58772
58773@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58774 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58775 if (copied != sizeof(tmp))
58776 return -EIO;
58777- return put_user(tmp, (unsigned long __user *)data);
58778+ return put_user(tmp, (__force unsigned long __user *)data);
58779 }
58780
58781 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58782@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58783 siginfo_t siginfo;
58784 int ret;
58785
58786+ pax_track_stack();
58787+
58788 switch (request) {
58789 case PTRACE_PEEKTEXT:
58790 case PTRACE_PEEKDATA:
58791@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58792 goto out;
58793 }
58794
58795+ if (gr_handle_ptrace(child, request)) {
58796+ ret = -EPERM;
58797+ goto out_put_task_struct;
58798+ }
58799+
58800 if (request == PTRACE_ATTACH) {
58801 ret = ptrace_attach(child);
58802 /*
58803 * Some architectures need to do book-keeping after
58804 * a ptrace attach.
58805 */
58806- if (!ret)
58807+ if (!ret) {
58808 arch_ptrace_attach(child);
58809+ gr_audit_ptrace(child);
58810+ }
58811 goto out_put_task_struct;
58812 }
58813
58814diff -urNp linux-3.0.3/kernel/rcutorture.c linux-3.0.3/kernel/rcutorture.c
58815--- linux-3.0.3/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58816+++ linux-3.0.3/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58817@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58818 { 0 };
58819 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58820 { 0 };
58821-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58822-static atomic_t n_rcu_torture_alloc;
58823-static atomic_t n_rcu_torture_alloc_fail;
58824-static atomic_t n_rcu_torture_free;
58825-static atomic_t n_rcu_torture_mberror;
58826-static atomic_t n_rcu_torture_error;
58827+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58828+static atomic_unchecked_t n_rcu_torture_alloc;
58829+static atomic_unchecked_t n_rcu_torture_alloc_fail;
58830+static atomic_unchecked_t n_rcu_torture_free;
58831+static atomic_unchecked_t n_rcu_torture_mberror;
58832+static atomic_unchecked_t n_rcu_torture_error;
58833 static long n_rcu_torture_boost_ktrerror;
58834 static long n_rcu_torture_boost_rterror;
58835 static long n_rcu_torture_boost_failure;
58836@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58837
58838 spin_lock_bh(&rcu_torture_lock);
58839 if (list_empty(&rcu_torture_freelist)) {
58840- atomic_inc(&n_rcu_torture_alloc_fail);
58841+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58842 spin_unlock_bh(&rcu_torture_lock);
58843 return NULL;
58844 }
58845- atomic_inc(&n_rcu_torture_alloc);
58846+ atomic_inc_unchecked(&n_rcu_torture_alloc);
58847 p = rcu_torture_freelist.next;
58848 list_del_init(p);
58849 spin_unlock_bh(&rcu_torture_lock);
58850@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58851 static void
58852 rcu_torture_free(struct rcu_torture *p)
58853 {
58854- atomic_inc(&n_rcu_torture_free);
58855+ atomic_inc_unchecked(&n_rcu_torture_free);
58856 spin_lock_bh(&rcu_torture_lock);
58857 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58858 spin_unlock_bh(&rcu_torture_lock);
58859@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58860 i = rp->rtort_pipe_count;
58861 if (i > RCU_TORTURE_PIPE_LEN)
58862 i = RCU_TORTURE_PIPE_LEN;
58863- atomic_inc(&rcu_torture_wcount[i]);
58864+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58865 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58866 rp->rtort_mbtest = 0;
58867 rcu_torture_free(rp);
58868@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58869 i = rp->rtort_pipe_count;
58870 if (i > RCU_TORTURE_PIPE_LEN)
58871 i = RCU_TORTURE_PIPE_LEN;
58872- atomic_inc(&rcu_torture_wcount[i]);
58873+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58874 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58875 rp->rtort_mbtest = 0;
58876 list_del(&rp->rtort_free);
58877@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58878 i = old_rp->rtort_pipe_count;
58879 if (i > RCU_TORTURE_PIPE_LEN)
58880 i = RCU_TORTURE_PIPE_LEN;
58881- atomic_inc(&rcu_torture_wcount[i]);
58882+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58883 old_rp->rtort_pipe_count++;
58884 cur_ops->deferred_free(old_rp);
58885 }
58886@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58887 return;
58888 }
58889 if (p->rtort_mbtest == 0)
58890- atomic_inc(&n_rcu_torture_mberror);
58891+ atomic_inc_unchecked(&n_rcu_torture_mberror);
58892 spin_lock(&rand_lock);
58893 cur_ops->read_delay(&rand);
58894 n_rcu_torture_timers++;
58895@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58896 continue;
58897 }
58898 if (p->rtort_mbtest == 0)
58899- atomic_inc(&n_rcu_torture_mberror);
58900+ atomic_inc_unchecked(&n_rcu_torture_mberror);
58901 cur_ops->read_delay(&rand);
58902 preempt_disable();
58903 pipe_count = p->rtort_pipe_count;
58904@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58905 rcu_torture_current,
58906 rcu_torture_current_version,
58907 list_empty(&rcu_torture_freelist),
58908- atomic_read(&n_rcu_torture_alloc),
58909- atomic_read(&n_rcu_torture_alloc_fail),
58910- atomic_read(&n_rcu_torture_free),
58911- atomic_read(&n_rcu_torture_mberror),
58912+ atomic_read_unchecked(&n_rcu_torture_alloc),
58913+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58914+ atomic_read_unchecked(&n_rcu_torture_free),
58915+ atomic_read_unchecked(&n_rcu_torture_mberror),
58916 n_rcu_torture_boost_ktrerror,
58917 n_rcu_torture_boost_rterror,
58918 n_rcu_torture_boost_failure,
58919 n_rcu_torture_boosts,
58920 n_rcu_torture_timers);
58921- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58922+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58923 n_rcu_torture_boost_ktrerror != 0 ||
58924 n_rcu_torture_boost_rterror != 0 ||
58925 n_rcu_torture_boost_failure != 0)
58926@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58927 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58928 if (i > 1) {
58929 cnt += sprintf(&page[cnt], "!!! ");
58930- atomic_inc(&n_rcu_torture_error);
58931+ atomic_inc_unchecked(&n_rcu_torture_error);
58932 WARN_ON_ONCE(1);
58933 }
58934 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58935@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58936 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58937 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58938 cnt += sprintf(&page[cnt], " %d",
58939- atomic_read(&rcu_torture_wcount[i]));
58940+ atomic_read_unchecked(&rcu_torture_wcount[i]));
58941 }
58942 cnt += sprintf(&page[cnt], "\n");
58943 if (cur_ops->stats)
58944@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58945
58946 if (cur_ops->cleanup)
58947 cur_ops->cleanup();
58948- if (atomic_read(&n_rcu_torture_error))
58949+ if (atomic_read_unchecked(&n_rcu_torture_error))
58950 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58951 else
58952 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58953@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58954
58955 rcu_torture_current = NULL;
58956 rcu_torture_current_version = 0;
58957- atomic_set(&n_rcu_torture_alloc, 0);
58958- atomic_set(&n_rcu_torture_alloc_fail, 0);
58959- atomic_set(&n_rcu_torture_free, 0);
58960- atomic_set(&n_rcu_torture_mberror, 0);
58961- atomic_set(&n_rcu_torture_error, 0);
58962+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58963+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58964+ atomic_set_unchecked(&n_rcu_torture_free, 0);
58965+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58966+ atomic_set_unchecked(&n_rcu_torture_error, 0);
58967 n_rcu_torture_boost_ktrerror = 0;
58968 n_rcu_torture_boost_rterror = 0;
58969 n_rcu_torture_boost_failure = 0;
58970 n_rcu_torture_boosts = 0;
58971 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58972- atomic_set(&rcu_torture_wcount[i], 0);
58973+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58974 for_each_possible_cpu(cpu) {
58975 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58976 per_cpu(rcu_torture_count, cpu)[i] = 0;
58977diff -urNp linux-3.0.3/kernel/rcutree.c linux-3.0.3/kernel/rcutree.c
58978--- linux-3.0.3/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58979+++ linux-3.0.3/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58980@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58981 /*
58982 * Do softirq processing for the current CPU.
58983 */
58984-static void rcu_process_callbacks(struct softirq_action *unused)
58985+static void rcu_process_callbacks(void)
58986 {
58987 __rcu_process_callbacks(&rcu_sched_state,
58988 &__get_cpu_var(rcu_sched_data));
58989diff -urNp linux-3.0.3/kernel/rcutree_plugin.h linux-3.0.3/kernel/rcutree_plugin.h
58990--- linux-3.0.3/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58991+++ linux-3.0.3/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58992@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58993
58994 /* Clean up and exit. */
58995 smp_mb(); /* ensure expedited GP seen before counter increment. */
58996- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58997+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58998 unlock_mb_ret:
58999 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59000 mb_ret:
59001@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59002
59003 #else /* #ifndef CONFIG_SMP */
59004
59005-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59006-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59007+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59008+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59009
59010 static int synchronize_sched_expedited_cpu_stop(void *data)
59011 {
59012@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
59013 int firstsnap, s, snap, trycount = 0;
59014
59015 /* Note that atomic_inc_return() implies full memory barrier. */
59016- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59017+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59018 get_online_cpus();
59019
59020 /*
59021@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
59022 }
59023
59024 /* Check to see if someone else did our work for us. */
59025- s = atomic_read(&sync_sched_expedited_done);
59026+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59027 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59028 smp_mb(); /* ensure test happens before caller kfree */
59029 return;
59030@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
59031 * grace period works for us.
59032 */
59033 get_online_cpus();
59034- snap = atomic_read(&sync_sched_expedited_started) - 1;
59035+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59036 smp_mb(); /* ensure read is before try_stop_cpus(). */
59037 }
59038
59039@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
59040 * than we did beat us to the punch.
59041 */
59042 do {
59043- s = atomic_read(&sync_sched_expedited_done);
59044+ s = atomic_read_unchecked(&sync_sched_expedited_done);
59045 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59046 smp_mb(); /* ensure test happens before caller kfree */
59047 break;
59048 }
59049- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59050+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59051
59052 put_online_cpus();
59053 }
59054diff -urNp linux-3.0.3/kernel/relay.c linux-3.0.3/kernel/relay.c
59055--- linux-3.0.3/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
59056+++ linux-3.0.3/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
59057@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59058 };
59059 ssize_t ret;
59060
59061+ pax_track_stack();
59062+
59063 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59064 return 0;
59065 if (splice_grow_spd(pipe, &spd))
59066diff -urNp linux-3.0.3/kernel/resource.c linux-3.0.3/kernel/resource.c
59067--- linux-3.0.3/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
59068+++ linux-3.0.3/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
59069@@ -141,8 +141,18 @@ static const struct file_operations proc
59070
59071 static int __init ioresources_init(void)
59072 {
59073+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59074+#ifdef CONFIG_GRKERNSEC_PROC_USER
59075+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59076+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59077+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59078+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59079+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59080+#endif
59081+#else
59082 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59083 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59084+#endif
59085 return 0;
59086 }
59087 __initcall(ioresources_init);
59088diff -urNp linux-3.0.3/kernel/rtmutex-tester.c linux-3.0.3/kernel/rtmutex-tester.c
59089--- linux-3.0.3/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
59090+++ linux-3.0.3/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
59091@@ -20,7 +20,7 @@
59092 #define MAX_RT_TEST_MUTEXES 8
59093
59094 static spinlock_t rttest_lock;
59095-static atomic_t rttest_event;
59096+static atomic_unchecked_t rttest_event;
59097
59098 struct test_thread_data {
59099 int opcode;
59100@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59101
59102 case RTTEST_LOCKCONT:
59103 td->mutexes[td->opdata] = 1;
59104- td->event = atomic_add_return(1, &rttest_event);
59105+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59106 return 0;
59107
59108 case RTTEST_RESET:
59109@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59110 return 0;
59111
59112 case RTTEST_RESETEVENT:
59113- atomic_set(&rttest_event, 0);
59114+ atomic_set_unchecked(&rttest_event, 0);
59115 return 0;
59116
59117 default:
59118@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59119 return ret;
59120
59121 td->mutexes[id] = 1;
59122- td->event = atomic_add_return(1, &rttest_event);
59123+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59124 rt_mutex_lock(&mutexes[id]);
59125- td->event = atomic_add_return(1, &rttest_event);
59126+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59127 td->mutexes[id] = 4;
59128 return 0;
59129
59130@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59131 return ret;
59132
59133 td->mutexes[id] = 1;
59134- td->event = atomic_add_return(1, &rttest_event);
59135+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59136 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59137- td->event = atomic_add_return(1, &rttest_event);
59138+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59139 td->mutexes[id] = ret ? 0 : 4;
59140 return ret ? -EINTR : 0;
59141
59142@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59143 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59144 return ret;
59145
59146- td->event = atomic_add_return(1, &rttest_event);
59147+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59148 rt_mutex_unlock(&mutexes[id]);
59149- td->event = atomic_add_return(1, &rttest_event);
59150+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59151 td->mutexes[id] = 0;
59152 return 0;
59153
59154@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59155 break;
59156
59157 td->mutexes[dat] = 2;
59158- td->event = atomic_add_return(1, &rttest_event);
59159+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59160 break;
59161
59162 default:
59163@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59164 return;
59165
59166 td->mutexes[dat] = 3;
59167- td->event = atomic_add_return(1, &rttest_event);
59168+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59169 break;
59170
59171 case RTTEST_LOCKNOWAIT:
59172@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59173 return;
59174
59175 td->mutexes[dat] = 1;
59176- td->event = atomic_add_return(1, &rttest_event);
59177+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59178 return;
59179
59180 default:
59181diff -urNp linux-3.0.3/kernel/sched_autogroup.c linux-3.0.3/kernel/sched_autogroup.c
59182--- linux-3.0.3/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
59183+++ linux-3.0.3/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
59184@@ -7,7 +7,7 @@
59185
59186 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59187 static struct autogroup autogroup_default;
59188-static atomic_t autogroup_seq_nr;
59189+static atomic_unchecked_t autogroup_seq_nr;
59190
59191 static void __init autogroup_init(struct task_struct *init_task)
59192 {
59193@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59194
59195 kref_init(&ag->kref);
59196 init_rwsem(&ag->lock);
59197- ag->id = atomic_inc_return(&autogroup_seq_nr);
59198+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59199 ag->tg = tg;
59200 #ifdef CONFIG_RT_GROUP_SCHED
59201 /*
59202diff -urNp linux-3.0.3/kernel/sched.c linux-3.0.3/kernel/sched.c
59203--- linux-3.0.3/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
59204+++ linux-3.0.3/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
59205@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
59206 struct rq *rq;
59207 int cpu;
59208
59209+ pax_track_stack();
59210+
59211 need_resched:
59212 preempt_disable();
59213 cpu = smp_processor_id();
59214@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
59215 /* convert nice value [19,-20] to rlimit style value [1,40] */
59216 int nice_rlim = 20 - nice;
59217
59218+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59219+
59220 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59221 capable(CAP_SYS_NICE));
59222 }
59223@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59224 if (nice > 19)
59225 nice = 19;
59226
59227- if (increment < 0 && !can_nice(current, nice))
59228+ if (increment < 0 && (!can_nice(current, nice) ||
59229+ gr_handle_chroot_nice()))
59230 return -EPERM;
59231
59232 retval = security_task_setnice(current, nice);
59233@@ -5111,6 +5116,7 @@ recheck:
59234 unsigned long rlim_rtprio =
59235 task_rlimit(p, RLIMIT_RTPRIO);
59236
59237+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59238 /* can't set/change the rt policy */
59239 if (policy != p->policy && !rlim_rtprio)
59240 return -EPERM;
59241diff -urNp linux-3.0.3/kernel/sched_fair.c linux-3.0.3/kernel/sched_fair.c
59242--- linux-3.0.3/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
59243+++ linux-3.0.3/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
59244@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
59245 * run_rebalance_domains is triggered when needed from the scheduler tick.
59246 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59247 */
59248-static void run_rebalance_domains(struct softirq_action *h)
59249+static void run_rebalance_domains(void)
59250 {
59251 int this_cpu = smp_processor_id();
59252 struct rq *this_rq = cpu_rq(this_cpu);
59253diff -urNp linux-3.0.3/kernel/signal.c linux-3.0.3/kernel/signal.c
59254--- linux-3.0.3/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
59255+++ linux-3.0.3/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
59256@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59257
59258 int print_fatal_signals __read_mostly;
59259
59260-static void __user *sig_handler(struct task_struct *t, int sig)
59261+static __sighandler_t sig_handler(struct task_struct *t, int sig)
59262 {
59263 return t->sighand->action[sig - 1].sa.sa_handler;
59264 }
59265
59266-static int sig_handler_ignored(void __user *handler, int sig)
59267+static int sig_handler_ignored(__sighandler_t handler, int sig)
59268 {
59269 /* Is it explicitly or implicitly ignored? */
59270 return handler == SIG_IGN ||
59271@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59272 static int sig_task_ignored(struct task_struct *t, int sig,
59273 int from_ancestor_ns)
59274 {
59275- void __user *handler;
59276+ __sighandler_t handler;
59277
59278 handler = sig_handler(t, sig);
59279
59280@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
59281 atomic_inc(&user->sigpending);
59282 rcu_read_unlock();
59283
59284+ if (!override_rlimit)
59285+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59286+
59287 if (override_rlimit ||
59288 atomic_read(&user->sigpending) <=
59289 task_rlimit(t, RLIMIT_SIGPENDING)) {
59290@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
59291
59292 int unhandled_signal(struct task_struct *tsk, int sig)
59293 {
59294- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59295+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59296 if (is_global_init(tsk))
59297 return 1;
59298 if (handler != SIG_IGN && handler != SIG_DFL)
59299@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
59300 }
59301 }
59302
59303+ /* allow glibc communication via tgkill to other threads in our
59304+ thread group */
59305+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59306+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59307+ && gr_handle_signal(t, sig))
59308+ return -EPERM;
59309+
59310 return security_task_kill(t, info, sig, 0);
59311 }
59312
59313@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59314 return send_signal(sig, info, p, 1);
59315 }
59316
59317-static int
59318+int
59319 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59320 {
59321 return send_signal(sig, info, t, 0);
59322@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59323 unsigned long int flags;
59324 int ret, blocked, ignored;
59325 struct k_sigaction *action;
59326+ int is_unhandled = 0;
59327
59328 spin_lock_irqsave(&t->sighand->siglock, flags);
59329 action = &t->sighand->action[sig-1];
59330@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59331 }
59332 if (action->sa.sa_handler == SIG_DFL)
59333 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59334+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59335+ is_unhandled = 1;
59336 ret = specific_send_sig_info(sig, info, t);
59337 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59338
59339+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
59340+ normal operation */
59341+ if (is_unhandled) {
59342+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59343+ gr_handle_crash(t, sig);
59344+ }
59345+
59346 return ret;
59347 }
59348
59349@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59350 ret = check_kill_permission(sig, info, p);
59351 rcu_read_unlock();
59352
59353- if (!ret && sig)
59354+ if (!ret && sig) {
59355 ret = do_send_sig_info(sig, info, p, true);
59356+ if (!ret)
59357+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59358+ }
59359
59360 return ret;
59361 }
59362@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59363 {
59364 siginfo_t info;
59365
59366+ pax_track_stack();
59367+
59368 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59369
59370 memset(&info, 0, sizeof info);
59371@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59372 int error = -ESRCH;
59373
59374 rcu_read_lock();
59375- p = find_task_by_vpid(pid);
59376+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59377+ /* allow glibc communication via tgkill to other threads in our
59378+ thread group */
59379+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59380+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
59381+ p = find_task_by_vpid_unrestricted(pid);
59382+ else
59383+#endif
59384+ p = find_task_by_vpid(pid);
59385 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59386 error = check_kill_permission(sig, info, p);
59387 /*
59388diff -urNp linux-3.0.3/kernel/smp.c linux-3.0.3/kernel/smp.c
59389--- linux-3.0.3/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59390+++ linux-3.0.3/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59391@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59392 }
59393 EXPORT_SYMBOL(smp_call_function);
59394
59395-void ipi_call_lock(void)
59396+void ipi_call_lock(void) __acquires(call_function.lock)
59397 {
59398 raw_spin_lock(&call_function.lock);
59399 }
59400
59401-void ipi_call_unlock(void)
59402+void ipi_call_unlock(void) __releases(call_function.lock)
59403 {
59404 raw_spin_unlock(&call_function.lock);
59405 }
59406
59407-void ipi_call_lock_irq(void)
59408+void ipi_call_lock_irq(void) __acquires(call_function.lock)
59409 {
59410 raw_spin_lock_irq(&call_function.lock);
59411 }
59412
59413-void ipi_call_unlock_irq(void)
59414+void ipi_call_unlock_irq(void) __releases(call_function.lock)
59415 {
59416 raw_spin_unlock_irq(&call_function.lock);
59417 }
59418diff -urNp linux-3.0.3/kernel/softirq.c linux-3.0.3/kernel/softirq.c
59419--- linux-3.0.3/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59420+++ linux-3.0.3/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59421@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59422
59423 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59424
59425-char *softirq_to_name[NR_SOFTIRQS] = {
59426+const char * const softirq_to_name[NR_SOFTIRQS] = {
59427 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59428 "TASKLET", "SCHED", "HRTIMER", "RCU"
59429 };
59430@@ -235,7 +235,7 @@ restart:
59431 kstat_incr_softirqs_this_cpu(vec_nr);
59432
59433 trace_softirq_entry(vec_nr);
59434- h->action(h);
59435+ h->action();
59436 trace_softirq_exit(vec_nr);
59437 if (unlikely(prev_count != preempt_count())) {
59438 printk(KERN_ERR "huh, entered softirq %u %s %p"
59439@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59440 local_irq_restore(flags);
59441 }
59442
59443-void open_softirq(int nr, void (*action)(struct softirq_action *))
59444+void open_softirq(int nr, void (*action)(void))
59445 {
59446- softirq_vec[nr].action = action;
59447+ pax_open_kernel();
59448+ *(void **)&softirq_vec[nr].action = action;
59449+ pax_close_kernel();
59450 }
59451
59452 /*
59453@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59454
59455 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59456
59457-static void tasklet_action(struct softirq_action *a)
59458+static void tasklet_action(void)
59459 {
59460 struct tasklet_struct *list;
59461
59462@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59463 }
59464 }
59465
59466-static void tasklet_hi_action(struct softirq_action *a)
59467+static void tasklet_hi_action(void)
59468 {
59469 struct tasklet_struct *list;
59470
59471diff -urNp linux-3.0.3/kernel/sys.c linux-3.0.3/kernel/sys.c
59472--- linux-3.0.3/kernel/sys.c 2011-07-21 22:17:23.000000000 -0400
59473+++ linux-3.0.3/kernel/sys.c 2011-08-25 17:24:58.000000000 -0400
59474@@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59475 error = -EACCES;
59476 goto out;
59477 }
59478+
59479+ if (gr_handle_chroot_setpriority(p, niceval)) {
59480+ error = -EACCES;
59481+ goto out;
59482+ }
59483+
59484 no_nice = security_task_setnice(p, niceval);
59485 if (no_nice) {
59486 error = no_nice;
59487@@ -537,6 +543,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59488 goto error;
59489 }
59490
59491+ if (gr_check_group_change(new->gid, new->egid, -1))
59492+ goto error;
59493+
59494 if (rgid != (gid_t) -1 ||
59495 (egid != (gid_t) -1 && egid != old->gid))
59496 new->sgid = new->egid;
59497@@ -566,6 +575,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59498 old = current_cred();
59499
59500 retval = -EPERM;
59501+
59502+ if (gr_check_group_change(gid, gid, gid))
59503+ goto error;
59504+
59505 if (nsown_capable(CAP_SETGID))
59506 new->gid = new->egid = new->sgid = new->fsgid = gid;
59507 else if (gid == old->gid || gid == old->sgid)
59508@@ -591,11 +604,18 @@ static int set_user(struct cred *new)
59509 if (!new_user)
59510 return -EAGAIN;
59511
59512+ /*
59513+ * We don't fail in case of NPROC limit excess here because too many
59514+ * poorly written programs don't check set*uid() return code, assuming
59515+ * it never fails if called by root. We may still enforce NPROC limit
59516+ * for programs doing set*uid()+execve() by harmlessly deferring the
59517+ * failure to the execve() stage.
59518+ */
59519 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
59520- new_user != INIT_USER) {
59521- free_uid(new_user);
59522- return -EAGAIN;
59523- }
59524+ new_user != INIT_USER)
59525+ current->flags |= PF_NPROC_EXCEEDED;
59526+ else
59527+ current->flags &= ~PF_NPROC_EXCEEDED;
59528
59529 free_uid(new->user);
59530 new->user = new_user;
59531@@ -646,6 +666,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59532 goto error;
59533 }
59534
59535+ if (gr_check_user_change(new->uid, new->euid, -1))
59536+ goto error;
59537+
59538 if (new->uid != old->uid) {
59539 retval = set_user(new);
59540 if (retval < 0)
59541@@ -690,6 +713,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59542 old = current_cred();
59543
59544 retval = -EPERM;
59545+
59546+ if (gr_check_crash_uid(uid))
59547+ goto error;
59548+ if (gr_check_user_change(uid, uid, uid))
59549+ goto error;
59550+
59551 if (nsown_capable(CAP_SETUID)) {
59552 new->suid = new->uid = uid;
59553 if (uid != old->uid) {
59554@@ -744,6 +773,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59555 goto error;
59556 }
59557
59558+ if (gr_check_user_change(ruid, euid, -1))
59559+ goto error;
59560+
59561 if (ruid != (uid_t) -1) {
59562 new->uid = ruid;
59563 if (ruid != old->uid) {
59564@@ -808,6 +840,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59565 goto error;
59566 }
59567
59568+ if (gr_check_group_change(rgid, egid, -1))
59569+ goto error;
59570+
59571 if (rgid != (gid_t) -1)
59572 new->gid = rgid;
59573 if (egid != (gid_t) -1)
59574@@ -854,6 +889,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59575 old = current_cred();
59576 old_fsuid = old->fsuid;
59577
59578+ if (gr_check_user_change(-1, -1, uid))
59579+ goto error;
59580+
59581 if (uid == old->uid || uid == old->euid ||
59582 uid == old->suid || uid == old->fsuid ||
59583 nsown_capable(CAP_SETUID)) {
59584@@ -864,6 +902,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59585 }
59586 }
59587
59588+error:
59589 abort_creds(new);
59590 return old_fsuid;
59591
59592@@ -890,12 +929,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59593 if (gid == old->gid || gid == old->egid ||
59594 gid == old->sgid || gid == old->fsgid ||
59595 nsown_capable(CAP_SETGID)) {
59596+ if (gr_check_group_change(-1, -1, gid))
59597+ goto error;
59598+
59599 if (gid != old_fsgid) {
59600 new->fsgid = gid;
59601 goto change_okay;
59602 }
59603 }
59604
59605+error:
59606 abort_creds(new);
59607 return old_fsgid;
59608
59609@@ -1642,7 +1685,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59610 error = get_dumpable(me->mm);
59611 break;
59612 case PR_SET_DUMPABLE:
59613- if (arg2 < 0 || arg2 > 1) {
59614+ if (arg2 > 1) {
59615 error = -EINVAL;
59616 break;
59617 }
59618diff -urNp linux-3.0.3/kernel/sysctl.c linux-3.0.3/kernel/sysctl.c
59619--- linux-3.0.3/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59620+++ linux-3.0.3/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59621@@ -85,6 +85,13 @@
59622
59623
59624 #if defined(CONFIG_SYSCTL)
59625+#include <linux/grsecurity.h>
59626+#include <linux/grinternal.h>
59627+
59628+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59629+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59630+ const int op);
59631+extern int gr_handle_chroot_sysctl(const int op);
59632
59633 /* External variables not in a header file. */
59634 extern int sysctl_overcommit_memory;
59635@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59636 }
59637
59638 #endif
59639+extern struct ctl_table grsecurity_table[];
59640
59641 static struct ctl_table root_table[];
59642 static struct ctl_table_root sysctl_table_root;
59643@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59644 int sysctl_legacy_va_layout;
59645 #endif
59646
59647+#ifdef CONFIG_PAX_SOFTMODE
59648+static ctl_table pax_table[] = {
59649+ {
59650+ .procname = "softmode",
59651+ .data = &pax_softmode,
59652+ .maxlen = sizeof(unsigned int),
59653+ .mode = 0600,
59654+ .proc_handler = &proc_dointvec,
59655+ },
59656+
59657+ { }
59658+};
59659+#endif
59660+
59661 /* The default sysctl tables: */
59662
59663 static struct ctl_table root_table[] = {
59664@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59665 #endif
59666
59667 static struct ctl_table kern_table[] = {
59668+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59669+ {
59670+ .procname = "grsecurity",
59671+ .mode = 0500,
59672+ .child = grsecurity_table,
59673+ },
59674+#endif
59675+
59676+#ifdef CONFIG_PAX_SOFTMODE
59677+ {
59678+ .procname = "pax",
59679+ .mode = 0500,
59680+ .child = pax_table,
59681+ },
59682+#endif
59683+
59684 {
59685 .procname = "sched_child_runs_first",
59686 .data = &sysctl_sched_child_runs_first,
59687@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59688 .data = &modprobe_path,
59689 .maxlen = KMOD_PATH_LEN,
59690 .mode = 0644,
59691- .proc_handler = proc_dostring,
59692+ .proc_handler = proc_dostring_modpriv,
59693 },
59694 {
59695 .procname = "modules_disabled",
59696@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59697 .extra1 = &zero,
59698 .extra2 = &one,
59699 },
59700+#endif
59701 {
59702 .procname = "kptr_restrict",
59703 .data = &kptr_restrict,
59704 .maxlen = sizeof(int),
59705 .mode = 0644,
59706 .proc_handler = proc_dmesg_restrict,
59707+#ifdef CONFIG_GRKERNSEC_HIDESYM
59708+ .extra1 = &two,
59709+#else
59710 .extra1 = &zero,
59711+#endif
59712 .extra2 = &two,
59713 },
59714-#endif
59715 {
59716 .procname = "ngroups_max",
59717 .data = &ngroups_max,
59718@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59719 .proc_handler = proc_dointvec_minmax,
59720 .extra1 = &zero,
59721 },
59722+ {
59723+ .procname = "heap_stack_gap",
59724+ .data = &sysctl_heap_stack_gap,
59725+ .maxlen = sizeof(sysctl_heap_stack_gap),
59726+ .mode = 0644,
59727+ .proc_handler = proc_doulongvec_minmax,
59728+ },
59729 #else
59730 {
59731 .procname = "nr_trim_pages",
59732@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59733 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59734 {
59735 int mode;
59736+ int error;
59737+
59738+ if (table->parent != NULL && table->parent->procname != NULL &&
59739+ table->procname != NULL &&
59740+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59741+ return -EACCES;
59742+ if (gr_handle_chroot_sysctl(op))
59743+ return -EACCES;
59744+ error = gr_handle_sysctl(table, op);
59745+ if (error)
59746+ return error;
59747
59748 if (root->permissions)
59749 mode = root->permissions(root, current->nsproxy, table);
59750@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59751 buffer, lenp, ppos);
59752 }
59753
59754+int proc_dostring_modpriv(struct ctl_table *table, int write,
59755+ void __user *buffer, size_t *lenp, loff_t *ppos)
59756+{
59757+ if (write && !capable(CAP_SYS_MODULE))
59758+ return -EPERM;
59759+
59760+ return _proc_do_string(table->data, table->maxlen, write,
59761+ buffer, lenp, ppos);
59762+}
59763+
59764 static size_t proc_skip_spaces(char **buf)
59765 {
59766 size_t ret;
59767@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59768 len = strlen(tmp);
59769 if (len > *size)
59770 len = *size;
59771+ if (len > sizeof(tmp))
59772+ len = sizeof(tmp);
59773 if (copy_to_user(*buf, tmp, len))
59774 return -EFAULT;
59775 *size -= len;
59776@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59777 *i = val;
59778 } else {
59779 val = convdiv * (*i) / convmul;
59780- if (!first)
59781+ if (!first) {
59782 err = proc_put_char(&buffer, &left, '\t');
59783+ if (err)
59784+ break;
59785+ }
59786 err = proc_put_long(&buffer, &left, val, false);
59787 if (err)
59788 break;
59789@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59790 return -ENOSYS;
59791 }
59792
59793+int proc_dostring_modpriv(struct ctl_table *table, int write,
59794+ void __user *buffer, size_t *lenp, loff_t *ppos)
59795+{
59796+ return -ENOSYS;
59797+}
59798+
59799 int proc_dointvec(struct ctl_table *table, int write,
59800 void __user *buffer, size_t *lenp, loff_t *ppos)
59801 {
59802@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59803 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59804 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59805 EXPORT_SYMBOL(proc_dostring);
59806+EXPORT_SYMBOL(proc_dostring_modpriv);
59807 EXPORT_SYMBOL(proc_doulongvec_minmax);
59808 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59809 EXPORT_SYMBOL(register_sysctl_table);
59810diff -urNp linux-3.0.3/kernel/sysctl_check.c linux-3.0.3/kernel/sysctl_check.c
59811--- linux-3.0.3/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59812+++ linux-3.0.3/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59813@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59814 set_fail(&fail, table, "Directory with extra2");
59815 } else {
59816 if ((table->proc_handler == proc_dostring) ||
59817+ (table->proc_handler == proc_dostring_modpriv) ||
59818 (table->proc_handler == proc_dointvec) ||
59819 (table->proc_handler == proc_dointvec_minmax) ||
59820 (table->proc_handler == proc_dointvec_jiffies) ||
59821diff -urNp linux-3.0.3/kernel/taskstats.c linux-3.0.3/kernel/taskstats.c
59822--- linux-3.0.3/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59823+++ linux-3.0.3/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59824@@ -27,9 +27,12 @@
59825 #include <linux/cgroup.h>
59826 #include <linux/fs.h>
59827 #include <linux/file.h>
59828+#include <linux/grsecurity.h>
59829 #include <net/genetlink.h>
59830 #include <asm/atomic.h>
59831
59832+extern int gr_is_taskstats_denied(int pid);
59833+
59834 /*
59835 * Maximum length of a cpumask that can be specified in
59836 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59837@@ -558,6 +561,9 @@ err:
59838
59839 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59840 {
59841+ if (gr_is_taskstats_denied(current->pid))
59842+ return -EACCES;
59843+
59844 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59845 return cmd_attr_register_cpumask(info);
59846 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59847diff -urNp linux-3.0.3/kernel/time/alarmtimer.c linux-3.0.3/kernel/time/alarmtimer.c
59848--- linux-3.0.3/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59849+++ linux-3.0.3/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59850@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59851 {
59852 int error = 0;
59853 int i;
59854- struct k_clock alarm_clock = {
59855+ static struct k_clock alarm_clock = {
59856 .clock_getres = alarm_clock_getres,
59857 .clock_get = alarm_clock_get,
59858 .timer_create = alarm_timer_create,
59859diff -urNp linux-3.0.3/kernel/time/tick-broadcast.c linux-3.0.3/kernel/time/tick-broadcast.c
59860--- linux-3.0.3/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59861+++ linux-3.0.3/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59862@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59863 * then clear the broadcast bit.
59864 */
59865 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59866- int cpu = smp_processor_id();
59867+ cpu = smp_processor_id();
59868
59869 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59870 tick_broadcast_clear_oneshot(cpu);
59871diff -urNp linux-3.0.3/kernel/time/timekeeping.c linux-3.0.3/kernel/time/timekeeping.c
59872--- linux-3.0.3/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59873+++ linux-3.0.3/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59874@@ -14,6 +14,7 @@
59875 #include <linux/init.h>
59876 #include <linux/mm.h>
59877 #include <linux/sched.h>
59878+#include <linux/grsecurity.h>
59879 #include <linux/syscore_ops.h>
59880 #include <linux/clocksource.h>
59881 #include <linux/jiffies.h>
59882@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59883 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59884 return -EINVAL;
59885
59886+ gr_log_timechange();
59887+
59888 write_seqlock_irqsave(&xtime_lock, flags);
59889
59890 timekeeping_forward_now();
59891diff -urNp linux-3.0.3/kernel/time/timer_list.c linux-3.0.3/kernel/time/timer_list.c
59892--- linux-3.0.3/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59893+++ linux-3.0.3/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59894@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59895
59896 static void print_name_offset(struct seq_file *m, void *sym)
59897 {
59898+#ifdef CONFIG_GRKERNSEC_HIDESYM
59899+ SEQ_printf(m, "<%p>", NULL);
59900+#else
59901 char symname[KSYM_NAME_LEN];
59902
59903 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59904 SEQ_printf(m, "<%pK>", sym);
59905 else
59906 SEQ_printf(m, "%s", symname);
59907+#endif
59908 }
59909
59910 static void
59911@@ -112,7 +116,11 @@ next_one:
59912 static void
59913 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59914 {
59915+#ifdef CONFIG_GRKERNSEC_HIDESYM
59916+ SEQ_printf(m, " .base: %p\n", NULL);
59917+#else
59918 SEQ_printf(m, " .base: %pK\n", base);
59919+#endif
59920 SEQ_printf(m, " .index: %d\n",
59921 base->index);
59922 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59923@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59924 {
59925 struct proc_dir_entry *pe;
59926
59927+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59928+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59929+#else
59930 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59931+#endif
59932 if (!pe)
59933 return -ENOMEM;
59934 return 0;
59935diff -urNp linux-3.0.3/kernel/time/timer_stats.c linux-3.0.3/kernel/time/timer_stats.c
59936--- linux-3.0.3/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59937+++ linux-3.0.3/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59938@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59939 static unsigned long nr_entries;
59940 static struct entry entries[MAX_ENTRIES];
59941
59942-static atomic_t overflow_count;
59943+static atomic_unchecked_t overflow_count;
59944
59945 /*
59946 * The entries are in a hash-table, for fast lookup:
59947@@ -140,7 +140,7 @@ static void reset_entries(void)
59948 nr_entries = 0;
59949 memset(entries, 0, sizeof(entries));
59950 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59951- atomic_set(&overflow_count, 0);
59952+ atomic_set_unchecked(&overflow_count, 0);
59953 }
59954
59955 static struct entry *alloc_entry(void)
59956@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59957 if (likely(entry))
59958 entry->count++;
59959 else
59960- atomic_inc(&overflow_count);
59961+ atomic_inc_unchecked(&overflow_count);
59962
59963 out_unlock:
59964 raw_spin_unlock_irqrestore(lock, flags);
59965@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59966
59967 static void print_name_offset(struct seq_file *m, unsigned long addr)
59968 {
59969+#ifdef CONFIG_GRKERNSEC_HIDESYM
59970+ seq_printf(m, "<%p>", NULL);
59971+#else
59972 char symname[KSYM_NAME_LEN];
59973
59974 if (lookup_symbol_name(addr, symname) < 0)
59975 seq_printf(m, "<%p>", (void *)addr);
59976 else
59977 seq_printf(m, "%s", symname);
59978+#endif
59979 }
59980
59981 static int tstats_show(struct seq_file *m, void *v)
59982@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59983
59984 seq_puts(m, "Timer Stats Version: v0.2\n");
59985 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59986- if (atomic_read(&overflow_count))
59987+ if (atomic_read_unchecked(&overflow_count))
59988 seq_printf(m, "Overflow: %d entries\n",
59989- atomic_read(&overflow_count));
59990+ atomic_read_unchecked(&overflow_count));
59991
59992 for (i = 0; i < nr_entries; i++) {
59993 entry = entries + i;
59994@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59995 {
59996 struct proc_dir_entry *pe;
59997
59998+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59999+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60000+#else
60001 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60002+#endif
60003 if (!pe)
60004 return -ENOMEM;
60005 return 0;
60006diff -urNp linux-3.0.3/kernel/time.c linux-3.0.3/kernel/time.c
60007--- linux-3.0.3/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
60008+++ linux-3.0.3/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
60009@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60010 return error;
60011
60012 if (tz) {
60013+ /* we log in do_settimeofday called below, so don't log twice
60014+ */
60015+ if (!tv)
60016+ gr_log_timechange();
60017+
60018 /* SMP safe, global irq locking makes it work. */
60019 sys_tz = *tz;
60020 update_vsyscall_tz();
60021diff -urNp linux-3.0.3/kernel/timer.c linux-3.0.3/kernel/timer.c
60022--- linux-3.0.3/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
60023+++ linux-3.0.3/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
60024@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
60025 /*
60026 * This function runs timers and the timer-tq in bottom half context.
60027 */
60028-static void run_timer_softirq(struct softirq_action *h)
60029+static void run_timer_softirq(void)
60030 {
60031 struct tvec_base *base = __this_cpu_read(tvec_bases);
60032
60033diff -urNp linux-3.0.3/kernel/trace/blktrace.c linux-3.0.3/kernel/trace/blktrace.c
60034--- linux-3.0.3/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
60035+++ linux-3.0.3/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
60036@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60037 struct blk_trace *bt = filp->private_data;
60038 char buf[16];
60039
60040- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60041+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60042
60043 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60044 }
60045@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60046 return 1;
60047
60048 bt = buf->chan->private_data;
60049- atomic_inc(&bt->dropped);
60050+ atomic_inc_unchecked(&bt->dropped);
60051 return 0;
60052 }
60053
60054@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60055
60056 bt->dir = dir;
60057 bt->dev = dev;
60058- atomic_set(&bt->dropped, 0);
60059+ atomic_set_unchecked(&bt->dropped, 0);
60060
60061 ret = -EIO;
60062 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60063diff -urNp linux-3.0.3/kernel/trace/ftrace.c linux-3.0.3/kernel/trace/ftrace.c
60064--- linux-3.0.3/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
60065+++ linux-3.0.3/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
60066@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
60067 if (unlikely(ftrace_disabled))
60068 return 0;
60069
60070+ ret = ftrace_arch_code_modify_prepare();
60071+ FTRACE_WARN_ON(ret);
60072+ if (ret)
60073+ return 0;
60074+
60075 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60076+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60077 if (ret) {
60078 ftrace_bug(ret, ip);
60079- return 0;
60080 }
60081- return 1;
60082+ return ret ? 0 : 1;
60083 }
60084
60085 /*
60086@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
60087
60088 int
60089 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60090- void *data)
60091+ void *data)
60092 {
60093 struct ftrace_func_probe *entry;
60094 struct ftrace_page *pg;
60095diff -urNp linux-3.0.3/kernel/trace/trace.c linux-3.0.3/kernel/trace/trace.c
60096--- linux-3.0.3/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
60097+++ linux-3.0.3/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
60098@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
60099 size_t rem;
60100 unsigned int i;
60101
60102+ pax_track_stack();
60103+
60104 if (splice_grow_spd(pipe, &spd))
60105 return -ENOMEM;
60106
60107@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
60108 int entries, size, i;
60109 size_t ret;
60110
60111+ pax_track_stack();
60112+
60113 if (splice_grow_spd(pipe, &spd))
60114 return -ENOMEM;
60115
60116@@ -3990,10 +3994,9 @@ static const struct file_operations trac
60117 };
60118 #endif
60119
60120-static struct dentry *d_tracer;
60121-
60122 struct dentry *tracing_init_dentry(void)
60123 {
60124+ static struct dentry *d_tracer;
60125 static int once;
60126
60127 if (d_tracer)
60128@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
60129 return d_tracer;
60130 }
60131
60132-static struct dentry *d_percpu;
60133-
60134 struct dentry *tracing_dentry_percpu(void)
60135 {
60136+ static struct dentry *d_percpu;
60137 static int once;
60138 struct dentry *d_tracer;
60139
60140diff -urNp linux-3.0.3/kernel/trace/trace_events.c linux-3.0.3/kernel/trace/trace_events.c
60141--- linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
60142+++ linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
60143@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
60144 struct ftrace_module_file_ops {
60145 struct list_head list;
60146 struct module *mod;
60147- struct file_operations id;
60148- struct file_operations enable;
60149- struct file_operations format;
60150- struct file_operations filter;
60151 };
60152
60153 static struct ftrace_module_file_ops *
60154@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
60155
60156 file_ops->mod = mod;
60157
60158- file_ops->id = ftrace_event_id_fops;
60159- file_ops->id.owner = mod;
60160-
60161- file_ops->enable = ftrace_enable_fops;
60162- file_ops->enable.owner = mod;
60163-
60164- file_ops->filter = ftrace_event_filter_fops;
60165- file_ops->filter.owner = mod;
60166-
60167- file_ops->format = ftrace_event_format_fops;
60168- file_ops->format.owner = mod;
60169+ pax_open_kernel();
60170+ *(void **)&mod->trace_id.owner = mod;
60171+ *(void **)&mod->trace_enable.owner = mod;
60172+ *(void **)&mod->trace_filter.owner = mod;
60173+ *(void **)&mod->trace_format.owner = mod;
60174+ pax_close_kernel();
60175
60176 list_add(&file_ops->list, &ftrace_module_file_list);
60177
60178@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
60179
60180 for_each_event(call, start, end) {
60181 __trace_add_event_call(*call, mod,
60182- &file_ops->id, &file_ops->enable,
60183- &file_ops->filter, &file_ops->format);
60184+ &mod->trace_id, &mod->trace_enable,
60185+ &mod->trace_filter, &mod->trace_format);
60186 }
60187 }
60188
60189diff -urNp linux-3.0.3/kernel/trace/trace_mmiotrace.c linux-3.0.3/kernel/trace/trace_mmiotrace.c
60190--- linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
60191+++ linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
60192@@ -24,7 +24,7 @@ struct header_iter {
60193 static struct trace_array *mmio_trace_array;
60194 static bool overrun_detected;
60195 static unsigned long prev_overruns;
60196-static atomic_t dropped_count;
60197+static atomic_unchecked_t dropped_count;
60198
60199 static void mmio_reset_data(struct trace_array *tr)
60200 {
60201@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60202
60203 static unsigned long count_overruns(struct trace_iterator *iter)
60204 {
60205- unsigned long cnt = atomic_xchg(&dropped_count, 0);
60206+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60207 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60208
60209 if (over > prev_overruns)
60210@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60211 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60212 sizeof(*entry), 0, pc);
60213 if (!event) {
60214- atomic_inc(&dropped_count);
60215+ atomic_inc_unchecked(&dropped_count);
60216 return;
60217 }
60218 entry = ring_buffer_event_data(event);
60219@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60220 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60221 sizeof(*entry), 0, pc);
60222 if (!event) {
60223- atomic_inc(&dropped_count);
60224+ atomic_inc_unchecked(&dropped_count);
60225 return;
60226 }
60227 entry = ring_buffer_event_data(event);
60228diff -urNp linux-3.0.3/kernel/trace/trace_output.c linux-3.0.3/kernel/trace/trace_output.c
60229--- linux-3.0.3/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
60230+++ linux-3.0.3/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
60231@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60232
60233 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60234 if (!IS_ERR(p)) {
60235- p = mangle_path(s->buffer + s->len, p, "\n");
60236+ p = mangle_path(s->buffer + s->len, p, "\n\\");
60237 if (p) {
60238 s->len = p - s->buffer;
60239 return 1;
60240diff -urNp linux-3.0.3/kernel/trace/trace_stack.c linux-3.0.3/kernel/trace/trace_stack.c
60241--- linux-3.0.3/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
60242+++ linux-3.0.3/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
60243@@ -50,7 +50,7 @@ static inline void check_stack(void)
60244 return;
60245
60246 /* we do not handle interrupt stacks yet */
60247- if (!object_is_on_stack(&this_size))
60248+ if (!object_starts_on_stack(&this_size))
60249 return;
60250
60251 local_irq_save(flags);
60252diff -urNp linux-3.0.3/kernel/trace/trace_workqueue.c linux-3.0.3/kernel/trace/trace_workqueue.c
60253--- linux-3.0.3/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
60254+++ linux-3.0.3/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
60255@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60256 int cpu;
60257 pid_t pid;
60258 /* Can be inserted from interrupt or user context, need to be atomic */
60259- atomic_t inserted;
60260+ atomic_unchecked_t inserted;
60261 /*
60262 * Don't need to be atomic, works are serialized in a single workqueue thread
60263 * on a single CPU.
60264@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60265 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60266 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60267 if (node->pid == wq_thread->pid) {
60268- atomic_inc(&node->inserted);
60269+ atomic_inc_unchecked(&node->inserted);
60270 goto found;
60271 }
60272 }
60273@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60274 tsk = get_pid_task(pid, PIDTYPE_PID);
60275 if (tsk) {
60276 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60277- atomic_read(&cws->inserted), cws->executed,
60278+ atomic_read_unchecked(&cws->inserted), cws->executed,
60279 tsk->comm);
60280 put_task_struct(tsk);
60281 }
60282diff -urNp linux-3.0.3/lib/bug.c linux-3.0.3/lib/bug.c
60283--- linux-3.0.3/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
60284+++ linux-3.0.3/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
60285@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60286 return BUG_TRAP_TYPE_NONE;
60287
60288 bug = find_bug(bugaddr);
60289+ if (!bug)
60290+ return BUG_TRAP_TYPE_NONE;
60291
60292 file = NULL;
60293 line = 0;
60294diff -urNp linux-3.0.3/lib/debugobjects.c linux-3.0.3/lib/debugobjects.c
60295--- linux-3.0.3/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
60296+++ linux-3.0.3/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
60297@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60298 if (limit > 4)
60299 return;
60300
60301- is_on_stack = object_is_on_stack(addr);
60302+ is_on_stack = object_starts_on_stack(addr);
60303 if (is_on_stack == onstack)
60304 return;
60305
60306diff -urNp linux-3.0.3/lib/dma-debug.c linux-3.0.3/lib/dma-debug.c
60307--- linux-3.0.3/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
60308+++ linux-3.0.3/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
60309@@ -870,7 +870,7 @@ out:
60310
60311 static void check_for_stack(struct device *dev, void *addr)
60312 {
60313- if (object_is_on_stack(addr))
60314+ if (object_starts_on_stack(addr))
60315 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60316 "stack [addr=%p]\n", addr);
60317 }
60318diff -urNp linux-3.0.3/lib/extable.c linux-3.0.3/lib/extable.c
60319--- linux-3.0.3/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
60320+++ linux-3.0.3/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
60321@@ -13,6 +13,7 @@
60322 #include <linux/init.h>
60323 #include <linux/sort.h>
60324 #include <asm/uaccess.h>
60325+#include <asm/pgtable.h>
60326
60327 #ifndef ARCH_HAS_SORT_EXTABLE
60328 /*
60329@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
60330 void sort_extable(struct exception_table_entry *start,
60331 struct exception_table_entry *finish)
60332 {
60333+ pax_open_kernel();
60334 sort(start, finish - start, sizeof(struct exception_table_entry),
60335 cmp_ex, NULL);
60336+ pax_close_kernel();
60337 }
60338
60339 #ifdef CONFIG_MODULES
60340diff -urNp linux-3.0.3/lib/inflate.c linux-3.0.3/lib/inflate.c
60341--- linux-3.0.3/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60342+++ linux-3.0.3/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60343@@ -269,7 +269,7 @@ static void free(void *where)
60344 malloc_ptr = free_mem_ptr;
60345 }
60346 #else
60347-#define malloc(a) kmalloc(a, GFP_KERNEL)
60348+#define malloc(a) kmalloc((a), GFP_KERNEL)
60349 #define free(a) kfree(a)
60350 #endif
60351
60352diff -urNp linux-3.0.3/lib/Kconfig.debug linux-3.0.3/lib/Kconfig.debug
60353--- linux-3.0.3/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60354+++ linux-3.0.3/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60355@@ -1088,6 +1088,7 @@ config LATENCYTOP
60356 depends on DEBUG_KERNEL
60357 depends on STACKTRACE_SUPPORT
60358 depends on PROC_FS
60359+ depends on !GRKERNSEC_HIDESYM
60360 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60361 select KALLSYMS
60362 select KALLSYMS_ALL
60363diff -urNp linux-3.0.3/lib/kref.c linux-3.0.3/lib/kref.c
60364--- linux-3.0.3/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60365+++ linux-3.0.3/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60366@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60367 */
60368 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60369 {
60370- WARN_ON(release == NULL);
60371+ BUG_ON(release == NULL);
60372 WARN_ON(release == (void (*)(struct kref *))kfree);
60373
60374 if (atomic_dec_and_test(&kref->refcount)) {
60375diff -urNp linux-3.0.3/lib/radix-tree.c linux-3.0.3/lib/radix-tree.c
60376--- linux-3.0.3/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60377+++ linux-3.0.3/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60378@@ -80,7 +80,7 @@ struct radix_tree_preload {
60379 int nr;
60380 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60381 };
60382-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60383+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60384
60385 static inline void *ptr_to_indirect(void *ptr)
60386 {
60387diff -urNp linux-3.0.3/lib/vsprintf.c linux-3.0.3/lib/vsprintf.c
60388--- linux-3.0.3/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60389+++ linux-3.0.3/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60390@@ -16,6 +16,9 @@
60391 * - scnprintf and vscnprintf
60392 */
60393
60394+#ifdef CONFIG_GRKERNSEC_HIDESYM
60395+#define __INCLUDED_BY_HIDESYM 1
60396+#endif
60397 #include <stdarg.h>
60398 #include <linux/module.h>
60399 #include <linux/types.h>
60400@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60401 char sym[KSYM_SYMBOL_LEN];
60402 if (ext == 'B')
60403 sprint_backtrace(sym, value);
60404- else if (ext != 'f' && ext != 's')
60405+ else if (ext != 'f' && ext != 's' && ext != 'a')
60406 sprint_symbol(sym, value);
60407 else
60408 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60409@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60410 return string(buf, end, uuid, spec);
60411 }
60412
60413+#ifdef CONFIG_GRKERNSEC_HIDESYM
60414+int kptr_restrict __read_mostly = 2;
60415+#else
60416 int kptr_restrict __read_mostly;
60417+#endif
60418
60419 /*
60420 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60421@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60422 * - 'S' For symbolic direct pointers with offset
60423 * - 's' For symbolic direct pointers without offset
60424 * - 'B' For backtraced symbolic direct pointers with offset
60425+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60426+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60427 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60428 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60429 * - 'M' For a 6-byte MAC address, it prints the address in the
60430@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60431 {
60432 if (!ptr && *fmt != 'K') {
60433 /*
60434- * Print (null) with the same width as a pointer so it makes
60435+ * Print (nil) with the same width as a pointer so it makes
60436 * tabular output look nice.
60437 */
60438 if (spec.field_width == -1)
60439 spec.field_width = 2 * sizeof(void *);
60440- return string(buf, end, "(null)", spec);
60441+ return string(buf, end, "(nil)", spec);
60442 }
60443
60444 switch (*fmt) {
60445@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60446 /* Fallthrough */
60447 case 'S':
60448 case 's':
60449+#ifdef CONFIG_GRKERNSEC_HIDESYM
60450+ break;
60451+#else
60452+ return symbol_string(buf, end, ptr, spec, *fmt);
60453+#endif
60454+ case 'A':
60455+ case 'a':
60456 case 'B':
60457 return symbol_string(buf, end, ptr, spec, *fmt);
60458 case 'R':
60459@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60460 typeof(type) value; \
60461 if (sizeof(type) == 8) { \
60462 args = PTR_ALIGN(args, sizeof(u32)); \
60463- *(u32 *)&value = *(u32 *)args; \
60464- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60465+ *(u32 *)&value = *(const u32 *)args; \
60466+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60467 } else { \
60468 args = PTR_ALIGN(args, sizeof(type)); \
60469- value = *(typeof(type) *)args; \
60470+ value = *(const typeof(type) *)args; \
60471 } \
60472 args += sizeof(type); \
60473 value; \
60474@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60475 case FORMAT_TYPE_STR: {
60476 const char *str_arg = args;
60477 args += strlen(str_arg) + 1;
60478- str = string(str, end, (char *)str_arg, spec);
60479+ str = string(str, end, str_arg, spec);
60480 break;
60481 }
60482
60483diff -urNp linux-3.0.3/localversion-grsec linux-3.0.3/localversion-grsec
60484--- linux-3.0.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60485+++ linux-3.0.3/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60486@@ -0,0 +1 @@
60487+-grsec
60488diff -urNp linux-3.0.3/Makefile linux-3.0.3/Makefile
60489--- linux-3.0.3/Makefile 2011-08-23 21:44:40.000000000 -0400
60490+++ linux-3.0.3/Makefile 2011-08-24 18:10:12.000000000 -0400
60491@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60492
60493 HOSTCC = gcc
60494 HOSTCXX = g++
60495-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60496-HOSTCXXFLAGS = -O2
60497+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60498+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60499+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60500
60501 # Decide whether to build built-in, modular, or both.
60502 # Normally, just do built-in.
60503@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60504 KBUILD_CPPFLAGS := -D__KERNEL__
60505
60506 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60507+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
60508 -fno-strict-aliasing -fno-common \
60509 -Werror-implicit-function-declaration \
60510 -Wno-format-security \
60511 -fno-delete-null-pointer-checks
60512+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60513 KBUILD_AFLAGS_KERNEL :=
60514 KBUILD_CFLAGS_KERNEL :=
60515 KBUILD_AFLAGS := -D__ASSEMBLY__
60516@@ -407,10 +410,11 @@ export RCS_TAR_IGNORE := --exclude SCCS
60517 # Rules shared between *config targets and build targets
60518
60519 # Basic helpers built in scripts/
60520-PHONY += scripts_basic
60521-scripts_basic:
60522+PHONY += scripts_basic0 scripts_basic gcc-plugins
60523+scripts_basic0:
60524 $(Q)$(MAKE) $(build)=scripts/basic
60525 $(Q)rm -f .tmp_quiet_recordmcount
60526+scripts_basic: scripts_basic0 gcc-plugins
60527
60528 # To avoid any implicit rule to kick in, define an empty command.
60529 scripts/basic/%: scripts_basic ;
60530@@ -564,6 +568,24 @@ else
60531 KBUILD_CFLAGS += -O2
60532 endif
60533
60534+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60535+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60536+ifdef CONFIG_PAX_MEMORY_STACKLEAK
60537+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60538+endif
60539+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60540+gcc-plugins:
60541+ $(Q)$(MAKE) $(build)=tools/gcc
60542+else
60543+gcc-plugins:
60544+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60545+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60546+else
60547+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60548+endif
60549+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60550+endif
60551+
60552 include $(srctree)/arch/$(SRCARCH)/Makefile
60553
60554 ifneq ($(CONFIG_FRAME_WARN),0)
60555@@ -708,7 +730,7 @@ export mod_strip_cmd
60556
60557
60558 ifeq ($(KBUILD_EXTMOD),)
60559-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60560+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60561
60562 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60563 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60564@@ -907,6 +929,7 @@ define rule_vmlinux-modpost
60565 endef
60566
60567 # vmlinux image - including updated kernel symbols
60568+vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60569 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
60570 ifdef CONFIG_HEADERS_CHECK
60571 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
60572@@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60573 endif
60574
60575 # prepare2 creates a makefile if using a separate output directory
60576-prepare2: prepare3 outputmakefile asm-generic
60577+prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60578
60579 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60580 include/config/auto.conf
60581@@ -1087,6 +1110,7 @@ all: modules
60582 # using awk while concatenating to the final file.
60583
60584 PHONY += modules
60585+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60586 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
60587 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
60588 @$(kecho) ' Building modules, stage 2.';
60589@@ -1198,7 +1222,7 @@ distclean: mrproper
60590 @find $(srctree) $(RCS_FIND_IGNORE) \
60591 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
60592 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
60593- -o -name '.*.rej' -o -size 0 \
60594+ -o -name '.*.rej' -o -size 0 -o -name '*.so' \
60595 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
60596 -type f -print | xargs rm -f
60597
60598@@ -1404,7 +1428,7 @@ clean: $(clean-dirs)
60599 $(call cmd,rmdirs)
60600 $(call cmd,rmfiles)
60601 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60602- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60603+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60604 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60605 -o -name '*.symtypes' -o -name 'modules.order' \
60606 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60607diff -urNp linux-3.0.3/mm/filemap.c linux-3.0.3/mm/filemap.c
60608--- linux-3.0.3/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60609+++ linux-3.0.3/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60610@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60611 struct address_space *mapping = file->f_mapping;
60612
60613 if (!mapping->a_ops->readpage)
60614- return -ENOEXEC;
60615+ return -ENODEV;
60616 file_accessed(file);
60617 vma->vm_ops = &generic_file_vm_ops;
60618 vma->vm_flags |= VM_CAN_NONLINEAR;
60619@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60620 *pos = i_size_read(inode);
60621
60622 if (limit != RLIM_INFINITY) {
60623+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60624 if (*pos >= limit) {
60625 send_sig(SIGXFSZ, current, 0);
60626 return -EFBIG;
60627diff -urNp linux-3.0.3/mm/fremap.c linux-3.0.3/mm/fremap.c
60628--- linux-3.0.3/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60629+++ linux-3.0.3/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60630@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60631 retry:
60632 vma = find_vma(mm, start);
60633
60634+#ifdef CONFIG_PAX_SEGMEXEC
60635+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60636+ goto out;
60637+#endif
60638+
60639 /*
60640 * Make sure the vma is shared, that it supports prefaulting,
60641 * and that the remapped range is valid and fully within
60642diff -urNp linux-3.0.3/mm/highmem.c linux-3.0.3/mm/highmem.c
60643--- linux-3.0.3/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60644+++ linux-3.0.3/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60645@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60646 * So no dangers, even with speculative execution.
60647 */
60648 page = pte_page(pkmap_page_table[i]);
60649+ pax_open_kernel();
60650 pte_clear(&init_mm, (unsigned long)page_address(page),
60651 &pkmap_page_table[i]);
60652-
60653+ pax_close_kernel();
60654 set_page_address(page, NULL);
60655 need_flush = 1;
60656 }
60657@@ -186,9 +187,11 @@ start:
60658 }
60659 }
60660 vaddr = PKMAP_ADDR(last_pkmap_nr);
60661+
60662+ pax_open_kernel();
60663 set_pte_at(&init_mm, vaddr,
60664 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60665-
60666+ pax_close_kernel();
60667 pkmap_count[last_pkmap_nr] = 1;
60668 set_page_address(page, (void *)vaddr);
60669
60670diff -urNp linux-3.0.3/mm/huge_memory.c linux-3.0.3/mm/huge_memory.c
60671--- linux-3.0.3/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60672+++ linux-3.0.3/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60673@@ -702,7 +702,7 @@ out:
60674 * run pte_offset_map on the pmd, if an huge pmd could
60675 * materialize from under us from a different thread.
60676 */
60677- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60678+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60679 return VM_FAULT_OOM;
60680 /* if an huge pmd materialized from under us just retry later */
60681 if (unlikely(pmd_trans_huge(*pmd)))
60682diff -urNp linux-3.0.3/mm/hugetlb.c linux-3.0.3/mm/hugetlb.c
60683--- linux-3.0.3/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60684+++ linux-3.0.3/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60685@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60686 return 1;
60687 }
60688
60689+#ifdef CONFIG_PAX_SEGMEXEC
60690+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60691+{
60692+ struct mm_struct *mm = vma->vm_mm;
60693+ struct vm_area_struct *vma_m;
60694+ unsigned long address_m;
60695+ pte_t *ptep_m;
60696+
60697+ vma_m = pax_find_mirror_vma(vma);
60698+ if (!vma_m)
60699+ return;
60700+
60701+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60702+ address_m = address + SEGMEXEC_TASK_SIZE;
60703+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60704+ get_page(page_m);
60705+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
60706+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60707+}
60708+#endif
60709+
60710 /*
60711 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60712 */
60713@@ -2440,6 +2461,11 @@ retry_avoidcopy:
60714 make_huge_pte(vma, new_page, 1));
60715 page_remove_rmap(old_page);
60716 hugepage_add_new_anon_rmap(new_page, vma, address);
60717+
60718+#ifdef CONFIG_PAX_SEGMEXEC
60719+ pax_mirror_huge_pte(vma, address, new_page);
60720+#endif
60721+
60722 /* Make the old page be freed below */
60723 new_page = old_page;
60724 mmu_notifier_invalidate_range_end(mm,
60725@@ -2591,6 +2617,10 @@ retry:
60726 && (vma->vm_flags & VM_SHARED)));
60727 set_huge_pte_at(mm, address, ptep, new_pte);
60728
60729+#ifdef CONFIG_PAX_SEGMEXEC
60730+ pax_mirror_huge_pte(vma, address, page);
60731+#endif
60732+
60733 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60734 /* Optimization, do the COW without a second fault */
60735 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60736@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60737 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60738 struct hstate *h = hstate_vma(vma);
60739
60740+#ifdef CONFIG_PAX_SEGMEXEC
60741+ struct vm_area_struct *vma_m;
60742+#endif
60743+
60744 ptep = huge_pte_offset(mm, address);
60745 if (ptep) {
60746 entry = huge_ptep_get(ptep);
60747@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60748 VM_FAULT_SET_HINDEX(h - hstates);
60749 }
60750
60751+#ifdef CONFIG_PAX_SEGMEXEC
60752+ vma_m = pax_find_mirror_vma(vma);
60753+ if (vma_m) {
60754+ unsigned long address_m;
60755+
60756+ if (vma->vm_start > vma_m->vm_start) {
60757+ address_m = address;
60758+ address -= SEGMEXEC_TASK_SIZE;
60759+ vma = vma_m;
60760+ h = hstate_vma(vma);
60761+ } else
60762+ address_m = address + SEGMEXEC_TASK_SIZE;
60763+
60764+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60765+ return VM_FAULT_OOM;
60766+ address_m &= HPAGE_MASK;
60767+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60768+ }
60769+#endif
60770+
60771 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60772 if (!ptep)
60773 return VM_FAULT_OOM;
60774diff -urNp linux-3.0.3/mm/internal.h linux-3.0.3/mm/internal.h
60775--- linux-3.0.3/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60776+++ linux-3.0.3/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60777@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60778 * in mm/page_alloc.c
60779 */
60780 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60781+extern void free_compound_page(struct page *page);
60782 extern void prep_compound_page(struct page *page, unsigned long order);
60783 #ifdef CONFIG_MEMORY_FAILURE
60784 extern bool is_free_buddy_page(struct page *page);
60785diff -urNp linux-3.0.3/mm/Kconfig linux-3.0.3/mm/Kconfig
60786--- linux-3.0.3/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60787+++ linux-3.0.3/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60788@@ -240,7 +240,7 @@ config KSM
60789 config DEFAULT_MMAP_MIN_ADDR
60790 int "Low address space to protect from user allocation"
60791 depends on MMU
60792- default 4096
60793+ default 65536
60794 help
60795 This is the portion of low virtual memory which should be protected
60796 from userspace allocation. Keeping a user from writing to low pages
60797diff -urNp linux-3.0.3/mm/kmemleak.c linux-3.0.3/mm/kmemleak.c
60798--- linux-3.0.3/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60799+++ linux-3.0.3/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60800@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60801
60802 for (i = 0; i < object->trace_len; i++) {
60803 void *ptr = (void *)object->trace[i];
60804- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60805+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60806 }
60807 }
60808
60809diff -urNp linux-3.0.3/mm/madvise.c linux-3.0.3/mm/madvise.c
60810--- linux-3.0.3/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60811+++ linux-3.0.3/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60812@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60813 pgoff_t pgoff;
60814 unsigned long new_flags = vma->vm_flags;
60815
60816+#ifdef CONFIG_PAX_SEGMEXEC
60817+ struct vm_area_struct *vma_m;
60818+#endif
60819+
60820 switch (behavior) {
60821 case MADV_NORMAL:
60822 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60823@@ -110,6 +114,13 @@ success:
60824 /*
60825 * vm_flags is protected by the mmap_sem held in write mode.
60826 */
60827+
60828+#ifdef CONFIG_PAX_SEGMEXEC
60829+ vma_m = pax_find_mirror_vma(vma);
60830+ if (vma_m)
60831+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60832+#endif
60833+
60834 vma->vm_flags = new_flags;
60835
60836 out:
60837@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60838 struct vm_area_struct ** prev,
60839 unsigned long start, unsigned long end)
60840 {
60841+
60842+#ifdef CONFIG_PAX_SEGMEXEC
60843+ struct vm_area_struct *vma_m;
60844+#endif
60845+
60846 *prev = vma;
60847 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60848 return -EINVAL;
60849@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60850 zap_page_range(vma, start, end - start, &details);
60851 } else
60852 zap_page_range(vma, start, end - start, NULL);
60853+
60854+#ifdef CONFIG_PAX_SEGMEXEC
60855+ vma_m = pax_find_mirror_vma(vma);
60856+ if (vma_m) {
60857+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60858+ struct zap_details details = {
60859+ .nonlinear_vma = vma_m,
60860+ .last_index = ULONG_MAX,
60861+ };
60862+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60863+ } else
60864+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60865+ }
60866+#endif
60867+
60868 return 0;
60869 }
60870
60871@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60872 if (end < start)
60873 goto out;
60874
60875+#ifdef CONFIG_PAX_SEGMEXEC
60876+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60877+ if (end > SEGMEXEC_TASK_SIZE)
60878+ goto out;
60879+ } else
60880+#endif
60881+
60882+ if (end > TASK_SIZE)
60883+ goto out;
60884+
60885 error = 0;
60886 if (end == start)
60887 goto out;
60888diff -urNp linux-3.0.3/mm/memory.c linux-3.0.3/mm/memory.c
60889--- linux-3.0.3/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60890+++ linux-3.0.3/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60891@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60892 return;
60893
60894 pmd = pmd_offset(pud, start);
60895+
60896+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60897 pud_clear(pud);
60898 pmd_free_tlb(tlb, pmd, start);
60899+#endif
60900+
60901 }
60902
60903 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60904@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60905 if (end - 1 > ceiling - 1)
60906 return;
60907
60908+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60909 pud = pud_offset(pgd, start);
60910 pgd_clear(pgd);
60911 pud_free_tlb(tlb, pud, start);
60912+#endif
60913+
60914 }
60915
60916 /*
60917@@ -1577,12 +1584,6 @@ no_page_table:
60918 return page;
60919 }
60920
60921-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60922-{
60923- return stack_guard_page_start(vma, addr) ||
60924- stack_guard_page_end(vma, addr+PAGE_SIZE);
60925-}
60926-
60927 /**
60928 * __get_user_pages() - pin user pages in memory
60929 * @tsk: task_struct of target task
60930@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60931 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60932 i = 0;
60933
60934- do {
60935+ while (nr_pages) {
60936 struct vm_area_struct *vma;
60937
60938- vma = find_extend_vma(mm, start);
60939+ vma = find_vma(mm, start);
60940 if (!vma && in_gate_area(mm, start)) {
60941 unsigned long pg = start & PAGE_MASK;
60942 pgd_t *pgd;
60943@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60944 goto next_page;
60945 }
60946
60947- if (!vma ||
60948+ if (!vma || start < vma->vm_start ||
60949 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60950 !(vm_flags & vma->vm_flags))
60951 return i ? : -EFAULT;
60952@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60953 int ret;
60954 unsigned int fault_flags = 0;
60955
60956- /* For mlock, just skip the stack guard page. */
60957- if (foll_flags & FOLL_MLOCK) {
60958- if (stack_guard_page(vma, start))
60959- goto next_page;
60960- }
60961 if (foll_flags & FOLL_WRITE)
60962 fault_flags |= FAULT_FLAG_WRITE;
60963 if (nonblocking)
60964@@ -1811,7 +1807,7 @@ next_page:
60965 start += PAGE_SIZE;
60966 nr_pages--;
60967 } while (nr_pages && start < vma->vm_end);
60968- } while (nr_pages);
60969+ }
60970 return i;
60971 }
60972 EXPORT_SYMBOL(__get_user_pages);
60973@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60974 page_add_file_rmap(page);
60975 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60976
60977+#ifdef CONFIG_PAX_SEGMEXEC
60978+ pax_mirror_file_pte(vma, addr, page, ptl);
60979+#endif
60980+
60981 retval = 0;
60982 pte_unmap_unlock(pte, ptl);
60983 return retval;
60984@@ -2052,10 +2052,22 @@ out:
60985 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60986 struct page *page)
60987 {
60988+
60989+#ifdef CONFIG_PAX_SEGMEXEC
60990+ struct vm_area_struct *vma_m;
60991+#endif
60992+
60993 if (addr < vma->vm_start || addr >= vma->vm_end)
60994 return -EFAULT;
60995 if (!page_count(page))
60996 return -EINVAL;
60997+
60998+#ifdef CONFIG_PAX_SEGMEXEC
60999+ vma_m = pax_find_mirror_vma(vma);
61000+ if (vma_m)
61001+ vma_m->vm_flags |= VM_INSERTPAGE;
61002+#endif
61003+
61004 vma->vm_flags |= VM_INSERTPAGE;
61005 return insert_page(vma, addr, page, vma->vm_page_prot);
61006 }
61007@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
61008 unsigned long pfn)
61009 {
61010 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61011+ BUG_ON(vma->vm_mirror);
61012
61013 if (addr < vma->vm_start || addr >= vma->vm_end)
61014 return -EFAULT;
61015@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
61016 copy_user_highpage(dst, src, va, vma);
61017 }
61018
61019+#ifdef CONFIG_PAX_SEGMEXEC
61020+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61021+{
61022+ struct mm_struct *mm = vma->vm_mm;
61023+ spinlock_t *ptl;
61024+ pte_t *pte, entry;
61025+
61026+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61027+ entry = *pte;
61028+ if (!pte_present(entry)) {
61029+ if (!pte_none(entry)) {
61030+ BUG_ON(pte_file(entry));
61031+ free_swap_and_cache(pte_to_swp_entry(entry));
61032+ pte_clear_not_present_full(mm, address, pte, 0);
61033+ }
61034+ } else {
61035+ struct page *page;
61036+
61037+ flush_cache_page(vma, address, pte_pfn(entry));
61038+ entry = ptep_clear_flush(vma, address, pte);
61039+ BUG_ON(pte_dirty(entry));
61040+ page = vm_normal_page(vma, address, entry);
61041+ if (page) {
61042+ update_hiwater_rss(mm);
61043+ if (PageAnon(page))
61044+ dec_mm_counter_fast(mm, MM_ANONPAGES);
61045+ else
61046+ dec_mm_counter_fast(mm, MM_FILEPAGES);
61047+ page_remove_rmap(page);
61048+ page_cache_release(page);
61049+ }
61050+ }
61051+ pte_unmap_unlock(pte, ptl);
61052+}
61053+
61054+/* PaX: if vma is mirrored, synchronize the mirror's PTE
61055+ *
61056+ * the ptl of the lower mapped page is held on entry and is not released on exit
61057+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61058+ */
61059+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61060+{
61061+ struct mm_struct *mm = vma->vm_mm;
61062+ unsigned long address_m;
61063+ spinlock_t *ptl_m;
61064+ struct vm_area_struct *vma_m;
61065+ pmd_t *pmd_m;
61066+ pte_t *pte_m, entry_m;
61067+
61068+ BUG_ON(!page_m || !PageAnon(page_m));
61069+
61070+ vma_m = pax_find_mirror_vma(vma);
61071+ if (!vma_m)
61072+ return;
61073+
61074+ BUG_ON(!PageLocked(page_m));
61075+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61076+ address_m = address + SEGMEXEC_TASK_SIZE;
61077+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61078+ pte_m = pte_offset_map(pmd_m, address_m);
61079+ ptl_m = pte_lockptr(mm, pmd_m);
61080+ if (ptl != ptl_m) {
61081+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61082+ if (!pte_none(*pte_m))
61083+ goto out;
61084+ }
61085+
61086+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61087+ page_cache_get(page_m);
61088+ page_add_anon_rmap(page_m, vma_m, address_m);
61089+ inc_mm_counter_fast(mm, MM_ANONPAGES);
61090+ set_pte_at(mm, address_m, pte_m, entry_m);
61091+ update_mmu_cache(vma_m, address_m, entry_m);
61092+out:
61093+ if (ptl != ptl_m)
61094+ spin_unlock(ptl_m);
61095+ pte_unmap(pte_m);
61096+ unlock_page(page_m);
61097+}
61098+
61099+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61100+{
61101+ struct mm_struct *mm = vma->vm_mm;
61102+ unsigned long address_m;
61103+ spinlock_t *ptl_m;
61104+ struct vm_area_struct *vma_m;
61105+ pmd_t *pmd_m;
61106+ pte_t *pte_m, entry_m;
61107+
61108+ BUG_ON(!page_m || PageAnon(page_m));
61109+
61110+ vma_m = pax_find_mirror_vma(vma);
61111+ if (!vma_m)
61112+ return;
61113+
61114+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61115+ address_m = address + SEGMEXEC_TASK_SIZE;
61116+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61117+ pte_m = pte_offset_map(pmd_m, address_m);
61118+ ptl_m = pte_lockptr(mm, pmd_m);
61119+ if (ptl != ptl_m) {
61120+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61121+ if (!pte_none(*pte_m))
61122+ goto out;
61123+ }
61124+
61125+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61126+ page_cache_get(page_m);
61127+ page_add_file_rmap(page_m);
61128+ inc_mm_counter_fast(mm, MM_FILEPAGES);
61129+ set_pte_at(mm, address_m, pte_m, entry_m);
61130+ update_mmu_cache(vma_m, address_m, entry_m);
61131+out:
61132+ if (ptl != ptl_m)
61133+ spin_unlock(ptl_m);
61134+ pte_unmap(pte_m);
61135+}
61136+
61137+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61138+{
61139+ struct mm_struct *mm = vma->vm_mm;
61140+ unsigned long address_m;
61141+ spinlock_t *ptl_m;
61142+ struct vm_area_struct *vma_m;
61143+ pmd_t *pmd_m;
61144+ pte_t *pte_m, entry_m;
61145+
61146+ vma_m = pax_find_mirror_vma(vma);
61147+ if (!vma_m)
61148+ return;
61149+
61150+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61151+ address_m = address + SEGMEXEC_TASK_SIZE;
61152+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61153+ pte_m = pte_offset_map(pmd_m, address_m);
61154+ ptl_m = pte_lockptr(mm, pmd_m);
61155+ if (ptl != ptl_m) {
61156+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61157+ if (!pte_none(*pte_m))
61158+ goto out;
61159+ }
61160+
61161+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61162+ set_pte_at(mm, address_m, pte_m, entry_m);
61163+out:
61164+ if (ptl != ptl_m)
61165+ spin_unlock(ptl_m);
61166+ pte_unmap(pte_m);
61167+}
61168+
61169+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61170+{
61171+ struct page *page_m;
61172+ pte_t entry;
61173+
61174+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61175+ goto out;
61176+
61177+ entry = *pte;
61178+ page_m = vm_normal_page(vma, address, entry);
61179+ if (!page_m)
61180+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61181+ else if (PageAnon(page_m)) {
61182+ if (pax_find_mirror_vma(vma)) {
61183+ pte_unmap_unlock(pte, ptl);
61184+ lock_page(page_m);
61185+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61186+ if (pte_same(entry, *pte))
61187+ pax_mirror_anon_pte(vma, address, page_m, ptl);
61188+ else
61189+ unlock_page(page_m);
61190+ }
61191+ } else
61192+ pax_mirror_file_pte(vma, address, page_m, ptl);
61193+
61194+out:
61195+ pte_unmap_unlock(pte, ptl);
61196+}
61197+#endif
61198+
61199 /*
61200 * This routine handles present pages, when users try to write
61201 * to a shared page. It is done by copying the page to a new address
61202@@ -2667,6 +2860,12 @@ gotten:
61203 */
61204 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61205 if (likely(pte_same(*page_table, orig_pte))) {
61206+
61207+#ifdef CONFIG_PAX_SEGMEXEC
61208+ if (pax_find_mirror_vma(vma))
61209+ BUG_ON(!trylock_page(new_page));
61210+#endif
61211+
61212 if (old_page) {
61213 if (!PageAnon(old_page)) {
61214 dec_mm_counter_fast(mm, MM_FILEPAGES);
61215@@ -2718,6 +2917,10 @@ gotten:
61216 page_remove_rmap(old_page);
61217 }
61218
61219+#ifdef CONFIG_PAX_SEGMEXEC
61220+ pax_mirror_anon_pte(vma, address, new_page, ptl);
61221+#endif
61222+
61223 /* Free the old page.. */
61224 new_page = old_page;
61225 ret |= VM_FAULT_WRITE;
61226@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
61227 swap_free(entry);
61228 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61229 try_to_free_swap(page);
61230+
61231+#ifdef CONFIG_PAX_SEGMEXEC
61232+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61233+#endif
61234+
61235 unlock_page(page);
61236 if (swapcache) {
61237 /*
61238@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
61239
61240 /* No need to invalidate - it was non-present before */
61241 update_mmu_cache(vma, address, page_table);
61242+
61243+#ifdef CONFIG_PAX_SEGMEXEC
61244+ pax_mirror_anon_pte(vma, address, page, ptl);
61245+#endif
61246+
61247 unlock:
61248 pte_unmap_unlock(page_table, ptl);
61249 out:
61250@@ -3039,40 +3252,6 @@ out_release:
61251 }
61252
61253 /*
61254- * This is like a special single-page "expand_{down|up}wards()",
61255- * except we must first make sure that 'address{-|+}PAGE_SIZE'
61256- * doesn't hit another vma.
61257- */
61258-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61259-{
61260- address &= PAGE_MASK;
61261- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61262- struct vm_area_struct *prev = vma->vm_prev;
61263-
61264- /*
61265- * Is there a mapping abutting this one below?
61266- *
61267- * That's only ok if it's the same stack mapping
61268- * that has gotten split..
61269- */
61270- if (prev && prev->vm_end == address)
61271- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61272-
61273- expand_downwards(vma, address - PAGE_SIZE);
61274- }
61275- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61276- struct vm_area_struct *next = vma->vm_next;
61277-
61278- /* As VM_GROWSDOWN but s/below/above/ */
61279- if (next && next->vm_start == address + PAGE_SIZE)
61280- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61281-
61282- expand_upwards(vma, address + PAGE_SIZE);
61283- }
61284- return 0;
61285-}
61286-
61287-/*
61288 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61289 * but allow concurrent faults), and pte mapped but not yet locked.
61290 * We return with mmap_sem still held, but pte unmapped and unlocked.
61291@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
61292 unsigned long address, pte_t *page_table, pmd_t *pmd,
61293 unsigned int flags)
61294 {
61295- struct page *page;
61296+ struct page *page = NULL;
61297 spinlock_t *ptl;
61298 pte_t entry;
61299
61300- pte_unmap(page_table);
61301-
61302- /* Check if we need to add a guard page to the stack */
61303- if (check_stack_guard_page(vma, address) < 0)
61304- return VM_FAULT_SIGBUS;
61305-
61306- /* Use the zero-page for reads */
61307 if (!(flags & FAULT_FLAG_WRITE)) {
61308 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61309 vma->vm_page_prot));
61310- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61311+ ptl = pte_lockptr(mm, pmd);
61312+ spin_lock(ptl);
61313 if (!pte_none(*page_table))
61314 goto unlock;
61315 goto setpte;
61316 }
61317
61318 /* Allocate our own private page. */
61319+ pte_unmap(page_table);
61320+
61321 if (unlikely(anon_vma_prepare(vma)))
61322 goto oom;
61323 page = alloc_zeroed_user_highpage_movable(vma, address);
61324@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
61325 if (!pte_none(*page_table))
61326 goto release;
61327
61328+#ifdef CONFIG_PAX_SEGMEXEC
61329+ if (pax_find_mirror_vma(vma))
61330+ BUG_ON(!trylock_page(page));
61331+#endif
61332+
61333 inc_mm_counter_fast(mm, MM_ANONPAGES);
61334 page_add_new_anon_rmap(page, vma, address);
61335 setpte:
61336@@ -3127,6 +3307,12 @@ setpte:
61337
61338 /* No need to invalidate - it was non-present before */
61339 update_mmu_cache(vma, address, page_table);
61340+
61341+#ifdef CONFIG_PAX_SEGMEXEC
61342+ if (page)
61343+ pax_mirror_anon_pte(vma, address, page, ptl);
61344+#endif
61345+
61346 unlock:
61347 pte_unmap_unlock(page_table, ptl);
61348 return 0;
61349@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61350 */
61351 /* Only go through if we didn't race with anybody else... */
61352 if (likely(pte_same(*page_table, orig_pte))) {
61353+
61354+#ifdef CONFIG_PAX_SEGMEXEC
61355+ if (anon && pax_find_mirror_vma(vma))
61356+ BUG_ON(!trylock_page(page));
61357+#endif
61358+
61359 flush_icache_page(vma, page);
61360 entry = mk_pte(page, vma->vm_page_prot);
61361 if (flags & FAULT_FLAG_WRITE)
61362@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61363
61364 /* no need to invalidate: a not-present page won't be cached */
61365 update_mmu_cache(vma, address, page_table);
61366+
61367+#ifdef CONFIG_PAX_SEGMEXEC
61368+ if (anon)
61369+ pax_mirror_anon_pte(vma, address, page, ptl);
61370+ else
61371+ pax_mirror_file_pte(vma, address, page, ptl);
61372+#endif
61373+
61374 } else {
61375 if (charged)
61376 mem_cgroup_uncharge_page(page);
61377@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61378 if (flags & FAULT_FLAG_WRITE)
61379 flush_tlb_fix_spurious_fault(vma, address);
61380 }
61381+
61382+#ifdef CONFIG_PAX_SEGMEXEC
61383+ pax_mirror_pte(vma, address, pte, pmd, ptl);
61384+ return 0;
61385+#endif
61386+
61387 unlock:
61388 pte_unmap_unlock(pte, ptl);
61389 return 0;
61390@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61391 pmd_t *pmd;
61392 pte_t *pte;
61393
61394+#ifdef CONFIG_PAX_SEGMEXEC
61395+ struct vm_area_struct *vma_m;
61396+#endif
61397+
61398 __set_current_state(TASK_RUNNING);
61399
61400 count_vm_event(PGFAULT);
61401@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61402 if (unlikely(is_vm_hugetlb_page(vma)))
61403 return hugetlb_fault(mm, vma, address, flags);
61404
61405+#ifdef CONFIG_PAX_SEGMEXEC
61406+ vma_m = pax_find_mirror_vma(vma);
61407+ if (vma_m) {
61408+ unsigned long address_m;
61409+ pgd_t *pgd_m;
61410+ pud_t *pud_m;
61411+ pmd_t *pmd_m;
61412+
61413+ if (vma->vm_start > vma_m->vm_start) {
61414+ address_m = address;
61415+ address -= SEGMEXEC_TASK_SIZE;
61416+ vma = vma_m;
61417+ } else
61418+ address_m = address + SEGMEXEC_TASK_SIZE;
61419+
61420+ pgd_m = pgd_offset(mm, address_m);
61421+ pud_m = pud_alloc(mm, pgd_m, address_m);
61422+ if (!pud_m)
61423+ return VM_FAULT_OOM;
61424+ pmd_m = pmd_alloc(mm, pud_m, address_m);
61425+ if (!pmd_m)
61426+ return VM_FAULT_OOM;
61427+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61428+ return VM_FAULT_OOM;
61429+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61430+ }
61431+#endif
61432+
61433 pgd = pgd_offset(mm, address);
61434 pud = pud_alloc(mm, pgd, address);
61435 if (!pud)
61436@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61437 * run pte_offset_map on the pmd, if an huge pmd could
61438 * materialize from under us from a different thread.
61439 */
61440- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61441+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61442 return VM_FAULT_OOM;
61443 /* if an huge pmd materialized from under us just retry later */
61444 if (unlikely(pmd_trans_huge(*pmd)))
61445@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61446 gate_vma.vm_start = FIXADDR_USER_START;
61447 gate_vma.vm_end = FIXADDR_USER_END;
61448 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61449- gate_vma.vm_page_prot = __P101;
61450+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61451 /*
61452 * Make sure the vDSO gets into every core dump.
61453 * Dumping its contents makes post-mortem fully interpretable later
61454diff -urNp linux-3.0.3/mm/memory-failure.c linux-3.0.3/mm/memory-failure.c
61455--- linux-3.0.3/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61456+++ linux-3.0.3/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61457@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61458
61459 int sysctl_memory_failure_recovery __read_mostly = 1;
61460
61461-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61462+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61463
61464 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61465
61466@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61467 }
61468
61469 nr_pages = 1 << compound_trans_order(hpage);
61470- atomic_long_add(nr_pages, &mce_bad_pages);
61471+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61472
61473 /*
61474 * We need/can do nothing about count=0 pages.
61475@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61476 if (!PageHWPoison(hpage)
61477 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61478 || (p != hpage && TestSetPageHWPoison(hpage))) {
61479- atomic_long_sub(nr_pages, &mce_bad_pages);
61480+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61481 return 0;
61482 }
61483 set_page_hwpoison_huge_page(hpage);
61484@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61485 }
61486 if (hwpoison_filter(p)) {
61487 if (TestClearPageHWPoison(p))
61488- atomic_long_sub(nr_pages, &mce_bad_pages);
61489+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61490 unlock_page(hpage);
61491 put_page(hpage);
61492 return 0;
61493@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61494 return 0;
61495 }
61496 if (TestClearPageHWPoison(p))
61497- atomic_long_sub(nr_pages, &mce_bad_pages);
61498+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61499 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61500 return 0;
61501 }
61502@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61503 */
61504 if (TestClearPageHWPoison(page)) {
61505 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61506- atomic_long_sub(nr_pages, &mce_bad_pages);
61507+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61508 freeit = 1;
61509 if (PageHuge(page))
61510 clear_page_hwpoison_huge_page(page);
61511@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61512 }
61513 done:
61514 if (!PageHWPoison(hpage))
61515- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61516+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61517 set_page_hwpoison_huge_page(hpage);
61518 dequeue_hwpoisoned_huge_page(hpage);
61519 /* keep elevated page count for bad page */
61520@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61521 return ret;
61522
61523 done:
61524- atomic_long_add(1, &mce_bad_pages);
61525+ atomic_long_add_unchecked(1, &mce_bad_pages);
61526 SetPageHWPoison(page);
61527 /* keep elevated page count for bad page */
61528 return ret;
61529diff -urNp linux-3.0.3/mm/mempolicy.c linux-3.0.3/mm/mempolicy.c
61530--- linux-3.0.3/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61531+++ linux-3.0.3/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61532@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61533 unsigned long vmstart;
61534 unsigned long vmend;
61535
61536+#ifdef CONFIG_PAX_SEGMEXEC
61537+ struct vm_area_struct *vma_m;
61538+#endif
61539+
61540 vma = find_vma_prev(mm, start, &prev);
61541 if (!vma || vma->vm_start > start)
61542 return -EFAULT;
61543@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61544 err = policy_vma(vma, new_pol);
61545 if (err)
61546 goto out;
61547+
61548+#ifdef CONFIG_PAX_SEGMEXEC
61549+ vma_m = pax_find_mirror_vma(vma);
61550+ if (vma_m) {
61551+ err = policy_vma(vma_m, new_pol);
61552+ if (err)
61553+ goto out;
61554+ }
61555+#endif
61556+
61557 }
61558
61559 out:
61560@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61561
61562 if (end < start)
61563 return -EINVAL;
61564+
61565+#ifdef CONFIG_PAX_SEGMEXEC
61566+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61567+ if (end > SEGMEXEC_TASK_SIZE)
61568+ return -EINVAL;
61569+ } else
61570+#endif
61571+
61572+ if (end > TASK_SIZE)
61573+ return -EINVAL;
61574+
61575 if (end == start)
61576 return 0;
61577
61578@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61579 if (!mm)
61580 goto out;
61581
61582+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61583+ if (mm != current->mm &&
61584+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61585+ err = -EPERM;
61586+ goto out;
61587+ }
61588+#endif
61589+
61590 /*
61591 * Check if this process has the right to modify the specified
61592 * process. The right exists if the process has administrative
61593@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61594 rcu_read_lock();
61595 tcred = __task_cred(task);
61596 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61597- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61598- !capable(CAP_SYS_NICE)) {
61599+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61600 rcu_read_unlock();
61601 err = -EPERM;
61602 goto out;
61603diff -urNp linux-3.0.3/mm/migrate.c linux-3.0.3/mm/migrate.c
61604--- linux-3.0.3/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61605+++ linux-3.0.3/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61606@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61607 unsigned long chunk_start;
61608 int err;
61609
61610+ pax_track_stack();
61611+
61612 task_nodes = cpuset_mems_allowed(task);
61613
61614 err = -ENOMEM;
61615@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61616 if (!mm)
61617 return -EINVAL;
61618
61619+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61620+ if (mm != current->mm &&
61621+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61622+ err = -EPERM;
61623+ goto out;
61624+ }
61625+#endif
61626+
61627 /*
61628 * Check if this process has the right to modify the specified
61629 * process. The right exists if the process has administrative
61630@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61631 rcu_read_lock();
61632 tcred = __task_cred(task);
61633 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61634- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61635- !capable(CAP_SYS_NICE)) {
61636+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61637 rcu_read_unlock();
61638 err = -EPERM;
61639 goto out;
61640diff -urNp linux-3.0.3/mm/mlock.c linux-3.0.3/mm/mlock.c
61641--- linux-3.0.3/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61642+++ linux-3.0.3/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61643@@ -13,6 +13,7 @@
61644 #include <linux/pagemap.h>
61645 #include <linux/mempolicy.h>
61646 #include <linux/syscalls.h>
61647+#include <linux/security.h>
61648 #include <linux/sched.h>
61649 #include <linux/module.h>
61650 #include <linux/rmap.h>
61651@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61652 return -EINVAL;
61653 if (end == start)
61654 return 0;
61655+ if (end > TASK_SIZE)
61656+ return -EINVAL;
61657+
61658 vma = find_vma_prev(current->mm, start, &prev);
61659 if (!vma || vma->vm_start > start)
61660 return -ENOMEM;
61661@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61662 for (nstart = start ; ; ) {
61663 vm_flags_t newflags;
61664
61665+#ifdef CONFIG_PAX_SEGMEXEC
61666+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61667+ break;
61668+#endif
61669+
61670 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61671
61672 newflags = vma->vm_flags | VM_LOCKED;
61673@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61674 lock_limit >>= PAGE_SHIFT;
61675
61676 /* check against resource limits */
61677+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61678 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61679 error = do_mlock(start, len, 1);
61680 up_write(&current->mm->mmap_sem);
61681@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61682 static int do_mlockall(int flags)
61683 {
61684 struct vm_area_struct * vma, * prev = NULL;
61685- unsigned int def_flags = 0;
61686
61687 if (flags & MCL_FUTURE)
61688- def_flags = VM_LOCKED;
61689- current->mm->def_flags = def_flags;
61690+ current->mm->def_flags |= VM_LOCKED;
61691+ else
61692+ current->mm->def_flags &= ~VM_LOCKED;
61693 if (flags == MCL_FUTURE)
61694 goto out;
61695
61696 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61697 vm_flags_t newflags;
61698
61699+#ifdef CONFIG_PAX_SEGMEXEC
61700+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61701+ break;
61702+#endif
61703+
61704+ BUG_ON(vma->vm_end > TASK_SIZE);
61705 newflags = vma->vm_flags | VM_LOCKED;
61706 if (!(flags & MCL_CURRENT))
61707 newflags &= ~VM_LOCKED;
61708@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61709 lock_limit >>= PAGE_SHIFT;
61710
61711 ret = -ENOMEM;
61712+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61713 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61714 capable(CAP_IPC_LOCK))
61715 ret = do_mlockall(flags);
61716diff -urNp linux-3.0.3/mm/mmap.c linux-3.0.3/mm/mmap.c
61717--- linux-3.0.3/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61718+++ linux-3.0.3/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61719@@ -46,6 +46,16 @@
61720 #define arch_rebalance_pgtables(addr, len) (addr)
61721 #endif
61722
61723+static inline void verify_mm_writelocked(struct mm_struct *mm)
61724+{
61725+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61726+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61727+ up_read(&mm->mmap_sem);
61728+ BUG();
61729+ }
61730+#endif
61731+}
61732+
61733 static void unmap_region(struct mm_struct *mm,
61734 struct vm_area_struct *vma, struct vm_area_struct *prev,
61735 unsigned long start, unsigned long end);
61736@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61737 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61738 *
61739 */
61740-pgprot_t protection_map[16] = {
61741+pgprot_t protection_map[16] __read_only = {
61742 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61743 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61744 };
61745
61746-pgprot_t vm_get_page_prot(unsigned long vm_flags)
61747+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61748 {
61749- return __pgprot(pgprot_val(protection_map[vm_flags &
61750+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61751 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61752 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61753+
61754+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61755+ if (!(__supported_pte_mask & _PAGE_NX) &&
61756+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61757+ (vm_flags & (VM_READ | VM_WRITE)))
61758+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61759+#endif
61760+
61761+ return prot;
61762 }
61763 EXPORT_SYMBOL(vm_get_page_prot);
61764
61765 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61766 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61767 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61768+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61769 /*
61770 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61771 * other variables. It can be updated by several CPUs frequently.
61772@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61773 struct vm_area_struct *next = vma->vm_next;
61774
61775 might_sleep();
61776+ BUG_ON(vma->vm_mirror);
61777 if (vma->vm_ops && vma->vm_ops->close)
61778 vma->vm_ops->close(vma);
61779 if (vma->vm_file) {
61780@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61781 * not page aligned -Ram Gupta
61782 */
61783 rlim = rlimit(RLIMIT_DATA);
61784+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61785 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61786 (mm->end_data - mm->start_data) > rlim)
61787 goto out;
61788@@ -697,6 +719,12 @@ static int
61789 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61790 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61791 {
61792+
61793+#ifdef CONFIG_PAX_SEGMEXEC
61794+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61795+ return 0;
61796+#endif
61797+
61798 if (is_mergeable_vma(vma, file, vm_flags) &&
61799 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61800 if (vma->vm_pgoff == vm_pgoff)
61801@@ -716,6 +744,12 @@ static int
61802 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61803 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61804 {
61805+
61806+#ifdef CONFIG_PAX_SEGMEXEC
61807+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61808+ return 0;
61809+#endif
61810+
61811 if (is_mergeable_vma(vma, file, vm_flags) &&
61812 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61813 pgoff_t vm_pglen;
61814@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61815 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61816 struct vm_area_struct *prev, unsigned long addr,
61817 unsigned long end, unsigned long vm_flags,
61818- struct anon_vma *anon_vma, struct file *file,
61819+ struct anon_vma *anon_vma, struct file *file,
61820 pgoff_t pgoff, struct mempolicy *policy)
61821 {
61822 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61823 struct vm_area_struct *area, *next;
61824 int err;
61825
61826+#ifdef CONFIG_PAX_SEGMEXEC
61827+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61828+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61829+
61830+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61831+#endif
61832+
61833 /*
61834 * We later require that vma->vm_flags == vm_flags,
61835 * so this tests vma->vm_flags & VM_SPECIAL, too.
61836@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61837 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61838 next = next->vm_next;
61839
61840+#ifdef CONFIG_PAX_SEGMEXEC
61841+ if (prev)
61842+ prev_m = pax_find_mirror_vma(prev);
61843+ if (area)
61844+ area_m = pax_find_mirror_vma(area);
61845+ if (next)
61846+ next_m = pax_find_mirror_vma(next);
61847+#endif
61848+
61849 /*
61850 * Can it merge with the predecessor?
61851 */
61852@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61853 /* cases 1, 6 */
61854 err = vma_adjust(prev, prev->vm_start,
61855 next->vm_end, prev->vm_pgoff, NULL);
61856- } else /* cases 2, 5, 7 */
61857+
61858+#ifdef CONFIG_PAX_SEGMEXEC
61859+ if (!err && prev_m)
61860+ err = vma_adjust(prev_m, prev_m->vm_start,
61861+ next_m->vm_end, prev_m->vm_pgoff, NULL);
61862+#endif
61863+
61864+ } else { /* cases 2, 5, 7 */
61865 err = vma_adjust(prev, prev->vm_start,
61866 end, prev->vm_pgoff, NULL);
61867+
61868+#ifdef CONFIG_PAX_SEGMEXEC
61869+ if (!err && prev_m)
61870+ err = vma_adjust(prev_m, prev_m->vm_start,
61871+ end_m, prev_m->vm_pgoff, NULL);
61872+#endif
61873+
61874+ }
61875 if (err)
61876 return NULL;
61877 khugepaged_enter_vma_merge(prev);
61878@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61879 mpol_equal(policy, vma_policy(next)) &&
61880 can_vma_merge_before(next, vm_flags,
61881 anon_vma, file, pgoff+pglen)) {
61882- if (prev && addr < prev->vm_end) /* case 4 */
61883+ if (prev && addr < prev->vm_end) { /* case 4 */
61884 err = vma_adjust(prev, prev->vm_start,
61885 addr, prev->vm_pgoff, NULL);
61886- else /* cases 3, 8 */
61887+
61888+#ifdef CONFIG_PAX_SEGMEXEC
61889+ if (!err && prev_m)
61890+ err = vma_adjust(prev_m, prev_m->vm_start,
61891+ addr_m, prev_m->vm_pgoff, NULL);
61892+#endif
61893+
61894+ } else { /* cases 3, 8 */
61895 err = vma_adjust(area, addr, next->vm_end,
61896 next->vm_pgoff - pglen, NULL);
61897+
61898+#ifdef CONFIG_PAX_SEGMEXEC
61899+ if (!err && area_m)
61900+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
61901+ next_m->vm_pgoff - pglen, NULL);
61902+#endif
61903+
61904+ }
61905 if (err)
61906 return NULL;
61907 khugepaged_enter_vma_merge(area);
61908@@ -929,14 +1009,11 @@ none:
61909 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61910 struct file *file, long pages)
61911 {
61912- const unsigned long stack_flags
61913- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61914-
61915 if (file) {
61916 mm->shared_vm += pages;
61917 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61918 mm->exec_vm += pages;
61919- } else if (flags & stack_flags)
61920+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61921 mm->stack_vm += pages;
61922 if (flags & (VM_RESERVED|VM_IO))
61923 mm->reserved_vm += pages;
61924@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61925 * (the exception is when the underlying filesystem is noexec
61926 * mounted, in which case we dont add PROT_EXEC.)
61927 */
61928- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61929+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61930 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61931 prot |= PROT_EXEC;
61932
61933@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61934 /* Obtain the address to map to. we verify (or select) it and ensure
61935 * that it represents a valid section of the address space.
61936 */
61937- addr = get_unmapped_area(file, addr, len, pgoff, flags);
61938+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61939 if (addr & ~PAGE_MASK)
61940 return addr;
61941
61942@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61943 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61944 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61945
61946+#ifdef CONFIG_PAX_MPROTECT
61947+ if (mm->pax_flags & MF_PAX_MPROTECT) {
61948+#ifndef CONFIG_PAX_MPROTECT_COMPAT
61949+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61950+ gr_log_rwxmmap(file);
61951+
61952+#ifdef CONFIG_PAX_EMUPLT
61953+ vm_flags &= ~VM_EXEC;
61954+#else
61955+ return -EPERM;
61956+#endif
61957+
61958+ }
61959+
61960+ if (!(vm_flags & VM_EXEC))
61961+ vm_flags &= ~VM_MAYEXEC;
61962+#else
61963+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61964+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61965+#endif
61966+ else
61967+ vm_flags &= ~VM_MAYWRITE;
61968+ }
61969+#endif
61970+
61971+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61972+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61973+ vm_flags &= ~VM_PAGEEXEC;
61974+#endif
61975+
61976 if (flags & MAP_LOCKED)
61977 if (!can_do_mlock())
61978 return -EPERM;
61979@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61980 locked += mm->locked_vm;
61981 lock_limit = rlimit(RLIMIT_MEMLOCK);
61982 lock_limit >>= PAGE_SHIFT;
61983+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61984 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61985 return -EAGAIN;
61986 }
61987@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61988 if (error)
61989 return error;
61990
61991+ if (!gr_acl_handle_mmap(file, prot))
61992+ return -EACCES;
61993+
61994 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61995 }
61996 EXPORT_SYMBOL(do_mmap_pgoff);
61997@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61998 vm_flags_t vm_flags = vma->vm_flags;
61999
62000 /* If it was private or non-writable, the write bit is already clear */
62001- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62002+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62003 return 0;
62004
62005 /* The backer wishes to know when pages are first written to? */
62006@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
62007 unsigned long charged = 0;
62008 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62009
62010+#ifdef CONFIG_PAX_SEGMEXEC
62011+ struct vm_area_struct *vma_m = NULL;
62012+#endif
62013+
62014+ /*
62015+ * mm->mmap_sem is required to protect against another thread
62016+ * changing the mappings in case we sleep.
62017+ */
62018+ verify_mm_writelocked(mm);
62019+
62020 /* Clear old maps */
62021 error = -ENOMEM;
62022-munmap_back:
62023 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62024 if (vma && vma->vm_start < addr + len) {
62025 if (do_munmap(mm, addr, len))
62026 return -ENOMEM;
62027- goto munmap_back;
62028+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62029+ BUG_ON(vma && vma->vm_start < addr + len);
62030 }
62031
62032 /* Check against address space limit. */
62033@@ -1266,6 +1387,16 @@ munmap_back:
62034 goto unacct_error;
62035 }
62036
62037+#ifdef CONFIG_PAX_SEGMEXEC
62038+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62039+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62040+ if (!vma_m) {
62041+ error = -ENOMEM;
62042+ goto free_vma;
62043+ }
62044+ }
62045+#endif
62046+
62047 vma->vm_mm = mm;
62048 vma->vm_start = addr;
62049 vma->vm_end = addr + len;
62050@@ -1289,6 +1420,19 @@ munmap_back:
62051 error = file->f_op->mmap(file, vma);
62052 if (error)
62053 goto unmap_and_free_vma;
62054+
62055+#ifdef CONFIG_PAX_SEGMEXEC
62056+ if (vma_m && (vm_flags & VM_EXECUTABLE))
62057+ added_exe_file_vma(mm);
62058+#endif
62059+
62060+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62061+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62062+ vma->vm_flags |= VM_PAGEEXEC;
62063+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62064+ }
62065+#endif
62066+
62067 if (vm_flags & VM_EXECUTABLE)
62068 added_exe_file_vma(mm);
62069
62070@@ -1324,6 +1468,11 @@ munmap_back:
62071 vma_link(mm, vma, prev, rb_link, rb_parent);
62072 file = vma->vm_file;
62073
62074+#ifdef CONFIG_PAX_SEGMEXEC
62075+ if (vma_m)
62076+ BUG_ON(pax_mirror_vma(vma_m, vma));
62077+#endif
62078+
62079 /* Once vma denies write, undo our temporary denial count */
62080 if (correct_wcount)
62081 atomic_inc(&inode->i_writecount);
62082@@ -1332,6 +1481,7 @@ out:
62083
62084 mm->total_vm += len >> PAGE_SHIFT;
62085 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62086+ track_exec_limit(mm, addr, addr + len, vm_flags);
62087 if (vm_flags & VM_LOCKED) {
62088 if (!mlock_vma_pages_range(vma, addr, addr + len))
62089 mm->locked_vm += (len >> PAGE_SHIFT);
62090@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
62091 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62092 charged = 0;
62093 free_vma:
62094+
62095+#ifdef CONFIG_PAX_SEGMEXEC
62096+ if (vma_m)
62097+ kmem_cache_free(vm_area_cachep, vma_m);
62098+#endif
62099+
62100 kmem_cache_free(vm_area_cachep, vma);
62101 unacct_error:
62102 if (charged)
62103@@ -1356,6 +1512,44 @@ unacct_error:
62104 return error;
62105 }
62106
62107+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62108+{
62109+ if (!vma) {
62110+#ifdef CONFIG_STACK_GROWSUP
62111+ if (addr > sysctl_heap_stack_gap)
62112+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62113+ else
62114+ vma = find_vma(current->mm, 0);
62115+ if (vma && (vma->vm_flags & VM_GROWSUP))
62116+ return false;
62117+#endif
62118+ return true;
62119+ }
62120+
62121+ if (addr + len > vma->vm_start)
62122+ return false;
62123+
62124+ if (vma->vm_flags & VM_GROWSDOWN)
62125+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62126+#ifdef CONFIG_STACK_GROWSUP
62127+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62128+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62129+#endif
62130+
62131+ return true;
62132+}
62133+
62134+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62135+{
62136+ if (vma->vm_start < len)
62137+ return -ENOMEM;
62138+ if (!(vma->vm_flags & VM_GROWSDOWN))
62139+ return vma->vm_start - len;
62140+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
62141+ return vma->vm_start - len - sysctl_heap_stack_gap;
62142+ return -ENOMEM;
62143+}
62144+
62145 /* Get an address range which is currently unmapped.
62146 * For shmat() with addr=0.
62147 *
62148@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
62149 if (flags & MAP_FIXED)
62150 return addr;
62151
62152+#ifdef CONFIG_PAX_RANDMMAP
62153+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62154+#endif
62155+
62156 if (addr) {
62157 addr = PAGE_ALIGN(addr);
62158- vma = find_vma(mm, addr);
62159- if (TASK_SIZE - len >= addr &&
62160- (!vma || addr + len <= vma->vm_start))
62161- return addr;
62162+ if (TASK_SIZE - len >= addr) {
62163+ vma = find_vma(mm, addr);
62164+ if (check_heap_stack_gap(vma, addr, len))
62165+ return addr;
62166+ }
62167 }
62168 if (len > mm->cached_hole_size) {
62169- start_addr = addr = mm->free_area_cache;
62170+ start_addr = addr = mm->free_area_cache;
62171 } else {
62172- start_addr = addr = TASK_UNMAPPED_BASE;
62173- mm->cached_hole_size = 0;
62174+ start_addr = addr = mm->mmap_base;
62175+ mm->cached_hole_size = 0;
62176 }
62177
62178 full_search:
62179@@ -1404,34 +1603,40 @@ full_search:
62180 * Start a new search - just in case we missed
62181 * some holes.
62182 */
62183- if (start_addr != TASK_UNMAPPED_BASE) {
62184- addr = TASK_UNMAPPED_BASE;
62185- start_addr = addr;
62186+ if (start_addr != mm->mmap_base) {
62187+ start_addr = addr = mm->mmap_base;
62188 mm->cached_hole_size = 0;
62189 goto full_search;
62190 }
62191 return -ENOMEM;
62192 }
62193- if (!vma || addr + len <= vma->vm_start) {
62194- /*
62195- * Remember the place where we stopped the search:
62196- */
62197- mm->free_area_cache = addr + len;
62198- return addr;
62199- }
62200+ if (check_heap_stack_gap(vma, addr, len))
62201+ break;
62202 if (addr + mm->cached_hole_size < vma->vm_start)
62203 mm->cached_hole_size = vma->vm_start - addr;
62204 addr = vma->vm_end;
62205 }
62206+
62207+ /*
62208+ * Remember the place where we stopped the search:
62209+ */
62210+ mm->free_area_cache = addr + len;
62211+ return addr;
62212 }
62213 #endif
62214
62215 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62216 {
62217+
62218+#ifdef CONFIG_PAX_SEGMEXEC
62219+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62220+ return;
62221+#endif
62222+
62223 /*
62224 * Is this a new hole at the lowest possible address?
62225 */
62226- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62227+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62228 mm->free_area_cache = addr;
62229 mm->cached_hole_size = ~0UL;
62230 }
62231@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
62232 {
62233 struct vm_area_struct *vma;
62234 struct mm_struct *mm = current->mm;
62235- unsigned long addr = addr0;
62236+ unsigned long base = mm->mmap_base, addr = addr0;
62237
62238 /* requested length too big for entire address space */
62239 if (len > TASK_SIZE)
62240@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
62241 if (flags & MAP_FIXED)
62242 return addr;
62243
62244+#ifdef CONFIG_PAX_RANDMMAP
62245+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62246+#endif
62247+
62248 /* requesting a specific address */
62249 if (addr) {
62250 addr = PAGE_ALIGN(addr);
62251- vma = find_vma(mm, addr);
62252- if (TASK_SIZE - len >= addr &&
62253- (!vma || addr + len <= vma->vm_start))
62254- return addr;
62255+ if (TASK_SIZE - len >= addr) {
62256+ vma = find_vma(mm, addr);
62257+ if (check_heap_stack_gap(vma, addr, len))
62258+ return addr;
62259+ }
62260 }
62261
62262 /* check if free_area_cache is useful for us */
62263@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
62264 /* make sure it can fit in the remaining address space */
62265 if (addr > len) {
62266 vma = find_vma(mm, addr-len);
62267- if (!vma || addr <= vma->vm_start)
62268+ if (check_heap_stack_gap(vma, addr - len, len))
62269 /* remember the address as a hint for next time */
62270 return (mm->free_area_cache = addr-len);
62271 }
62272@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
62273 * return with success:
62274 */
62275 vma = find_vma(mm, addr);
62276- if (!vma || addr+len <= vma->vm_start)
62277+ if (check_heap_stack_gap(vma, addr, len))
62278 /* remember the address as a hint for next time */
62279 return (mm->free_area_cache = addr);
62280
62281@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
62282 mm->cached_hole_size = vma->vm_start - addr;
62283
62284 /* try just below the current vma->vm_start */
62285- addr = vma->vm_start-len;
62286- } while (len < vma->vm_start);
62287+ addr = skip_heap_stack_gap(vma, len);
62288+ } while (!IS_ERR_VALUE(addr));
62289
62290 bottomup:
62291 /*
62292@@ -1515,13 +1725,21 @@ bottomup:
62293 * can happen with large stack limits and large mmap()
62294 * allocations.
62295 */
62296+ mm->mmap_base = TASK_UNMAPPED_BASE;
62297+
62298+#ifdef CONFIG_PAX_RANDMMAP
62299+ if (mm->pax_flags & MF_PAX_RANDMMAP)
62300+ mm->mmap_base += mm->delta_mmap;
62301+#endif
62302+
62303+ mm->free_area_cache = mm->mmap_base;
62304 mm->cached_hole_size = ~0UL;
62305- mm->free_area_cache = TASK_UNMAPPED_BASE;
62306 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62307 /*
62308 * Restore the topdown base:
62309 */
62310- mm->free_area_cache = mm->mmap_base;
62311+ mm->mmap_base = base;
62312+ mm->free_area_cache = base;
62313 mm->cached_hole_size = ~0UL;
62314
62315 return addr;
62316@@ -1530,6 +1748,12 @@ bottomup:
62317
62318 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62319 {
62320+
62321+#ifdef CONFIG_PAX_SEGMEXEC
62322+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62323+ return;
62324+#endif
62325+
62326 /*
62327 * Is this a new hole at the highest possible address?
62328 */
62329@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
62330 mm->free_area_cache = addr;
62331
62332 /* dont allow allocations above current base */
62333- if (mm->free_area_cache > mm->mmap_base)
62334+ if (mm->free_area_cache > mm->mmap_base) {
62335 mm->free_area_cache = mm->mmap_base;
62336+ mm->cached_hole_size = ~0UL;
62337+ }
62338 }
62339
62340 unsigned long
62341@@ -1646,6 +1872,28 @@ out:
62342 return prev ? prev->vm_next : vma;
62343 }
62344
62345+#ifdef CONFIG_PAX_SEGMEXEC
62346+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62347+{
62348+ struct vm_area_struct *vma_m;
62349+
62350+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62351+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62352+ BUG_ON(vma->vm_mirror);
62353+ return NULL;
62354+ }
62355+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62356+ vma_m = vma->vm_mirror;
62357+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62358+ BUG_ON(vma->vm_file != vma_m->vm_file);
62359+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62360+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62361+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62362+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62363+ return vma_m;
62364+}
62365+#endif
62366+
62367 /*
62368 * Verify that the stack growth is acceptable and
62369 * update accounting. This is shared with both the
62370@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62371 return -ENOMEM;
62372
62373 /* Stack limit test */
62374+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
62375 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62376 return -ENOMEM;
62377
62378@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62379 locked = mm->locked_vm + grow;
62380 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62381 limit >>= PAGE_SHIFT;
62382+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62383 if (locked > limit && !capable(CAP_IPC_LOCK))
62384 return -ENOMEM;
62385 }
62386@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62387 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62388 * vma is the last one with address > vma->vm_end. Have to extend vma.
62389 */
62390+#ifndef CONFIG_IA64
62391+static
62392+#endif
62393 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62394 {
62395 int error;
62396+ bool locknext;
62397
62398 if (!(vma->vm_flags & VM_GROWSUP))
62399 return -EFAULT;
62400
62401+ /* Also guard against wrapping around to address 0. */
62402+ if (address < PAGE_ALIGN(address+1))
62403+ address = PAGE_ALIGN(address+1);
62404+ else
62405+ return -ENOMEM;
62406+
62407 /*
62408 * We must make sure the anon_vma is allocated
62409 * so that the anon_vma locking is not a noop.
62410 */
62411 if (unlikely(anon_vma_prepare(vma)))
62412 return -ENOMEM;
62413+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62414+ if (locknext && anon_vma_prepare(vma->vm_next))
62415+ return -ENOMEM;
62416 vma_lock_anon_vma(vma);
62417+ if (locknext)
62418+ vma_lock_anon_vma(vma->vm_next);
62419
62420 /*
62421 * vma->vm_start/vm_end cannot change under us because the caller
62422 * is required to hold the mmap_sem in read mode. We need the
62423- * anon_vma lock to serialize against concurrent expand_stacks.
62424- * Also guard against wrapping around to address 0.
62425+ * anon_vma locks to serialize against concurrent expand_stacks
62426+ * and expand_upwards.
62427 */
62428- if (address < PAGE_ALIGN(address+4))
62429- address = PAGE_ALIGN(address+4);
62430- else {
62431- vma_unlock_anon_vma(vma);
62432- return -ENOMEM;
62433- }
62434 error = 0;
62435
62436 /* Somebody else might have raced and expanded it already */
62437- if (address > vma->vm_end) {
62438+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62439+ error = -ENOMEM;
62440+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62441 unsigned long size, grow;
62442
62443 size = address - vma->vm_start;
62444@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62445 }
62446 }
62447 }
62448+ if (locknext)
62449+ vma_unlock_anon_vma(vma->vm_next);
62450 vma_unlock_anon_vma(vma);
62451 khugepaged_enter_vma_merge(vma);
62452 return error;
62453@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62454 unsigned long address)
62455 {
62456 int error;
62457+ bool lockprev = false;
62458+ struct vm_area_struct *prev;
62459
62460 /*
62461 * We must make sure the anon_vma is allocated
62462@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62463 if (error)
62464 return error;
62465
62466+ prev = vma->vm_prev;
62467+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62468+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62469+#endif
62470+ if (lockprev && anon_vma_prepare(prev))
62471+ return -ENOMEM;
62472+ if (lockprev)
62473+ vma_lock_anon_vma(prev);
62474+
62475 vma_lock_anon_vma(vma);
62476
62477 /*
62478@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62479 */
62480
62481 /* Somebody else might have raced and expanded it already */
62482- if (address < vma->vm_start) {
62483+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62484+ error = -ENOMEM;
62485+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62486 unsigned long size, grow;
62487
62488+#ifdef CONFIG_PAX_SEGMEXEC
62489+ struct vm_area_struct *vma_m;
62490+
62491+ vma_m = pax_find_mirror_vma(vma);
62492+#endif
62493+
62494 size = vma->vm_end - address;
62495 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62496
62497@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62498 if (!error) {
62499 vma->vm_start = address;
62500 vma->vm_pgoff -= grow;
62501+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62502+
62503+#ifdef CONFIG_PAX_SEGMEXEC
62504+ if (vma_m) {
62505+ vma_m->vm_start -= grow << PAGE_SHIFT;
62506+ vma_m->vm_pgoff -= grow;
62507+ }
62508+#endif
62509+
62510 perf_event_mmap(vma);
62511 }
62512 }
62513 }
62514 vma_unlock_anon_vma(vma);
62515+ if (lockprev)
62516+ vma_unlock_anon_vma(prev);
62517 khugepaged_enter_vma_merge(vma);
62518 return error;
62519 }
62520@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62521 do {
62522 long nrpages = vma_pages(vma);
62523
62524+#ifdef CONFIG_PAX_SEGMEXEC
62525+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62526+ vma = remove_vma(vma);
62527+ continue;
62528+ }
62529+#endif
62530+
62531 mm->total_vm -= nrpages;
62532 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62533 vma = remove_vma(vma);
62534@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62535 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62536 vma->vm_prev = NULL;
62537 do {
62538+
62539+#ifdef CONFIG_PAX_SEGMEXEC
62540+ if (vma->vm_mirror) {
62541+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62542+ vma->vm_mirror->vm_mirror = NULL;
62543+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
62544+ vma->vm_mirror = NULL;
62545+ }
62546+#endif
62547+
62548 rb_erase(&vma->vm_rb, &mm->mm_rb);
62549 mm->map_count--;
62550 tail_vma = vma;
62551@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62552 struct vm_area_struct *new;
62553 int err = -ENOMEM;
62554
62555+#ifdef CONFIG_PAX_SEGMEXEC
62556+ struct vm_area_struct *vma_m, *new_m = NULL;
62557+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62558+#endif
62559+
62560 if (is_vm_hugetlb_page(vma) && (addr &
62561 ~(huge_page_mask(hstate_vma(vma)))))
62562 return -EINVAL;
62563
62564+#ifdef CONFIG_PAX_SEGMEXEC
62565+ vma_m = pax_find_mirror_vma(vma);
62566+#endif
62567+
62568 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62569 if (!new)
62570 goto out_err;
62571
62572+#ifdef CONFIG_PAX_SEGMEXEC
62573+ if (vma_m) {
62574+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62575+ if (!new_m) {
62576+ kmem_cache_free(vm_area_cachep, new);
62577+ goto out_err;
62578+ }
62579+ }
62580+#endif
62581+
62582 /* most fields are the same, copy all, and then fixup */
62583 *new = *vma;
62584
62585@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62586 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62587 }
62588
62589+#ifdef CONFIG_PAX_SEGMEXEC
62590+ if (vma_m) {
62591+ *new_m = *vma_m;
62592+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
62593+ new_m->vm_mirror = new;
62594+ new->vm_mirror = new_m;
62595+
62596+ if (new_below)
62597+ new_m->vm_end = addr_m;
62598+ else {
62599+ new_m->vm_start = addr_m;
62600+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62601+ }
62602+ }
62603+#endif
62604+
62605 pol = mpol_dup(vma_policy(vma));
62606 if (IS_ERR(pol)) {
62607 err = PTR_ERR(pol);
62608@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62609 else
62610 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62611
62612+#ifdef CONFIG_PAX_SEGMEXEC
62613+ if (!err && vma_m) {
62614+ if (anon_vma_clone(new_m, vma_m))
62615+ goto out_free_mpol;
62616+
62617+ mpol_get(pol);
62618+ vma_set_policy(new_m, pol);
62619+
62620+ if (new_m->vm_file) {
62621+ get_file(new_m->vm_file);
62622+ if (vma_m->vm_flags & VM_EXECUTABLE)
62623+ added_exe_file_vma(mm);
62624+ }
62625+
62626+ if (new_m->vm_ops && new_m->vm_ops->open)
62627+ new_m->vm_ops->open(new_m);
62628+
62629+ if (new_below)
62630+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62631+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62632+ else
62633+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62634+
62635+ if (err) {
62636+ if (new_m->vm_ops && new_m->vm_ops->close)
62637+ new_m->vm_ops->close(new_m);
62638+ if (new_m->vm_file) {
62639+ if (vma_m->vm_flags & VM_EXECUTABLE)
62640+ removed_exe_file_vma(mm);
62641+ fput(new_m->vm_file);
62642+ }
62643+ mpol_put(pol);
62644+ }
62645+ }
62646+#endif
62647+
62648 /* Success. */
62649 if (!err)
62650 return 0;
62651@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62652 removed_exe_file_vma(mm);
62653 fput(new->vm_file);
62654 }
62655- unlink_anon_vmas(new);
62656 out_free_mpol:
62657 mpol_put(pol);
62658 out_free_vma:
62659+
62660+#ifdef CONFIG_PAX_SEGMEXEC
62661+ if (new_m) {
62662+ unlink_anon_vmas(new_m);
62663+ kmem_cache_free(vm_area_cachep, new_m);
62664+ }
62665+#endif
62666+
62667+ unlink_anon_vmas(new);
62668 kmem_cache_free(vm_area_cachep, new);
62669 out_err:
62670 return err;
62671@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62672 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62673 unsigned long addr, int new_below)
62674 {
62675+
62676+#ifdef CONFIG_PAX_SEGMEXEC
62677+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62678+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62679+ if (mm->map_count >= sysctl_max_map_count-1)
62680+ return -ENOMEM;
62681+ } else
62682+#endif
62683+
62684 if (mm->map_count >= sysctl_max_map_count)
62685 return -ENOMEM;
62686
62687@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62688 * work. This now handles partial unmappings.
62689 * Jeremy Fitzhardinge <jeremy@goop.org>
62690 */
62691+#ifdef CONFIG_PAX_SEGMEXEC
62692 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62693 {
62694+ int ret = __do_munmap(mm, start, len);
62695+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62696+ return ret;
62697+
62698+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62699+}
62700+
62701+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62702+#else
62703+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62704+#endif
62705+{
62706 unsigned long end;
62707 struct vm_area_struct *vma, *prev, *last;
62708
62709+ /*
62710+ * mm->mmap_sem is required to protect against another thread
62711+ * changing the mappings in case we sleep.
62712+ */
62713+ verify_mm_writelocked(mm);
62714+
62715 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62716 return -EINVAL;
62717
62718@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62719 /* Fix up all other VM information */
62720 remove_vma_list(mm, vma);
62721
62722+ track_exec_limit(mm, start, end, 0UL);
62723+
62724 return 0;
62725 }
62726
62727@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62728
62729 profile_munmap(addr);
62730
62731+#ifdef CONFIG_PAX_SEGMEXEC
62732+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62733+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62734+ return -EINVAL;
62735+#endif
62736+
62737 down_write(&mm->mmap_sem);
62738 ret = do_munmap(mm, addr, len);
62739 up_write(&mm->mmap_sem);
62740 return ret;
62741 }
62742
62743-static inline void verify_mm_writelocked(struct mm_struct *mm)
62744-{
62745-#ifdef CONFIG_DEBUG_VM
62746- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62747- WARN_ON(1);
62748- up_read(&mm->mmap_sem);
62749- }
62750-#endif
62751-}
62752-
62753 /*
62754 * this is really a simplified "do_mmap". it only handles
62755 * anonymous maps. eventually we may be able to do some
62756@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62757 struct rb_node ** rb_link, * rb_parent;
62758 pgoff_t pgoff = addr >> PAGE_SHIFT;
62759 int error;
62760+ unsigned long charged;
62761
62762 len = PAGE_ALIGN(len);
62763 if (!len)
62764@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62765
62766 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62767
62768+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62769+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62770+ flags &= ~VM_EXEC;
62771+
62772+#ifdef CONFIG_PAX_MPROTECT
62773+ if (mm->pax_flags & MF_PAX_MPROTECT)
62774+ flags &= ~VM_MAYEXEC;
62775+#endif
62776+
62777+ }
62778+#endif
62779+
62780 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62781 if (error & ~PAGE_MASK)
62782 return error;
62783
62784+ charged = len >> PAGE_SHIFT;
62785+
62786 /*
62787 * mlock MCL_FUTURE?
62788 */
62789 if (mm->def_flags & VM_LOCKED) {
62790 unsigned long locked, lock_limit;
62791- locked = len >> PAGE_SHIFT;
62792+ locked = charged;
62793 locked += mm->locked_vm;
62794 lock_limit = rlimit(RLIMIT_MEMLOCK);
62795 lock_limit >>= PAGE_SHIFT;
62796@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62797 /*
62798 * Clear old maps. this also does some error checking for us
62799 */
62800- munmap_back:
62801 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62802 if (vma && vma->vm_start < addr + len) {
62803 if (do_munmap(mm, addr, len))
62804 return -ENOMEM;
62805- goto munmap_back;
62806+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62807+ BUG_ON(vma && vma->vm_start < addr + len);
62808 }
62809
62810 /* Check against address space limits *after* clearing old maps... */
62811- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62812+ if (!may_expand_vm(mm, charged))
62813 return -ENOMEM;
62814
62815 if (mm->map_count > sysctl_max_map_count)
62816 return -ENOMEM;
62817
62818- if (security_vm_enough_memory(len >> PAGE_SHIFT))
62819+ if (security_vm_enough_memory(charged))
62820 return -ENOMEM;
62821
62822 /* Can we just expand an old private anonymous mapping? */
62823@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62824 */
62825 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62826 if (!vma) {
62827- vm_unacct_memory(len >> PAGE_SHIFT);
62828+ vm_unacct_memory(charged);
62829 return -ENOMEM;
62830 }
62831
62832@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62833 vma_link(mm, vma, prev, rb_link, rb_parent);
62834 out:
62835 perf_event_mmap(vma);
62836- mm->total_vm += len >> PAGE_SHIFT;
62837+ mm->total_vm += charged;
62838 if (flags & VM_LOCKED) {
62839 if (!mlock_vma_pages_range(vma, addr, addr + len))
62840- mm->locked_vm += (len >> PAGE_SHIFT);
62841+ mm->locked_vm += charged;
62842 }
62843+ track_exec_limit(mm, addr, addr + len, flags);
62844 return addr;
62845 }
62846
62847@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62848 * Walk the list again, actually closing and freeing it,
62849 * with preemption enabled, without holding any MM locks.
62850 */
62851- while (vma)
62852+ while (vma) {
62853+ vma->vm_mirror = NULL;
62854 vma = remove_vma(vma);
62855+ }
62856
62857 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62858 }
62859@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62860 struct vm_area_struct * __vma, * prev;
62861 struct rb_node ** rb_link, * rb_parent;
62862
62863+#ifdef CONFIG_PAX_SEGMEXEC
62864+ struct vm_area_struct *vma_m = NULL;
62865+#endif
62866+
62867+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62868+ return -EPERM;
62869+
62870 /*
62871 * The vm_pgoff of a purely anonymous vma should be irrelevant
62872 * until its first write fault, when page's anon_vma and index
62873@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62874 if ((vma->vm_flags & VM_ACCOUNT) &&
62875 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62876 return -ENOMEM;
62877+
62878+#ifdef CONFIG_PAX_SEGMEXEC
62879+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62880+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62881+ if (!vma_m)
62882+ return -ENOMEM;
62883+ }
62884+#endif
62885+
62886 vma_link(mm, vma, prev, rb_link, rb_parent);
62887+
62888+#ifdef CONFIG_PAX_SEGMEXEC
62889+ if (vma_m)
62890+ BUG_ON(pax_mirror_vma(vma_m, vma));
62891+#endif
62892+
62893 return 0;
62894 }
62895
62896@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62897 struct rb_node **rb_link, *rb_parent;
62898 struct mempolicy *pol;
62899
62900+ BUG_ON(vma->vm_mirror);
62901+
62902 /*
62903 * If anonymous vma has not yet been faulted, update new pgoff
62904 * to match new location, to increase its chance of merging.
62905@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62906 return NULL;
62907 }
62908
62909+#ifdef CONFIG_PAX_SEGMEXEC
62910+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62911+{
62912+ struct vm_area_struct *prev_m;
62913+ struct rb_node **rb_link_m, *rb_parent_m;
62914+ struct mempolicy *pol_m;
62915+
62916+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62917+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62918+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62919+ *vma_m = *vma;
62920+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62921+ if (anon_vma_clone(vma_m, vma))
62922+ return -ENOMEM;
62923+ pol_m = vma_policy(vma_m);
62924+ mpol_get(pol_m);
62925+ vma_set_policy(vma_m, pol_m);
62926+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62927+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62928+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62929+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62930+ if (vma_m->vm_file)
62931+ get_file(vma_m->vm_file);
62932+ if (vma_m->vm_ops && vma_m->vm_ops->open)
62933+ vma_m->vm_ops->open(vma_m);
62934+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62935+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62936+ vma_m->vm_mirror = vma;
62937+ vma->vm_mirror = vma_m;
62938+ return 0;
62939+}
62940+#endif
62941+
62942 /*
62943 * Return true if the calling process may expand its vm space by the passed
62944 * number of pages
62945@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62946 unsigned long lim;
62947
62948 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62949-
62950+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62951 if (cur + npages > lim)
62952 return 0;
62953 return 1;
62954@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62955 vma->vm_start = addr;
62956 vma->vm_end = addr + len;
62957
62958+#ifdef CONFIG_PAX_MPROTECT
62959+ if (mm->pax_flags & MF_PAX_MPROTECT) {
62960+#ifndef CONFIG_PAX_MPROTECT_COMPAT
62961+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62962+ return -EPERM;
62963+ if (!(vm_flags & VM_EXEC))
62964+ vm_flags &= ~VM_MAYEXEC;
62965+#else
62966+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62967+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62968+#endif
62969+ else
62970+ vm_flags &= ~VM_MAYWRITE;
62971+ }
62972+#endif
62973+
62974 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62975 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62976
62977diff -urNp linux-3.0.3/mm/mprotect.c linux-3.0.3/mm/mprotect.c
62978--- linux-3.0.3/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62979+++ linux-3.0.3/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62980@@ -23,10 +23,16 @@
62981 #include <linux/mmu_notifier.h>
62982 #include <linux/migrate.h>
62983 #include <linux/perf_event.h>
62984+
62985+#ifdef CONFIG_PAX_MPROTECT
62986+#include <linux/elf.h>
62987+#endif
62988+
62989 #include <asm/uaccess.h>
62990 #include <asm/pgtable.h>
62991 #include <asm/cacheflush.h>
62992 #include <asm/tlbflush.h>
62993+#include <asm/mmu_context.h>
62994
62995 #ifndef pgprot_modify
62996 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62997@@ -141,6 +147,48 @@ static void change_protection(struct vm_
62998 flush_tlb_range(vma, start, end);
62999 }
63000
63001+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63002+/* called while holding the mmap semaphor for writing except stack expansion */
63003+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63004+{
63005+ unsigned long oldlimit, newlimit = 0UL;
63006+
63007+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63008+ return;
63009+
63010+ spin_lock(&mm->page_table_lock);
63011+ oldlimit = mm->context.user_cs_limit;
63012+ if ((prot & VM_EXEC) && oldlimit < end)
63013+ /* USER_CS limit moved up */
63014+ newlimit = end;
63015+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63016+ /* USER_CS limit moved down */
63017+ newlimit = start;
63018+
63019+ if (newlimit) {
63020+ mm->context.user_cs_limit = newlimit;
63021+
63022+#ifdef CONFIG_SMP
63023+ wmb();
63024+ cpus_clear(mm->context.cpu_user_cs_mask);
63025+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63026+#endif
63027+
63028+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63029+ }
63030+ spin_unlock(&mm->page_table_lock);
63031+ if (newlimit == end) {
63032+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
63033+
63034+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
63035+ if (is_vm_hugetlb_page(vma))
63036+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63037+ else
63038+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63039+ }
63040+}
63041+#endif
63042+
63043 int
63044 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63045 unsigned long start, unsigned long end, unsigned long newflags)
63046@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63047 int error;
63048 int dirty_accountable = 0;
63049
63050+#ifdef CONFIG_PAX_SEGMEXEC
63051+ struct vm_area_struct *vma_m = NULL;
63052+ unsigned long start_m, end_m;
63053+
63054+ start_m = start + SEGMEXEC_TASK_SIZE;
63055+ end_m = end + SEGMEXEC_TASK_SIZE;
63056+#endif
63057+
63058 if (newflags == oldflags) {
63059 *pprev = vma;
63060 return 0;
63061 }
63062
63063+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63064+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63065+
63066+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63067+ return -ENOMEM;
63068+
63069+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63070+ return -ENOMEM;
63071+ }
63072+
63073 /*
63074 * If we make a private mapping writable we increase our commit;
63075 * but (without finer accounting) cannot reduce our commit if we
63076@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63077 }
63078 }
63079
63080+#ifdef CONFIG_PAX_SEGMEXEC
63081+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63082+ if (start != vma->vm_start) {
63083+ error = split_vma(mm, vma, start, 1);
63084+ if (error)
63085+ goto fail;
63086+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63087+ *pprev = (*pprev)->vm_next;
63088+ }
63089+
63090+ if (end != vma->vm_end) {
63091+ error = split_vma(mm, vma, end, 0);
63092+ if (error)
63093+ goto fail;
63094+ }
63095+
63096+ if (pax_find_mirror_vma(vma)) {
63097+ error = __do_munmap(mm, start_m, end_m - start_m);
63098+ if (error)
63099+ goto fail;
63100+ } else {
63101+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63102+ if (!vma_m) {
63103+ error = -ENOMEM;
63104+ goto fail;
63105+ }
63106+ vma->vm_flags = newflags;
63107+ error = pax_mirror_vma(vma_m, vma);
63108+ if (error) {
63109+ vma->vm_flags = oldflags;
63110+ goto fail;
63111+ }
63112+ }
63113+ }
63114+#endif
63115+
63116 /*
63117 * First try to merge with previous and/or next vma.
63118 */
63119@@ -204,9 +306,21 @@ success:
63120 * vm_flags and vm_page_prot are protected by the mmap_sem
63121 * held in write mode.
63122 */
63123+
63124+#ifdef CONFIG_PAX_SEGMEXEC
63125+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63126+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63127+#endif
63128+
63129 vma->vm_flags = newflags;
63130+
63131+#ifdef CONFIG_PAX_MPROTECT
63132+ if (mm->binfmt && mm->binfmt->handle_mprotect)
63133+ mm->binfmt->handle_mprotect(vma, newflags);
63134+#endif
63135+
63136 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63137- vm_get_page_prot(newflags));
63138+ vm_get_page_prot(vma->vm_flags));
63139
63140 if (vma_wants_writenotify(vma)) {
63141 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63142@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63143 end = start + len;
63144 if (end <= start)
63145 return -ENOMEM;
63146+
63147+#ifdef CONFIG_PAX_SEGMEXEC
63148+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63149+ if (end > SEGMEXEC_TASK_SIZE)
63150+ return -EINVAL;
63151+ } else
63152+#endif
63153+
63154+ if (end > TASK_SIZE)
63155+ return -EINVAL;
63156+
63157 if (!arch_validate_prot(prot))
63158 return -EINVAL;
63159
63160@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63161 /*
63162 * Does the application expect PROT_READ to imply PROT_EXEC:
63163 */
63164- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63165+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63166 prot |= PROT_EXEC;
63167
63168 vm_flags = calc_vm_prot_bits(prot);
63169@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63170 if (start > vma->vm_start)
63171 prev = vma;
63172
63173+#ifdef CONFIG_PAX_MPROTECT
63174+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63175+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
63176+#endif
63177+
63178 for (nstart = start ; ; ) {
63179 unsigned long newflags;
63180
63181@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63182
63183 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63184 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63185+ if (prot & (PROT_WRITE | PROT_EXEC))
63186+ gr_log_rwxmprotect(vma->vm_file);
63187+
63188+ error = -EACCES;
63189+ goto out;
63190+ }
63191+
63192+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63193 error = -EACCES;
63194 goto out;
63195 }
63196@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63197 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63198 if (error)
63199 goto out;
63200+
63201+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
63202+
63203 nstart = tmp;
63204
63205 if (nstart < prev->vm_end)
63206diff -urNp linux-3.0.3/mm/mremap.c linux-3.0.3/mm/mremap.c
63207--- linux-3.0.3/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
63208+++ linux-3.0.3/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
63209@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
63210 continue;
63211 pte = ptep_clear_flush(vma, old_addr, old_pte);
63212 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63213+
63214+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63215+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63216+ pte = pte_exprotect(pte);
63217+#endif
63218+
63219 set_pte_at(mm, new_addr, new_pte, pte);
63220 }
63221
63222@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
63223 if (is_vm_hugetlb_page(vma))
63224 goto Einval;
63225
63226+#ifdef CONFIG_PAX_SEGMEXEC
63227+ if (pax_find_mirror_vma(vma))
63228+ goto Einval;
63229+#endif
63230+
63231 /* We can't remap across vm area boundaries */
63232 if (old_len > vma->vm_end - addr)
63233 goto Efault;
63234@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
63235 unsigned long ret = -EINVAL;
63236 unsigned long charged = 0;
63237 unsigned long map_flags;
63238+ unsigned long pax_task_size = TASK_SIZE;
63239
63240 if (new_addr & ~PAGE_MASK)
63241 goto out;
63242
63243- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63244+#ifdef CONFIG_PAX_SEGMEXEC
63245+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63246+ pax_task_size = SEGMEXEC_TASK_SIZE;
63247+#endif
63248+
63249+ pax_task_size -= PAGE_SIZE;
63250+
63251+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63252 goto out;
63253
63254 /* Check if the location we're moving into overlaps the
63255 * old location at all, and fail if it does.
63256 */
63257- if ((new_addr <= addr) && (new_addr+new_len) > addr)
63258- goto out;
63259-
63260- if ((addr <= new_addr) && (addr+old_len) > new_addr)
63261+ if (addr + old_len > new_addr && new_addr + new_len > addr)
63262 goto out;
63263
63264 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63265@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
63266 struct vm_area_struct *vma;
63267 unsigned long ret = -EINVAL;
63268 unsigned long charged = 0;
63269+ unsigned long pax_task_size = TASK_SIZE;
63270
63271 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63272 goto out;
63273@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
63274 if (!new_len)
63275 goto out;
63276
63277+#ifdef CONFIG_PAX_SEGMEXEC
63278+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63279+ pax_task_size = SEGMEXEC_TASK_SIZE;
63280+#endif
63281+
63282+ pax_task_size -= PAGE_SIZE;
63283+
63284+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63285+ old_len > pax_task_size || addr > pax_task_size-old_len)
63286+ goto out;
63287+
63288 if (flags & MREMAP_FIXED) {
63289 if (flags & MREMAP_MAYMOVE)
63290 ret = mremap_to(addr, old_len, new_addr, new_len);
63291@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
63292 addr + new_len);
63293 }
63294 ret = addr;
63295+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63296 goto out;
63297 }
63298 }
63299@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
63300 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63301 if (ret)
63302 goto out;
63303+
63304+ map_flags = vma->vm_flags;
63305 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63306+ if (!(ret & ~PAGE_MASK)) {
63307+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63308+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63309+ }
63310 }
63311 out:
63312 if (ret & ~PAGE_MASK)
63313diff -urNp linux-3.0.3/mm/nobootmem.c linux-3.0.3/mm/nobootmem.c
63314--- linux-3.0.3/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
63315+++ linux-3.0.3/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
63316@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63317 unsigned long __init free_all_memory_core_early(int nodeid)
63318 {
63319 int i;
63320- u64 start, end;
63321+ u64 start, end, startrange, endrange;
63322 unsigned long count = 0;
63323- struct range *range = NULL;
63324+ struct range *range = NULL, rangerange = { 0, 0 };
63325 int nr_range;
63326
63327 nr_range = get_free_all_memory_range(&range, nodeid);
63328+ startrange = __pa(range) >> PAGE_SHIFT;
63329+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63330
63331 for (i = 0; i < nr_range; i++) {
63332 start = range[i].start;
63333 end = range[i].end;
63334+ if (start <= endrange && startrange < end) {
63335+ BUG_ON(rangerange.start | rangerange.end);
63336+ rangerange = range[i];
63337+ continue;
63338+ }
63339 count += end - start;
63340 __free_pages_memory(start, end);
63341 }
63342+ start = rangerange.start;
63343+ end = rangerange.end;
63344+ count += end - start;
63345+ __free_pages_memory(start, end);
63346
63347 return count;
63348 }
63349diff -urNp linux-3.0.3/mm/nommu.c linux-3.0.3/mm/nommu.c
63350--- linux-3.0.3/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63351+++ linux-3.0.3/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63352@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63353 int sysctl_overcommit_ratio = 50; /* default is 50% */
63354 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63355 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63356-int heap_stack_gap = 0;
63357
63358 atomic_long_t mmap_pages_allocated;
63359
63360@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63361 EXPORT_SYMBOL(find_vma);
63362
63363 /*
63364- * find a VMA
63365- * - we don't extend stack VMAs under NOMMU conditions
63366- */
63367-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63368-{
63369- return find_vma(mm, addr);
63370-}
63371-
63372-/*
63373 * expand a stack to a given address
63374 * - not supported under NOMMU conditions
63375 */
63376@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63377
63378 /* most fields are the same, copy all, and then fixup */
63379 *new = *vma;
63380+ INIT_LIST_HEAD(&new->anon_vma_chain);
63381 *region = *vma->vm_region;
63382 new->vm_region = region;
63383
63384diff -urNp linux-3.0.3/mm/page_alloc.c linux-3.0.3/mm/page_alloc.c
63385--- linux-3.0.3/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63386+++ linux-3.0.3/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63387@@ -340,7 +340,7 @@ out:
63388 * This usage means that zero-order pages may not be compound.
63389 */
63390
63391-static void free_compound_page(struct page *page)
63392+void free_compound_page(struct page *page)
63393 {
63394 __free_pages_ok(page, compound_order(page));
63395 }
63396@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63397 int i;
63398 int bad = 0;
63399
63400+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63401+ unsigned long index = 1UL << order;
63402+#endif
63403+
63404 trace_mm_page_free_direct(page, order);
63405 kmemcheck_free_shadow(page, order);
63406
63407@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63408 debug_check_no_obj_freed(page_address(page),
63409 PAGE_SIZE << order);
63410 }
63411+
63412+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63413+ for (; index; --index)
63414+ sanitize_highpage(page + index - 1);
63415+#endif
63416+
63417 arch_free_page(page, order);
63418 kernel_map_pages(page, 1 << order, 0);
63419
63420@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63421 arch_alloc_page(page, order);
63422 kernel_map_pages(page, 1 << order, 1);
63423
63424+#ifndef CONFIG_PAX_MEMORY_SANITIZE
63425 if (gfp_flags & __GFP_ZERO)
63426 prep_zero_page(page, order, gfp_flags);
63427+#endif
63428
63429 if (order && (gfp_flags & __GFP_COMP))
63430 prep_compound_page(page, order);
63431@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63432 int cpu;
63433 struct zone *zone;
63434
63435+ pax_track_stack();
63436+
63437 for_each_populated_zone(zone) {
63438 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63439 continue;
63440diff -urNp linux-3.0.3/mm/percpu.c linux-3.0.3/mm/percpu.c
63441--- linux-3.0.3/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63442+++ linux-3.0.3/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63443@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63444 static unsigned int pcpu_last_unit_cpu __read_mostly;
63445
63446 /* the address of the first chunk which starts with the kernel static area */
63447-void *pcpu_base_addr __read_mostly;
63448+void *pcpu_base_addr __read_only;
63449 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63450
63451 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63452diff -urNp linux-3.0.3/mm/rmap.c linux-3.0.3/mm/rmap.c
63453--- linux-3.0.3/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63454+++ linux-3.0.3/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63455@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63456 struct anon_vma *anon_vma = vma->anon_vma;
63457 struct anon_vma_chain *avc;
63458
63459+#ifdef CONFIG_PAX_SEGMEXEC
63460+ struct anon_vma_chain *avc_m = NULL;
63461+#endif
63462+
63463 might_sleep();
63464 if (unlikely(!anon_vma)) {
63465 struct mm_struct *mm = vma->vm_mm;
63466@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63467 if (!avc)
63468 goto out_enomem;
63469
63470+#ifdef CONFIG_PAX_SEGMEXEC
63471+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63472+ if (!avc_m)
63473+ goto out_enomem_free_avc;
63474+#endif
63475+
63476 anon_vma = find_mergeable_anon_vma(vma);
63477 allocated = NULL;
63478 if (!anon_vma) {
63479@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63480 /* page_table_lock to protect against threads */
63481 spin_lock(&mm->page_table_lock);
63482 if (likely(!vma->anon_vma)) {
63483+
63484+#ifdef CONFIG_PAX_SEGMEXEC
63485+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63486+
63487+ if (vma_m) {
63488+ BUG_ON(vma_m->anon_vma);
63489+ vma_m->anon_vma = anon_vma;
63490+ avc_m->anon_vma = anon_vma;
63491+ avc_m->vma = vma;
63492+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63493+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
63494+ avc_m = NULL;
63495+ }
63496+#endif
63497+
63498 vma->anon_vma = anon_vma;
63499 avc->anon_vma = anon_vma;
63500 avc->vma = vma;
63501@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63502
63503 if (unlikely(allocated))
63504 put_anon_vma(allocated);
63505+
63506+#ifdef CONFIG_PAX_SEGMEXEC
63507+ if (unlikely(avc_m))
63508+ anon_vma_chain_free(avc_m);
63509+#endif
63510+
63511 if (unlikely(avc))
63512 anon_vma_chain_free(avc);
63513 }
63514 return 0;
63515
63516 out_enomem_free_avc:
63517+
63518+#ifdef CONFIG_PAX_SEGMEXEC
63519+ if (avc_m)
63520+ anon_vma_chain_free(avc_m);
63521+#endif
63522+
63523 anon_vma_chain_free(avc);
63524 out_enomem:
63525 return -ENOMEM;
63526@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63527 * Attach the anon_vmas from src to dst.
63528 * Returns 0 on success, -ENOMEM on failure.
63529 */
63530-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63531+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63532 {
63533 struct anon_vma_chain *avc, *pavc;
63534 struct anon_vma *root = NULL;
63535@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63536 * the corresponding VMA in the parent process is attached to.
63537 * Returns 0 on success, non-zero on failure.
63538 */
63539-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63540+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63541 {
63542 struct anon_vma_chain *avc;
63543 struct anon_vma *anon_vma;
63544diff -urNp linux-3.0.3/mm/shmem.c linux-3.0.3/mm/shmem.c
63545--- linux-3.0.3/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63546+++ linux-3.0.3/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63547@@ -31,7 +31,7 @@
63548 #include <linux/percpu_counter.h>
63549 #include <linux/swap.h>
63550
63551-static struct vfsmount *shm_mnt;
63552+struct vfsmount *shm_mnt;
63553
63554 #ifdef CONFIG_SHMEM
63555 /*
63556@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63557 goto unlock;
63558 }
63559 entry = shmem_swp_entry(info, index, NULL);
63560+ if (!entry)
63561+ goto unlock;
63562 if (entry->val) {
63563 /*
63564 * The more uptodate page coming down from a stacked
63565@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63566 struct vm_area_struct pvma;
63567 struct page *page;
63568
63569+ pax_track_stack();
63570+
63571 spol = mpol_cond_copy(&mpol,
63572 mpol_shared_policy_lookup(&info->policy, idx));
63573
63574@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63575 int err = -ENOMEM;
63576
63577 /* Round up to L1_CACHE_BYTES to resist false sharing */
63578- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63579- L1_CACHE_BYTES), GFP_KERNEL);
63580+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63581 if (!sbinfo)
63582 return -ENOMEM;
63583
63584diff -urNp linux-3.0.3/mm/slab.c linux-3.0.3/mm/slab.c
63585--- linux-3.0.3/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63586+++ linux-3.0.3/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63587@@ -151,7 +151,7 @@
63588
63589 /* Legal flag mask for kmem_cache_create(). */
63590 #if DEBUG
63591-# define CREATE_MASK (SLAB_RED_ZONE | \
63592+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63593 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63594 SLAB_CACHE_DMA | \
63595 SLAB_STORE_USER | \
63596@@ -159,7 +159,7 @@
63597 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63598 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63599 #else
63600-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63601+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63602 SLAB_CACHE_DMA | \
63603 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63604 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63605@@ -288,7 +288,7 @@ struct kmem_list3 {
63606 * Need this for bootstrapping a per node allocator.
63607 */
63608 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63609-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63610+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63611 #define CACHE_CACHE 0
63612 #define SIZE_AC MAX_NUMNODES
63613 #define SIZE_L3 (2 * MAX_NUMNODES)
63614@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63615 if ((x)->max_freeable < i) \
63616 (x)->max_freeable = i; \
63617 } while (0)
63618-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63619-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63620-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63621-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63622+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63623+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63624+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63625+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63626 #else
63627 #define STATS_INC_ACTIVE(x) do { } while (0)
63628 #define STATS_DEC_ACTIVE(x) do { } while (0)
63629@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63630 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63631 */
63632 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63633- const struct slab *slab, void *obj)
63634+ const struct slab *slab, const void *obj)
63635 {
63636 u32 offset = (obj - slab->s_mem);
63637 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63638@@ -564,7 +564,7 @@ struct cache_names {
63639 static struct cache_names __initdata cache_names[] = {
63640 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63641 #include <linux/kmalloc_sizes.h>
63642- {NULL,}
63643+ {NULL}
63644 #undef CACHE
63645 };
63646
63647@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63648 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63649 sizes[INDEX_AC].cs_size,
63650 ARCH_KMALLOC_MINALIGN,
63651- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63652+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63653 NULL);
63654
63655 if (INDEX_AC != INDEX_L3) {
63656@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63657 kmem_cache_create(names[INDEX_L3].name,
63658 sizes[INDEX_L3].cs_size,
63659 ARCH_KMALLOC_MINALIGN,
63660- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63661+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63662 NULL);
63663 }
63664
63665@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63666 sizes->cs_cachep = kmem_cache_create(names->name,
63667 sizes->cs_size,
63668 ARCH_KMALLOC_MINALIGN,
63669- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63670+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63671 NULL);
63672 }
63673 #ifdef CONFIG_ZONE_DMA
63674@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63675 }
63676 /* cpu stats */
63677 {
63678- unsigned long allochit = atomic_read(&cachep->allochit);
63679- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63680- unsigned long freehit = atomic_read(&cachep->freehit);
63681- unsigned long freemiss = atomic_read(&cachep->freemiss);
63682+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63683+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63684+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63685+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63686
63687 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63688 allochit, allocmiss, freehit, freemiss);
63689@@ -4532,15 +4532,66 @@ static const struct file_operations proc
63690
63691 static int __init slab_proc_init(void)
63692 {
63693- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63694+ mode_t gr_mode = S_IRUGO;
63695+
63696+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63697+ gr_mode = S_IRUSR;
63698+#endif
63699+
63700+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63701 #ifdef CONFIG_DEBUG_SLAB_LEAK
63702- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63703+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63704 #endif
63705 return 0;
63706 }
63707 module_init(slab_proc_init);
63708 #endif
63709
63710+void check_object_size(const void *ptr, unsigned long n, bool to)
63711+{
63712+
63713+#ifdef CONFIG_PAX_USERCOPY
63714+ struct page *page;
63715+ struct kmem_cache *cachep = NULL;
63716+ struct slab *slabp;
63717+ unsigned int objnr;
63718+ unsigned long offset;
63719+
63720+ if (!n)
63721+ return;
63722+
63723+ if (ZERO_OR_NULL_PTR(ptr))
63724+ goto report;
63725+
63726+ if (!virt_addr_valid(ptr))
63727+ return;
63728+
63729+ page = virt_to_head_page(ptr);
63730+
63731+ if (!PageSlab(page)) {
63732+ if (object_is_on_stack(ptr, n) == -1)
63733+ goto report;
63734+ return;
63735+ }
63736+
63737+ cachep = page_get_cache(page);
63738+ if (!(cachep->flags & SLAB_USERCOPY))
63739+ goto report;
63740+
63741+ slabp = page_get_slab(page);
63742+ objnr = obj_to_index(cachep, slabp, ptr);
63743+ BUG_ON(objnr >= cachep->num);
63744+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63745+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63746+ return;
63747+
63748+report:
63749+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63750+#endif
63751+
63752+}
63753+EXPORT_SYMBOL(check_object_size);
63754+
63755 /**
63756 * ksize - get the actual amount of memory allocated for a given object
63757 * @objp: Pointer to the object
63758diff -urNp linux-3.0.3/mm/slob.c linux-3.0.3/mm/slob.c
63759--- linux-3.0.3/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63760+++ linux-3.0.3/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63761@@ -29,7 +29,7 @@
63762 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63763 * alloc_pages() directly, allocating compound pages so the page order
63764 * does not have to be separately tracked, and also stores the exact
63765- * allocation size in page->private so that it can be used to accurately
63766+ * allocation size in slob_page->size so that it can be used to accurately
63767 * provide ksize(). These objects are detected in kfree() because slob_page()
63768 * is false for them.
63769 *
63770@@ -58,6 +58,7 @@
63771 */
63772
63773 #include <linux/kernel.h>
63774+#include <linux/sched.h>
63775 #include <linux/slab.h>
63776 #include <linux/mm.h>
63777 #include <linux/swap.h> /* struct reclaim_state */
63778@@ -102,7 +103,8 @@ struct slob_page {
63779 unsigned long flags; /* mandatory */
63780 atomic_t _count; /* mandatory */
63781 slobidx_t units; /* free units left in page */
63782- unsigned long pad[2];
63783+ unsigned long pad[1];
63784+ unsigned long size; /* size when >=PAGE_SIZE */
63785 slob_t *free; /* first free slob_t in page */
63786 struct list_head list; /* linked list of free pages */
63787 };
63788@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63789 */
63790 static inline int is_slob_page(struct slob_page *sp)
63791 {
63792- return PageSlab((struct page *)sp);
63793+ return PageSlab((struct page *)sp) && !sp->size;
63794 }
63795
63796 static inline void set_slob_page(struct slob_page *sp)
63797@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63798
63799 static inline struct slob_page *slob_page(const void *addr)
63800 {
63801- return (struct slob_page *)virt_to_page(addr);
63802+ return (struct slob_page *)virt_to_head_page(addr);
63803 }
63804
63805 /*
63806@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63807 /*
63808 * Return the size of a slob block.
63809 */
63810-static slobidx_t slob_units(slob_t *s)
63811+static slobidx_t slob_units(const slob_t *s)
63812 {
63813 if (s->units > 0)
63814 return s->units;
63815@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63816 /*
63817 * Return the next free slob block pointer after this one.
63818 */
63819-static slob_t *slob_next(slob_t *s)
63820+static slob_t *slob_next(const slob_t *s)
63821 {
63822 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63823 slobidx_t next;
63824@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63825 /*
63826 * Returns true if s is the last free block in its page.
63827 */
63828-static int slob_last(slob_t *s)
63829+static int slob_last(const slob_t *s)
63830 {
63831 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63832 }
63833@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63834 if (!page)
63835 return NULL;
63836
63837+ set_slob_page(page);
63838 return page_address(page);
63839 }
63840
63841@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63842 if (!b)
63843 return NULL;
63844 sp = slob_page(b);
63845- set_slob_page(sp);
63846
63847 spin_lock_irqsave(&slob_lock, flags);
63848 sp->units = SLOB_UNITS(PAGE_SIZE);
63849 sp->free = b;
63850+ sp->size = 0;
63851 INIT_LIST_HEAD(&sp->list);
63852 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63853 set_slob_page_free(sp, slob_list);
63854@@ -476,10 +479,9 @@ out:
63855 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63856 */
63857
63858-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63859+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63860 {
63861- unsigned int *m;
63862- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63863+ slob_t *m;
63864 void *ret;
63865
63866 lockdep_trace_alloc(gfp);
63867@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63868
63869 if (!m)
63870 return NULL;
63871- *m = size;
63872+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63873+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63874+ m[0].units = size;
63875+ m[1].units = align;
63876 ret = (void *)m + align;
63877
63878 trace_kmalloc_node(_RET_IP_, ret,
63879@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63880 gfp |= __GFP_COMP;
63881 ret = slob_new_pages(gfp, order, node);
63882 if (ret) {
63883- struct page *page;
63884- page = virt_to_page(ret);
63885- page->private = size;
63886+ struct slob_page *sp;
63887+ sp = slob_page(ret);
63888+ sp->size = size;
63889 }
63890
63891 trace_kmalloc_node(_RET_IP_, ret,
63892 size, PAGE_SIZE << order, gfp, node);
63893 }
63894
63895- kmemleak_alloc(ret, size, 1, gfp);
63896+ return ret;
63897+}
63898+
63899+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63900+{
63901+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63902+ void *ret = __kmalloc_node_align(size, gfp, node, align);
63903+
63904+ if (!ZERO_OR_NULL_PTR(ret))
63905+ kmemleak_alloc(ret, size, 1, gfp);
63906 return ret;
63907 }
63908 EXPORT_SYMBOL(__kmalloc_node);
63909@@ -531,13 +545,88 @@ void kfree(const void *block)
63910 sp = slob_page(block);
63911 if (is_slob_page(sp)) {
63912 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63913- unsigned int *m = (unsigned int *)(block - align);
63914- slob_free(m, *m + align);
63915- } else
63916+ slob_t *m = (slob_t *)(block - align);
63917+ slob_free(m, m[0].units + align);
63918+ } else {
63919+ clear_slob_page(sp);
63920+ free_slob_page(sp);
63921+ sp->size = 0;
63922 put_page(&sp->page);
63923+ }
63924 }
63925 EXPORT_SYMBOL(kfree);
63926
63927+void check_object_size(const void *ptr, unsigned long n, bool to)
63928+{
63929+
63930+#ifdef CONFIG_PAX_USERCOPY
63931+ struct slob_page *sp;
63932+ const slob_t *free;
63933+ const void *base;
63934+ unsigned long flags;
63935+
63936+ if (!n)
63937+ return;
63938+
63939+ if (ZERO_OR_NULL_PTR(ptr))
63940+ goto report;
63941+
63942+ if (!virt_addr_valid(ptr))
63943+ return;
63944+
63945+ sp = slob_page(ptr);
63946+ if (!PageSlab((struct page*)sp)) {
63947+ if (object_is_on_stack(ptr, n) == -1)
63948+ goto report;
63949+ return;
63950+ }
63951+
63952+ if (sp->size) {
63953+ base = page_address(&sp->page);
63954+ if (base <= ptr && n <= sp->size - (ptr - base))
63955+ return;
63956+ goto report;
63957+ }
63958+
63959+ /* some tricky double walking to find the chunk */
63960+ spin_lock_irqsave(&slob_lock, flags);
63961+ base = (void *)((unsigned long)ptr & PAGE_MASK);
63962+ free = sp->free;
63963+
63964+ while (!slob_last(free) && (void *)free <= ptr) {
63965+ base = free + slob_units(free);
63966+ free = slob_next(free);
63967+ }
63968+
63969+ while (base < (void *)free) {
63970+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63971+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
63972+ int offset;
63973+
63974+ if (ptr < base + align)
63975+ break;
63976+
63977+ offset = ptr - base - align;
63978+ if (offset >= m) {
63979+ base += size;
63980+ continue;
63981+ }
63982+
63983+ if (n > m - offset)
63984+ break;
63985+
63986+ spin_unlock_irqrestore(&slob_lock, flags);
63987+ return;
63988+ }
63989+
63990+ spin_unlock_irqrestore(&slob_lock, flags);
63991+report:
63992+ pax_report_usercopy(ptr, n, to, NULL);
63993+#endif
63994+
63995+}
63996+EXPORT_SYMBOL(check_object_size);
63997+
63998 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63999 size_t ksize(const void *block)
64000 {
64001@@ -550,10 +639,10 @@ size_t ksize(const void *block)
64002 sp = slob_page(block);
64003 if (is_slob_page(sp)) {
64004 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64005- unsigned int *m = (unsigned int *)(block - align);
64006- return SLOB_UNITS(*m) * SLOB_UNIT;
64007+ slob_t *m = (slob_t *)(block - align);
64008+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64009 } else
64010- return sp->page.private;
64011+ return sp->size;
64012 }
64013 EXPORT_SYMBOL(ksize);
64014
64015@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64016 {
64017 struct kmem_cache *c;
64018
64019+#ifdef CONFIG_PAX_USERCOPY
64020+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
64021+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64022+#else
64023 c = slob_alloc(sizeof(struct kmem_cache),
64024 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64025+#endif
64026
64027 if (c) {
64028 c->name = name;
64029@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64030 {
64031 void *b;
64032
64033+#ifdef CONFIG_PAX_USERCOPY
64034+ b = __kmalloc_node_align(c->size, flags, node, c->align);
64035+#else
64036 if (c->size < PAGE_SIZE) {
64037 b = slob_alloc(c->size, flags, c->align, node);
64038 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64039 SLOB_UNITS(c->size) * SLOB_UNIT,
64040 flags, node);
64041 } else {
64042+ struct slob_page *sp;
64043+
64044 b = slob_new_pages(flags, get_order(c->size), node);
64045+ sp = slob_page(b);
64046+ sp->size = c->size;
64047 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64048 PAGE_SIZE << get_order(c->size),
64049 flags, node);
64050 }
64051+#endif
64052
64053 if (c->ctor)
64054 c->ctor(b);
64055@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64056
64057 static void __kmem_cache_free(void *b, int size)
64058 {
64059- if (size < PAGE_SIZE)
64060+ struct slob_page *sp = slob_page(b);
64061+
64062+ if (is_slob_page(sp))
64063 slob_free(b, size);
64064- else
64065+ else {
64066+ clear_slob_page(sp);
64067+ free_slob_page(sp);
64068+ sp->size = 0;
64069 slob_free_pages(b, get_order(size));
64070+ }
64071 }
64072
64073 static void kmem_rcu_free(struct rcu_head *head)
64074@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64075
64076 void kmem_cache_free(struct kmem_cache *c, void *b)
64077 {
64078+ int size = c->size;
64079+
64080+#ifdef CONFIG_PAX_USERCOPY
64081+ if (size + c->align < PAGE_SIZE) {
64082+ size += c->align;
64083+ b -= c->align;
64084+ }
64085+#endif
64086+
64087 kmemleak_free_recursive(b, c->flags);
64088 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64089 struct slob_rcu *slob_rcu;
64090- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64091- slob_rcu->size = c->size;
64092+ slob_rcu = b + (size - sizeof(struct slob_rcu));
64093+ slob_rcu->size = size;
64094 call_rcu(&slob_rcu->head, kmem_rcu_free);
64095 } else {
64096- __kmem_cache_free(b, c->size);
64097+ __kmem_cache_free(b, size);
64098 }
64099
64100+#ifdef CONFIG_PAX_USERCOPY
64101+ trace_kfree(_RET_IP_, b);
64102+#else
64103 trace_kmem_cache_free(_RET_IP_, b);
64104+#endif
64105+
64106 }
64107 EXPORT_SYMBOL(kmem_cache_free);
64108
64109diff -urNp linux-3.0.3/mm/slub.c linux-3.0.3/mm/slub.c
64110--- linux-3.0.3/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
64111+++ linux-3.0.3/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
64112@@ -442,7 +442,7 @@ static void print_track(const char *s, s
64113 if (!t->addr)
64114 return;
64115
64116- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64117+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64118 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64119 }
64120
64121@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
64122
64123 page = virt_to_head_page(x);
64124
64125+ BUG_ON(!PageSlab(page));
64126+
64127 slab_free(s, page, x, _RET_IP_);
64128
64129 trace_kmem_cache_free(_RET_IP_, x);
64130@@ -2170,7 +2172,7 @@ static int slub_min_objects;
64131 * Merge control. If this is set then no merging of slab caches will occur.
64132 * (Could be removed. This was introduced to pacify the merge skeptics.)
64133 */
64134-static int slub_nomerge;
64135+static int slub_nomerge = 1;
64136
64137 /*
64138 * Calculate the order of allocation given an slab object size.
64139@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
64140 * list to avoid pounding the page allocator excessively.
64141 */
64142 set_min_partial(s, ilog2(s->size));
64143- s->refcount = 1;
64144+ atomic_set(&s->refcount, 1);
64145 #ifdef CONFIG_NUMA
64146 s->remote_node_defrag_ratio = 1000;
64147 #endif
64148@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
64149 void kmem_cache_destroy(struct kmem_cache *s)
64150 {
64151 down_write(&slub_lock);
64152- s->refcount--;
64153- if (!s->refcount) {
64154+ if (atomic_dec_and_test(&s->refcount)) {
64155 list_del(&s->list);
64156 if (kmem_cache_close(s)) {
64157 printk(KERN_ERR "SLUB %s: %s called for cache that "
64158@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
64159 EXPORT_SYMBOL(__kmalloc_node);
64160 #endif
64161
64162+void check_object_size(const void *ptr, unsigned long n, bool to)
64163+{
64164+
64165+#ifdef CONFIG_PAX_USERCOPY
64166+ struct page *page;
64167+ struct kmem_cache *s = NULL;
64168+ unsigned long offset;
64169+
64170+ if (!n)
64171+ return;
64172+
64173+ if (ZERO_OR_NULL_PTR(ptr))
64174+ goto report;
64175+
64176+ if (!virt_addr_valid(ptr))
64177+ return;
64178+
64179+ page = virt_to_head_page(ptr);
64180+
64181+ if (!PageSlab(page)) {
64182+ if (object_is_on_stack(ptr, n) == -1)
64183+ goto report;
64184+ return;
64185+ }
64186+
64187+ s = page->slab;
64188+ if (!(s->flags & SLAB_USERCOPY))
64189+ goto report;
64190+
64191+ offset = (ptr - page_address(page)) % s->size;
64192+ if (offset <= s->objsize && n <= s->objsize - offset)
64193+ return;
64194+
64195+report:
64196+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64197+#endif
64198+
64199+}
64200+EXPORT_SYMBOL(check_object_size);
64201+
64202 size_t ksize(const void *object)
64203 {
64204 struct page *page;
64205@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
64206 int node;
64207
64208 list_add(&s->list, &slab_caches);
64209- s->refcount = -1;
64210+ atomic_set(&s->refcount, -1);
64211
64212 for_each_node_state(node, N_NORMAL_MEMORY) {
64213 struct kmem_cache_node *n = get_node(s, node);
64214@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
64215
64216 /* Caches that are not of the two-to-the-power-of size */
64217 if (KMALLOC_MIN_SIZE <= 32) {
64218- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64219+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64220 caches++;
64221 }
64222
64223 if (KMALLOC_MIN_SIZE <= 64) {
64224- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64225+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64226 caches++;
64227 }
64228
64229 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64230- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64231+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64232 caches++;
64233 }
64234
64235@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
64236 /*
64237 * We may have set a slab to be unmergeable during bootstrap.
64238 */
64239- if (s->refcount < 0)
64240+ if (atomic_read(&s->refcount) < 0)
64241 return 1;
64242
64243 return 0;
64244@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
64245 down_write(&slub_lock);
64246 s = find_mergeable(size, align, flags, name, ctor);
64247 if (s) {
64248- s->refcount++;
64249+ atomic_inc(&s->refcount);
64250 /*
64251 * Adjust the object sizes so that we clear
64252 * the complete object on kzalloc.
64253@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
64254 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64255
64256 if (sysfs_slab_alias(s, name)) {
64257- s->refcount--;
64258+ atomic_dec(&s->refcount);
64259 goto err;
64260 }
64261 up_write(&slub_lock);
64262@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
64263
64264 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64265 {
64266- return sprintf(buf, "%d\n", s->refcount - 1);
64267+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64268 }
64269 SLAB_ATTR_RO(aliases);
64270
64271@@ -4894,7 +4935,13 @@ static const struct file_operations proc
64272
64273 static int __init slab_proc_init(void)
64274 {
64275- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64276+ mode_t gr_mode = S_IRUGO;
64277+
64278+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64279+ gr_mode = S_IRUSR;
64280+#endif
64281+
64282+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64283 return 0;
64284 }
64285 module_init(slab_proc_init);
64286diff -urNp linux-3.0.3/mm/swap.c linux-3.0.3/mm/swap.c
64287--- linux-3.0.3/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
64288+++ linux-3.0.3/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
64289@@ -31,6 +31,7 @@
64290 #include <linux/backing-dev.h>
64291 #include <linux/memcontrol.h>
64292 #include <linux/gfp.h>
64293+#include <linux/hugetlb.h>
64294
64295 #include "internal.h"
64296
64297@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64298
64299 __page_cache_release(page);
64300 dtor = get_compound_page_dtor(page);
64301+ if (!PageHuge(page))
64302+ BUG_ON(dtor != free_compound_page);
64303 (*dtor)(page);
64304 }
64305
64306diff -urNp linux-3.0.3/mm/swapfile.c linux-3.0.3/mm/swapfile.c
64307--- linux-3.0.3/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
64308+++ linux-3.0.3/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
64309@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
64310
64311 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64312 /* Activity counter to indicate that a swapon or swapoff has occurred */
64313-static atomic_t proc_poll_event = ATOMIC_INIT(0);
64314+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64315
64316 static inline unsigned char swap_count(unsigned char ent)
64317 {
64318@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64319 }
64320 filp_close(swap_file, NULL);
64321 err = 0;
64322- atomic_inc(&proc_poll_event);
64323+ atomic_inc_unchecked(&proc_poll_event);
64324 wake_up_interruptible(&proc_poll_wait);
64325
64326 out_dput:
64327@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
64328
64329 poll_wait(file, &proc_poll_wait, wait);
64330
64331- if (s->event != atomic_read(&proc_poll_event)) {
64332- s->event = atomic_read(&proc_poll_event);
64333+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64334+ s->event = atomic_read_unchecked(&proc_poll_event);
64335 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64336 }
64337
64338@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
64339 }
64340
64341 s->seq.private = s;
64342- s->event = atomic_read(&proc_poll_event);
64343+ s->event = atomic_read_unchecked(&proc_poll_event);
64344 return ret;
64345 }
64346
64347@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64348 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64349
64350 mutex_unlock(&swapon_mutex);
64351- atomic_inc(&proc_poll_event);
64352+ atomic_inc_unchecked(&proc_poll_event);
64353 wake_up_interruptible(&proc_poll_wait);
64354
64355 if (S_ISREG(inode->i_mode))
64356diff -urNp linux-3.0.3/mm/util.c linux-3.0.3/mm/util.c
64357--- linux-3.0.3/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64358+++ linux-3.0.3/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64359@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64360 * allocated buffer. Use this if you don't want to free the buffer immediately
64361 * like, for example, with RCU.
64362 */
64363+#undef __krealloc
64364 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64365 {
64366 void *ret;
64367@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64368 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64369 * %NULL pointer, the object pointed to is freed.
64370 */
64371+#undef krealloc
64372 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64373 {
64374 void *ret;
64375@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64376 void arch_pick_mmap_layout(struct mm_struct *mm)
64377 {
64378 mm->mmap_base = TASK_UNMAPPED_BASE;
64379+
64380+#ifdef CONFIG_PAX_RANDMMAP
64381+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64382+ mm->mmap_base += mm->delta_mmap;
64383+#endif
64384+
64385 mm->get_unmapped_area = arch_get_unmapped_area;
64386 mm->unmap_area = arch_unmap_area;
64387 }
64388diff -urNp linux-3.0.3/mm/vmalloc.c linux-3.0.3/mm/vmalloc.c
64389--- linux-3.0.3/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64390+++ linux-3.0.3/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64391@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64392
64393 pte = pte_offset_kernel(pmd, addr);
64394 do {
64395- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64396- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64397+
64398+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64399+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64400+ BUG_ON(!pte_exec(*pte));
64401+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64402+ continue;
64403+ }
64404+#endif
64405+
64406+ {
64407+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64408+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64409+ }
64410 } while (pte++, addr += PAGE_SIZE, addr != end);
64411 }
64412
64413@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64414 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64415 {
64416 pte_t *pte;
64417+ int ret = -ENOMEM;
64418
64419 /*
64420 * nr is a running index into the array which helps higher level
64421@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64422 pte = pte_alloc_kernel(pmd, addr);
64423 if (!pte)
64424 return -ENOMEM;
64425+
64426+ pax_open_kernel();
64427 do {
64428 struct page *page = pages[*nr];
64429
64430- if (WARN_ON(!pte_none(*pte)))
64431- return -EBUSY;
64432- if (WARN_ON(!page))
64433- return -ENOMEM;
64434+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64435+ if (pgprot_val(prot) & _PAGE_NX)
64436+#endif
64437+
64438+ if (WARN_ON(!pte_none(*pte))) {
64439+ ret = -EBUSY;
64440+ goto out;
64441+ }
64442+ if (WARN_ON(!page)) {
64443+ ret = -ENOMEM;
64444+ goto out;
64445+ }
64446 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64447 (*nr)++;
64448 } while (pte++, addr += PAGE_SIZE, addr != end);
64449- return 0;
64450+ ret = 0;
64451+out:
64452+ pax_close_kernel();
64453+ return ret;
64454 }
64455
64456 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64457@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64458 * and fall back on vmalloc() if that fails. Others
64459 * just put it in the vmalloc space.
64460 */
64461-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64462+#ifdef CONFIG_MODULES
64463+#ifdef MODULES_VADDR
64464 unsigned long addr = (unsigned long)x;
64465 if (addr >= MODULES_VADDR && addr < MODULES_END)
64466 return 1;
64467 #endif
64468+
64469+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64470+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64471+ return 1;
64472+#endif
64473+
64474+#endif
64475+
64476 return is_vmalloc_addr(x);
64477 }
64478
64479@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64480
64481 if (!pgd_none(*pgd)) {
64482 pud_t *pud = pud_offset(pgd, addr);
64483+#ifdef CONFIG_X86
64484+ if (!pud_large(*pud))
64485+#endif
64486 if (!pud_none(*pud)) {
64487 pmd_t *pmd = pmd_offset(pud, addr);
64488+#ifdef CONFIG_X86
64489+ if (!pmd_large(*pmd))
64490+#endif
64491 if (!pmd_none(*pmd)) {
64492 pte_t *ptep, pte;
64493
64494@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64495 struct vm_struct *area;
64496
64497 BUG_ON(in_interrupt());
64498+
64499+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64500+ if (flags & VM_KERNEXEC) {
64501+ if (start != VMALLOC_START || end != VMALLOC_END)
64502+ return NULL;
64503+ start = (unsigned long)MODULES_EXEC_VADDR;
64504+ end = (unsigned long)MODULES_EXEC_END;
64505+ }
64506+#endif
64507+
64508 if (flags & VM_IOREMAP) {
64509 int bit = fls(size);
64510
64511@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64512 if (count > totalram_pages)
64513 return NULL;
64514
64515+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64516+ if (!(pgprot_val(prot) & _PAGE_NX))
64517+ flags |= VM_KERNEXEC;
64518+#endif
64519+
64520 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64521 __builtin_return_address(0));
64522 if (!area)
64523@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64524 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64525 return NULL;
64526
64527+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64528+ if (!(pgprot_val(prot) & _PAGE_NX))
64529+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64530+ node, gfp_mask, caller);
64531+ else
64532+#endif
64533+
64534 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64535 gfp_mask, caller);
64536
64537@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64538 gfp_mask, prot, node, caller);
64539 }
64540
64541+#undef __vmalloc
64542 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64543 {
64544 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64545@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64546 * For tight control over page level allocator and protection flags
64547 * use __vmalloc() instead.
64548 */
64549+#undef vmalloc
64550 void *vmalloc(unsigned long size)
64551 {
64552 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64553@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64554 * For tight control over page level allocator and protection flags
64555 * use __vmalloc() instead.
64556 */
64557+#undef vzalloc
64558 void *vzalloc(unsigned long size)
64559 {
64560 return __vmalloc_node_flags(size, -1,
64561@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64562 * The resulting memory area is zeroed so it can be mapped to userspace
64563 * without leaking data.
64564 */
64565+#undef vmalloc_user
64566 void *vmalloc_user(unsigned long size)
64567 {
64568 struct vm_struct *area;
64569@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64570 * For tight control over page level allocator and protection flags
64571 * use __vmalloc() instead.
64572 */
64573+#undef vmalloc_node
64574 void *vmalloc_node(unsigned long size, int node)
64575 {
64576 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64577@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64578 * For tight control over page level allocator and protection flags
64579 * use __vmalloc_node() instead.
64580 */
64581+#undef vzalloc_node
64582 void *vzalloc_node(unsigned long size, int node)
64583 {
64584 return __vmalloc_node_flags(size, node,
64585@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64586 * For tight control over page level allocator and protection flags
64587 * use __vmalloc() instead.
64588 */
64589-
64590+#undef vmalloc_exec
64591 void *vmalloc_exec(unsigned long size)
64592 {
64593- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64594+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64595 -1, __builtin_return_address(0));
64596 }
64597
64598@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64599 * Allocate enough 32bit PA addressable pages to cover @size from the
64600 * page level allocator and map them into contiguous kernel virtual space.
64601 */
64602+#undef vmalloc_32
64603 void *vmalloc_32(unsigned long size)
64604 {
64605 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64606@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64607 * The resulting memory area is 32bit addressable and zeroed so it can be
64608 * mapped to userspace without leaking data.
64609 */
64610+#undef vmalloc_32_user
64611 void *vmalloc_32_user(unsigned long size)
64612 {
64613 struct vm_struct *area;
64614@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64615 unsigned long uaddr = vma->vm_start;
64616 unsigned long usize = vma->vm_end - vma->vm_start;
64617
64618+ BUG_ON(vma->vm_mirror);
64619+
64620 if ((PAGE_SIZE-1) & (unsigned long)addr)
64621 return -EINVAL;
64622
64623diff -urNp linux-3.0.3/mm/vmstat.c linux-3.0.3/mm/vmstat.c
64624--- linux-3.0.3/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64625+++ linux-3.0.3/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64626@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64627 *
64628 * vm_stat contains the global counters
64629 */
64630-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64631+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64632 EXPORT_SYMBOL(vm_stat);
64633
64634 #ifdef CONFIG_SMP
64635@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64636 v = p->vm_stat_diff[i];
64637 p->vm_stat_diff[i] = 0;
64638 local_irq_restore(flags);
64639- atomic_long_add(v, &zone->vm_stat[i]);
64640+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64641 global_diff[i] += v;
64642 #ifdef CONFIG_NUMA
64643 /* 3 seconds idle till flush */
64644@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64645
64646 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64647 if (global_diff[i])
64648- atomic_long_add(global_diff[i], &vm_stat[i]);
64649+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64650 }
64651
64652 #endif
64653@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64654 start_cpu_timer(cpu);
64655 #endif
64656 #ifdef CONFIG_PROC_FS
64657- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64658- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64659- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64660- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64661+ {
64662+ mode_t gr_mode = S_IRUGO;
64663+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64664+ gr_mode = S_IRUSR;
64665+#endif
64666+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64667+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64668+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64669+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64670+#else
64671+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64672+#endif
64673+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64674+ }
64675 #endif
64676 return 0;
64677 }
64678diff -urNp linux-3.0.3/net/8021q/vlan.c linux-3.0.3/net/8021q/vlan.c
64679--- linux-3.0.3/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64680+++ linux-3.0.3/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64681@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64682 err = -EPERM;
64683 if (!capable(CAP_NET_ADMIN))
64684 break;
64685- if ((args.u.name_type >= 0) &&
64686- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64687+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64688 struct vlan_net *vn;
64689
64690 vn = net_generic(net, vlan_net_id);
64691diff -urNp linux-3.0.3/net/atm/atm_misc.c linux-3.0.3/net/atm/atm_misc.c
64692--- linux-3.0.3/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64693+++ linux-3.0.3/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64694@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64695 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64696 return 1;
64697 atm_return(vcc, truesize);
64698- atomic_inc(&vcc->stats->rx_drop);
64699+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64700 return 0;
64701 }
64702 EXPORT_SYMBOL(atm_charge);
64703@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64704 }
64705 }
64706 atm_return(vcc, guess);
64707- atomic_inc(&vcc->stats->rx_drop);
64708+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64709 return NULL;
64710 }
64711 EXPORT_SYMBOL(atm_alloc_charge);
64712@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64713
64714 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64715 {
64716-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64717+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64718 __SONET_ITEMS
64719 #undef __HANDLE_ITEM
64720 }
64721@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64722
64723 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64724 {
64725-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64726+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64727 __SONET_ITEMS
64728 #undef __HANDLE_ITEM
64729 }
64730diff -urNp linux-3.0.3/net/atm/lec.h linux-3.0.3/net/atm/lec.h
64731--- linux-3.0.3/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64732+++ linux-3.0.3/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64733@@ -48,7 +48,7 @@ struct lane2_ops {
64734 const u8 *tlvs, u32 sizeoftlvs);
64735 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64736 const u8 *tlvs, u32 sizeoftlvs);
64737-};
64738+} __no_const;
64739
64740 /*
64741 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64742diff -urNp linux-3.0.3/net/atm/mpc.h linux-3.0.3/net/atm/mpc.h
64743--- linux-3.0.3/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64744+++ linux-3.0.3/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64745@@ -33,7 +33,7 @@ struct mpoa_client {
64746 struct mpc_parameters parameters; /* parameters for this client */
64747
64748 const struct net_device_ops *old_ops;
64749- struct net_device_ops new_ops;
64750+ net_device_ops_no_const new_ops;
64751 };
64752
64753
64754diff -urNp linux-3.0.3/net/atm/mpoa_caches.c linux-3.0.3/net/atm/mpoa_caches.c
64755--- linux-3.0.3/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64756+++ linux-3.0.3/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64757@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64758 struct timeval now;
64759 struct k_message msg;
64760
64761+ pax_track_stack();
64762+
64763 do_gettimeofday(&now);
64764
64765 read_lock_bh(&client->ingress_lock);
64766diff -urNp linux-3.0.3/net/atm/proc.c linux-3.0.3/net/atm/proc.c
64767--- linux-3.0.3/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64768+++ linux-3.0.3/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64769@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64770 const struct k_atm_aal_stats *stats)
64771 {
64772 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64773- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64774- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64775- atomic_read(&stats->rx_drop));
64776+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64777+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64778+ atomic_read_unchecked(&stats->rx_drop));
64779 }
64780
64781 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64782diff -urNp linux-3.0.3/net/atm/resources.c linux-3.0.3/net/atm/resources.c
64783--- linux-3.0.3/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64784+++ linux-3.0.3/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64785@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64786 static void copy_aal_stats(struct k_atm_aal_stats *from,
64787 struct atm_aal_stats *to)
64788 {
64789-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64790+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64791 __AAL_STAT_ITEMS
64792 #undef __HANDLE_ITEM
64793 }
64794@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64795 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64796 struct atm_aal_stats *to)
64797 {
64798-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64799+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64800 __AAL_STAT_ITEMS
64801 #undef __HANDLE_ITEM
64802 }
64803diff -urNp linux-3.0.3/net/batman-adv/hard-interface.c linux-3.0.3/net/batman-adv/hard-interface.c
64804--- linux-3.0.3/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64805+++ linux-3.0.3/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64806@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64807 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64808 dev_add_pack(&hard_iface->batman_adv_ptype);
64809
64810- atomic_set(&hard_iface->seqno, 1);
64811- atomic_set(&hard_iface->frag_seqno, 1);
64812+ atomic_set_unchecked(&hard_iface->seqno, 1);
64813+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64814 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64815 hard_iface->net_dev->name);
64816
64817diff -urNp linux-3.0.3/net/batman-adv/routing.c linux-3.0.3/net/batman-adv/routing.c
64818--- linux-3.0.3/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64819+++ linux-3.0.3/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64820@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64821 return;
64822
64823 /* could be changed by schedule_own_packet() */
64824- if_incoming_seqno = atomic_read(&if_incoming->seqno);
64825+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64826
64827 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64828
64829diff -urNp linux-3.0.3/net/batman-adv/send.c linux-3.0.3/net/batman-adv/send.c
64830--- linux-3.0.3/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64831+++ linux-3.0.3/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64832@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64833
64834 /* change sequence number to network order */
64835 batman_packet->seqno =
64836- htonl((uint32_t)atomic_read(&hard_iface->seqno));
64837+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64838
64839 if (vis_server == VIS_TYPE_SERVER_SYNC)
64840 batman_packet->flags |= VIS_SERVER;
64841@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64842 else
64843 batman_packet->gw_flags = 0;
64844
64845- atomic_inc(&hard_iface->seqno);
64846+ atomic_inc_unchecked(&hard_iface->seqno);
64847
64848 slide_own_bcast_window(hard_iface);
64849 send_time = own_send_time(bat_priv);
64850diff -urNp linux-3.0.3/net/batman-adv/soft-interface.c linux-3.0.3/net/batman-adv/soft-interface.c
64851--- linux-3.0.3/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64852+++ linux-3.0.3/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64853@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64854
64855 /* set broadcast sequence number */
64856 bcast_packet->seqno =
64857- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64858+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64859
64860 add_bcast_packet_to_list(bat_priv, skb);
64861
64862@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64863 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64864
64865 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64866- atomic_set(&bat_priv->bcast_seqno, 1);
64867+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64868 atomic_set(&bat_priv->tt_local_changed, 0);
64869
64870 bat_priv->primary_if = NULL;
64871diff -urNp linux-3.0.3/net/batman-adv/types.h linux-3.0.3/net/batman-adv/types.h
64872--- linux-3.0.3/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64873+++ linux-3.0.3/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64874@@ -38,8 +38,8 @@ struct hard_iface {
64875 int16_t if_num;
64876 char if_status;
64877 struct net_device *net_dev;
64878- atomic_t seqno;
64879- atomic_t frag_seqno;
64880+ atomic_unchecked_t seqno;
64881+ atomic_unchecked_t frag_seqno;
64882 unsigned char *packet_buff;
64883 int packet_len;
64884 struct kobject *hardif_obj;
64885@@ -142,7 +142,7 @@ struct bat_priv {
64886 atomic_t orig_interval; /* uint */
64887 atomic_t hop_penalty; /* uint */
64888 atomic_t log_level; /* uint */
64889- atomic_t bcast_seqno;
64890+ atomic_unchecked_t bcast_seqno;
64891 atomic_t bcast_queue_left;
64892 atomic_t batman_queue_left;
64893 char num_ifaces;
64894diff -urNp linux-3.0.3/net/batman-adv/unicast.c linux-3.0.3/net/batman-adv/unicast.c
64895--- linux-3.0.3/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64896+++ linux-3.0.3/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64897@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64898 frag1->flags = UNI_FRAG_HEAD | large_tail;
64899 frag2->flags = large_tail;
64900
64901- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64902+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64903 frag1->seqno = htons(seqno - 1);
64904 frag2->seqno = htons(seqno);
64905
64906diff -urNp linux-3.0.3/net/bridge/br_multicast.c linux-3.0.3/net/bridge/br_multicast.c
64907--- linux-3.0.3/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64908+++ linux-3.0.3/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64909@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64910 nexthdr = ip6h->nexthdr;
64911 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64912
64913- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64914+ if (nexthdr != IPPROTO_ICMPV6)
64915 return 0;
64916
64917 /* Okay, we found ICMPv6 header */
64918diff -urNp linux-3.0.3/net/bridge/netfilter/ebtables.c linux-3.0.3/net/bridge/netfilter/ebtables.c
64919--- linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64920+++ linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64921@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64922 tmp.valid_hooks = t->table->valid_hooks;
64923 }
64924 mutex_unlock(&ebt_mutex);
64925- if (copy_to_user(user, &tmp, *len) != 0){
64926+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64927 BUGPRINT("c2u Didn't work\n");
64928 ret = -EFAULT;
64929 break;
64930@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64931 int ret;
64932 void __user *pos;
64933
64934+ pax_track_stack();
64935+
64936 memset(&tinfo, 0, sizeof(tinfo));
64937
64938 if (cmd == EBT_SO_GET_ENTRIES) {
64939diff -urNp linux-3.0.3/net/caif/caif_socket.c linux-3.0.3/net/caif/caif_socket.c
64940--- linux-3.0.3/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64941+++ linux-3.0.3/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64942@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64943 #ifdef CONFIG_DEBUG_FS
64944 struct debug_fs_counter {
64945 atomic_t caif_nr_socks;
64946- atomic_t caif_sock_create;
64947- atomic_t num_connect_req;
64948- atomic_t num_connect_resp;
64949- atomic_t num_connect_fail_resp;
64950- atomic_t num_disconnect;
64951- atomic_t num_remote_shutdown_ind;
64952- atomic_t num_tx_flow_off_ind;
64953- atomic_t num_tx_flow_on_ind;
64954- atomic_t num_rx_flow_off;
64955- atomic_t num_rx_flow_on;
64956+ atomic_unchecked_t caif_sock_create;
64957+ atomic_unchecked_t num_connect_req;
64958+ atomic_unchecked_t num_connect_resp;
64959+ atomic_unchecked_t num_connect_fail_resp;
64960+ atomic_unchecked_t num_disconnect;
64961+ atomic_unchecked_t num_remote_shutdown_ind;
64962+ atomic_unchecked_t num_tx_flow_off_ind;
64963+ atomic_unchecked_t num_tx_flow_on_ind;
64964+ atomic_unchecked_t num_rx_flow_off;
64965+ atomic_unchecked_t num_rx_flow_on;
64966 };
64967 static struct debug_fs_counter cnt;
64968 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64969+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64970 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64971 #else
64972 #define dbfs_atomic_inc(v) 0
64973@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64974 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64975 sk_rcvbuf_lowwater(cf_sk));
64976 set_rx_flow_off(cf_sk);
64977- dbfs_atomic_inc(&cnt.num_rx_flow_off);
64978+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64979 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64980 }
64981
64982@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64983 set_rx_flow_off(cf_sk);
64984 if (net_ratelimit())
64985 pr_debug("sending flow OFF due to rmem_schedule\n");
64986- dbfs_atomic_inc(&cnt.num_rx_flow_off);
64987+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64988 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64989 }
64990 skb->dev = NULL;
64991@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64992 switch (flow) {
64993 case CAIF_CTRLCMD_FLOW_ON_IND:
64994 /* OK from modem to start sending again */
64995- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64996+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64997 set_tx_flow_on(cf_sk);
64998 cf_sk->sk.sk_state_change(&cf_sk->sk);
64999 break;
65000
65001 case CAIF_CTRLCMD_FLOW_OFF_IND:
65002 /* Modem asks us to shut up */
65003- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65004+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65005 set_tx_flow_off(cf_sk);
65006 cf_sk->sk.sk_state_change(&cf_sk->sk);
65007 break;
65008@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
65009 /* We're now connected */
65010 caif_client_register_refcnt(&cf_sk->layer,
65011 cfsk_hold, cfsk_put);
65012- dbfs_atomic_inc(&cnt.num_connect_resp);
65013+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65014 cf_sk->sk.sk_state = CAIF_CONNECTED;
65015 set_tx_flow_on(cf_sk);
65016 cf_sk->sk.sk_state_change(&cf_sk->sk);
65017@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
65018
65019 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65020 /* Connect request failed */
65021- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65022+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65023 cf_sk->sk.sk_err = ECONNREFUSED;
65024 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65025 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65026@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
65027
65028 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65029 /* Modem has closed this connection, or device is down. */
65030- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65031+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65032 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65033 cf_sk->sk.sk_err = ECONNRESET;
65034 set_rx_flow_on(cf_sk);
65035@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
65036 return;
65037
65038 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65039- dbfs_atomic_inc(&cnt.num_rx_flow_on);
65040+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65041 set_rx_flow_on(cf_sk);
65042 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65043 }
65044@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
65045 /*ifindex = id of the interface.*/
65046 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65047
65048- dbfs_atomic_inc(&cnt.num_connect_req);
65049+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65050 cf_sk->layer.receive = caif_sktrecv_cb;
65051
65052 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
65053@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
65054 spin_unlock_bh(&sk->sk_receive_queue.lock);
65055 sock->sk = NULL;
65056
65057- dbfs_atomic_inc(&cnt.num_disconnect);
65058+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65059
65060 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
65061 if (cf_sk->debugfs_socket_dir != NULL)
65062@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
65063 cf_sk->conn_req.protocol = protocol;
65064 /* Increase the number of sockets created. */
65065 dbfs_atomic_inc(&cnt.caif_nr_socks);
65066- num = dbfs_atomic_inc(&cnt.caif_sock_create);
65067+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
65068 #ifdef CONFIG_DEBUG_FS
65069 if (!IS_ERR(debugfsdir)) {
65070
65071diff -urNp linux-3.0.3/net/caif/cfctrl.c linux-3.0.3/net/caif/cfctrl.c
65072--- linux-3.0.3/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
65073+++ linux-3.0.3/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
65074@@ -9,6 +9,7 @@
65075 #include <linux/stddef.h>
65076 #include <linux/spinlock.h>
65077 #include <linux/slab.h>
65078+#include <linux/sched.h>
65079 #include <net/caif/caif_layer.h>
65080 #include <net/caif/cfpkt.h>
65081 #include <net/caif/cfctrl.h>
65082@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
65083 dev_info.id = 0xff;
65084 memset(this, 0, sizeof(*this));
65085 cfsrvl_init(&this->serv, 0, &dev_info, false);
65086- atomic_set(&this->req_seq_no, 1);
65087- atomic_set(&this->rsp_seq_no, 1);
65088+ atomic_set_unchecked(&this->req_seq_no, 1);
65089+ atomic_set_unchecked(&this->rsp_seq_no, 1);
65090 this->serv.layer.receive = cfctrl_recv;
65091 sprintf(this->serv.layer.name, "ctrl");
65092 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65093@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
65094 struct cfctrl_request_info *req)
65095 {
65096 spin_lock_bh(&ctrl->info_list_lock);
65097- atomic_inc(&ctrl->req_seq_no);
65098- req->sequence_no = atomic_read(&ctrl->req_seq_no);
65099+ atomic_inc_unchecked(&ctrl->req_seq_no);
65100+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65101 list_add_tail(&req->list, &ctrl->list);
65102 spin_unlock_bh(&ctrl->info_list_lock);
65103 }
65104@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
65105 if (p != first)
65106 pr_warn("Requests are not received in order\n");
65107
65108- atomic_set(&ctrl->rsp_seq_no,
65109+ atomic_set_unchecked(&ctrl->rsp_seq_no,
65110 p->sequence_no);
65111 list_del(&p->list);
65112 goto out;
65113@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
65114 struct cfctrl *cfctrl = container_obj(layer);
65115 struct cfctrl_request_info rsp, *req;
65116
65117+ pax_track_stack();
65118
65119 cfpkt_extr_head(pkt, &cmdrsp, 1);
65120 cmd = cmdrsp & CFCTRL_CMD_MASK;
65121diff -urNp linux-3.0.3/net/core/datagram.c linux-3.0.3/net/core/datagram.c
65122--- linux-3.0.3/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
65123+++ linux-3.0.3/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
65124@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65125 }
65126
65127 kfree_skb(skb);
65128- atomic_inc(&sk->sk_drops);
65129+ atomic_inc_unchecked(&sk->sk_drops);
65130 sk_mem_reclaim_partial(sk);
65131
65132 return err;
65133diff -urNp linux-3.0.3/net/core/dev.c linux-3.0.3/net/core/dev.c
65134--- linux-3.0.3/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
65135+++ linux-3.0.3/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
65136@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65137 if (no_module && capable(CAP_NET_ADMIN))
65138 no_module = request_module("netdev-%s", name);
65139 if (no_module && capable(CAP_SYS_MODULE)) {
65140+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65141+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
65142+#else
65143 if (!request_module("%s", name))
65144 pr_err("Loading kernel module for a network device "
65145 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65146 "instead\n", name);
65147+#endif
65148 }
65149 }
65150 EXPORT_SYMBOL(dev_load);
65151@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
65152
65153 struct dev_gso_cb {
65154 void (*destructor)(struct sk_buff *skb);
65155-};
65156+} __no_const;
65157
65158 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65159
65160@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
65161 }
65162 EXPORT_SYMBOL(netif_rx_ni);
65163
65164-static void net_tx_action(struct softirq_action *h)
65165+static void net_tx_action(void)
65166 {
65167 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65168
65169@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
65170 }
65171 EXPORT_SYMBOL(netif_napi_del);
65172
65173-static void net_rx_action(struct softirq_action *h)
65174+static void net_rx_action(void)
65175 {
65176 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65177 unsigned long time_limit = jiffies + 2;
65178diff -urNp linux-3.0.3/net/core/flow.c linux-3.0.3/net/core/flow.c
65179--- linux-3.0.3/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
65180+++ linux-3.0.3/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
65181@@ -60,7 +60,7 @@ struct flow_cache {
65182 struct timer_list rnd_timer;
65183 };
65184
65185-atomic_t flow_cache_genid = ATOMIC_INIT(0);
65186+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65187 EXPORT_SYMBOL(flow_cache_genid);
65188 static struct flow_cache flow_cache_global;
65189 static struct kmem_cache *flow_cachep __read_mostly;
65190@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65191
65192 static int flow_entry_valid(struct flow_cache_entry *fle)
65193 {
65194- if (atomic_read(&flow_cache_genid) != fle->genid)
65195+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65196 return 0;
65197 if (fle->object && !fle->object->ops->check(fle->object))
65198 return 0;
65199@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65200 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65201 fcp->hash_count++;
65202 }
65203- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65204+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65205 flo = fle->object;
65206 if (!flo)
65207 goto ret_object;
65208@@ -274,7 +274,7 @@ nocache:
65209 }
65210 flo = resolver(net, key, family, dir, flo, ctx);
65211 if (fle) {
65212- fle->genid = atomic_read(&flow_cache_genid);
65213+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
65214 if (!IS_ERR(flo))
65215 fle->object = flo;
65216 else
65217diff -urNp linux-3.0.3/net/core/rtnetlink.c linux-3.0.3/net/core/rtnetlink.c
65218--- linux-3.0.3/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
65219+++ linux-3.0.3/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
65220@@ -56,7 +56,7 @@
65221 struct rtnl_link {
65222 rtnl_doit_func doit;
65223 rtnl_dumpit_func dumpit;
65224-};
65225+} __no_const;
65226
65227 static DEFINE_MUTEX(rtnl_mutex);
65228
65229diff -urNp linux-3.0.3/net/core/skbuff.c linux-3.0.3/net/core/skbuff.c
65230--- linux-3.0.3/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
65231+++ linux-3.0.3/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
65232@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
65233 struct sock *sk = skb->sk;
65234 int ret = 0;
65235
65236+ pax_track_stack();
65237+
65238 if (splice_grow_spd(pipe, &spd))
65239 return -ENOMEM;
65240
65241diff -urNp linux-3.0.3/net/core/sock.c linux-3.0.3/net/core/sock.c
65242--- linux-3.0.3/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
65243+++ linux-3.0.3/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
65244@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65245 */
65246 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65247 (unsigned)sk->sk_rcvbuf) {
65248- atomic_inc(&sk->sk_drops);
65249+ atomic_inc_unchecked(&sk->sk_drops);
65250 return -ENOMEM;
65251 }
65252
65253@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65254 return err;
65255
65256 if (!sk_rmem_schedule(sk, skb->truesize)) {
65257- atomic_inc(&sk->sk_drops);
65258+ atomic_inc_unchecked(&sk->sk_drops);
65259 return -ENOBUFS;
65260 }
65261
65262@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65263 skb_dst_force(skb);
65264
65265 spin_lock_irqsave(&list->lock, flags);
65266- skb->dropcount = atomic_read(&sk->sk_drops);
65267+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65268 __skb_queue_tail(list, skb);
65269 spin_unlock_irqrestore(&list->lock, flags);
65270
65271@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65272 skb->dev = NULL;
65273
65274 if (sk_rcvqueues_full(sk, skb)) {
65275- atomic_inc(&sk->sk_drops);
65276+ atomic_inc_unchecked(&sk->sk_drops);
65277 goto discard_and_relse;
65278 }
65279 if (nested)
65280@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65281 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65282 } else if (sk_add_backlog(sk, skb)) {
65283 bh_unlock_sock(sk);
65284- atomic_inc(&sk->sk_drops);
65285+ atomic_inc_unchecked(&sk->sk_drops);
65286 goto discard_and_relse;
65287 }
65288
65289@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
65290 if (len > sizeof(peercred))
65291 len = sizeof(peercred);
65292 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
65293- if (copy_to_user(optval, &peercred, len))
65294+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
65295 return -EFAULT;
65296 goto lenout;
65297 }
65298@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65299 return -ENOTCONN;
65300 if (lv < len)
65301 return -EINVAL;
65302- if (copy_to_user(optval, address, len))
65303+ if (len > sizeof(address) || copy_to_user(optval, address, len))
65304 return -EFAULT;
65305 goto lenout;
65306 }
65307@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65308
65309 if (len > lv)
65310 len = lv;
65311- if (copy_to_user(optval, &v, len))
65312+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
65313 return -EFAULT;
65314 lenout:
65315 if (put_user(len, optlen))
65316@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65317 */
65318 smp_wmb();
65319 atomic_set(&sk->sk_refcnt, 1);
65320- atomic_set(&sk->sk_drops, 0);
65321+ atomic_set_unchecked(&sk->sk_drops, 0);
65322 }
65323 EXPORT_SYMBOL(sock_init_data);
65324
65325diff -urNp linux-3.0.3/net/decnet/sysctl_net_decnet.c linux-3.0.3/net/decnet/sysctl_net_decnet.c
65326--- linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
65327+++ linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
65328@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65329
65330 if (len > *lenp) len = *lenp;
65331
65332- if (copy_to_user(buffer, addr, len))
65333+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
65334 return -EFAULT;
65335
65336 *lenp = len;
65337@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65338
65339 if (len > *lenp) len = *lenp;
65340
65341- if (copy_to_user(buffer, devname, len))
65342+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
65343 return -EFAULT;
65344
65345 *lenp = len;
65346diff -urNp linux-3.0.3/net/econet/Kconfig linux-3.0.3/net/econet/Kconfig
65347--- linux-3.0.3/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65348+++ linux-3.0.3/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65349@@ -4,7 +4,7 @@
65350
65351 config ECONET
65352 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65353- depends on EXPERIMENTAL && INET
65354+ depends on EXPERIMENTAL && INET && BROKEN
65355 ---help---
65356 Econet is a fairly old and slow networking protocol mainly used by
65357 Acorn computers to access file and print servers. It uses native
65358diff -urNp linux-3.0.3/net/ipv4/fib_frontend.c linux-3.0.3/net/ipv4/fib_frontend.c
65359--- linux-3.0.3/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65360+++ linux-3.0.3/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65361@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65362 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65363 fib_sync_up(dev);
65364 #endif
65365- atomic_inc(&net->ipv4.dev_addr_genid);
65366+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65367 rt_cache_flush(dev_net(dev), -1);
65368 break;
65369 case NETDEV_DOWN:
65370 fib_del_ifaddr(ifa, NULL);
65371- atomic_inc(&net->ipv4.dev_addr_genid);
65372+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65373 if (ifa->ifa_dev->ifa_list == NULL) {
65374 /* Last address was deleted from this interface.
65375 * Disable IP.
65376@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65377 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65378 fib_sync_up(dev);
65379 #endif
65380- atomic_inc(&net->ipv4.dev_addr_genid);
65381+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65382 rt_cache_flush(dev_net(dev), -1);
65383 break;
65384 case NETDEV_DOWN:
65385diff -urNp linux-3.0.3/net/ipv4/fib_semantics.c linux-3.0.3/net/ipv4/fib_semantics.c
65386--- linux-3.0.3/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65387+++ linux-3.0.3/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65388@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65389 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65390 nh->nh_gw,
65391 nh->nh_parent->fib_scope);
65392- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65393+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65394
65395 return nh->nh_saddr;
65396 }
65397diff -urNp linux-3.0.3/net/ipv4/inet_diag.c linux-3.0.3/net/ipv4/inet_diag.c
65398--- linux-3.0.3/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65399+++ linux-3.0.3/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65400@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65401 r->idiag_retrans = 0;
65402
65403 r->id.idiag_if = sk->sk_bound_dev_if;
65404+
65405+#ifdef CONFIG_GRKERNSEC_HIDESYM
65406+ r->id.idiag_cookie[0] = 0;
65407+ r->id.idiag_cookie[1] = 0;
65408+#else
65409 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65410 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65411+#endif
65412
65413 r->id.idiag_sport = inet->inet_sport;
65414 r->id.idiag_dport = inet->inet_dport;
65415@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65416 r->idiag_family = tw->tw_family;
65417 r->idiag_retrans = 0;
65418 r->id.idiag_if = tw->tw_bound_dev_if;
65419+
65420+#ifdef CONFIG_GRKERNSEC_HIDESYM
65421+ r->id.idiag_cookie[0] = 0;
65422+ r->id.idiag_cookie[1] = 0;
65423+#else
65424 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65425 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65426+#endif
65427+
65428 r->id.idiag_sport = tw->tw_sport;
65429 r->id.idiag_dport = tw->tw_dport;
65430 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65431@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65432 if (sk == NULL)
65433 goto unlock;
65434
65435+#ifndef CONFIG_GRKERNSEC_HIDESYM
65436 err = -ESTALE;
65437 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65438 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65439 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65440 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65441 goto out;
65442+#endif
65443
65444 err = -ENOMEM;
65445 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65446@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65447 r->idiag_retrans = req->retrans;
65448
65449 r->id.idiag_if = sk->sk_bound_dev_if;
65450+
65451+#ifdef CONFIG_GRKERNSEC_HIDESYM
65452+ r->id.idiag_cookie[0] = 0;
65453+ r->id.idiag_cookie[1] = 0;
65454+#else
65455 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65456 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65457+#endif
65458
65459 tmo = req->expires - jiffies;
65460 if (tmo < 0)
65461diff -urNp linux-3.0.3/net/ipv4/inet_hashtables.c linux-3.0.3/net/ipv4/inet_hashtables.c
65462--- linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65463+++ linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65464@@ -18,12 +18,15 @@
65465 #include <linux/sched.h>
65466 #include <linux/slab.h>
65467 #include <linux/wait.h>
65468+#include <linux/security.h>
65469
65470 #include <net/inet_connection_sock.h>
65471 #include <net/inet_hashtables.h>
65472 #include <net/secure_seq.h>
65473 #include <net/ip.h>
65474
65475+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65476+
65477 /*
65478 * Allocate and initialize a new local port bind bucket.
65479 * The bindhash mutex for snum's hash chain must be held here.
65480@@ -530,6 +533,8 @@ ok:
65481 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65482 spin_unlock(&head->lock);
65483
65484+ gr_update_task_in_ip_table(current, inet_sk(sk));
65485+
65486 if (tw) {
65487 inet_twsk_deschedule(tw, death_row);
65488 while (twrefcnt) {
65489diff -urNp linux-3.0.3/net/ipv4/inetpeer.c linux-3.0.3/net/ipv4/inetpeer.c
65490--- linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65491+++ linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65492@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65493 unsigned int sequence;
65494 int invalidated, newrefcnt = 0;
65495
65496+ pax_track_stack();
65497+
65498 /* Look up for the address quickly, lockless.
65499 * Because of a concurrent writer, we might not find an existing entry.
65500 */
65501@@ -517,8 +519,8 @@ found: /* The existing node has been fo
65502 if (p) {
65503 p->daddr = *daddr;
65504 atomic_set(&p->refcnt, 1);
65505- atomic_set(&p->rid, 0);
65506- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65507+ atomic_set_unchecked(&p->rid, 0);
65508+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65509 p->tcp_ts_stamp = 0;
65510 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65511 p->rate_tokens = 0;
65512diff -urNp linux-3.0.3/net/ipv4/ip_fragment.c linux-3.0.3/net/ipv4/ip_fragment.c
65513--- linux-3.0.3/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65514+++ linux-3.0.3/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65515@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65516 return 0;
65517
65518 start = qp->rid;
65519- end = atomic_inc_return(&peer->rid);
65520+ end = atomic_inc_return_unchecked(&peer->rid);
65521 qp->rid = end;
65522
65523 rc = qp->q.fragments && (end - start) > max;
65524diff -urNp linux-3.0.3/net/ipv4/ip_sockglue.c linux-3.0.3/net/ipv4/ip_sockglue.c
65525--- linux-3.0.3/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65526+++ linux-3.0.3/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65527@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65528 int val;
65529 int len;
65530
65531+ pax_track_stack();
65532+
65533 if (level != SOL_IP)
65534 return -EOPNOTSUPP;
65535
65536@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65537 len = min_t(unsigned int, len, opt->optlen);
65538 if (put_user(len, optlen))
65539 return -EFAULT;
65540- if (copy_to_user(optval, opt->__data, len))
65541+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65542+ copy_to_user(optval, opt->__data, len))
65543 return -EFAULT;
65544 return 0;
65545 }
65546diff -urNp linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
65547--- linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65548+++ linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65549@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65550
65551 *len = 0;
65552
65553- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65554+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65555 if (*octets == NULL) {
65556 if (net_ratelimit())
65557 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65558diff -urNp linux-3.0.3/net/ipv4/ping.c linux-3.0.3/net/ipv4/ping.c
65559--- linux-3.0.3/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65560+++ linux-3.0.3/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65561@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65562 sk_rmem_alloc_get(sp),
65563 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65564 atomic_read(&sp->sk_refcnt), sp,
65565- atomic_read(&sp->sk_drops), len);
65566+ atomic_read_unchecked(&sp->sk_drops), len);
65567 }
65568
65569 static int ping_seq_show(struct seq_file *seq, void *v)
65570diff -urNp linux-3.0.3/net/ipv4/raw.c linux-3.0.3/net/ipv4/raw.c
65571--- linux-3.0.3/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65572+++ linux-3.0.3/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65573@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65574 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65575 {
65576 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65577- atomic_inc(&sk->sk_drops);
65578+ atomic_inc_unchecked(&sk->sk_drops);
65579 kfree_skb(skb);
65580 return NET_RX_DROP;
65581 }
65582@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65583
65584 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65585 {
65586+ struct icmp_filter filter;
65587+
65588 if (optlen > sizeof(struct icmp_filter))
65589 optlen = sizeof(struct icmp_filter);
65590- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65591+ if (copy_from_user(&filter, optval, optlen))
65592 return -EFAULT;
65593+ raw_sk(sk)->filter = filter;
65594 return 0;
65595 }
65596
65597 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65598 {
65599 int len, ret = -EFAULT;
65600+ struct icmp_filter filter;
65601
65602 if (get_user(len, optlen))
65603 goto out;
65604@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65605 if (len > sizeof(struct icmp_filter))
65606 len = sizeof(struct icmp_filter);
65607 ret = -EFAULT;
65608- if (put_user(len, optlen) ||
65609- copy_to_user(optval, &raw_sk(sk)->filter, len))
65610+ filter = raw_sk(sk)->filter;
65611+ if (put_user(len, optlen) || len > sizeof filter ||
65612+ copy_to_user(optval, &filter, len))
65613 goto out;
65614 ret = 0;
65615 out: return ret;
65616@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65617 sk_wmem_alloc_get(sp),
65618 sk_rmem_alloc_get(sp),
65619 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65620- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65621+ atomic_read(&sp->sk_refcnt),
65622+#ifdef CONFIG_GRKERNSEC_HIDESYM
65623+ NULL,
65624+#else
65625+ sp,
65626+#endif
65627+ atomic_read_unchecked(&sp->sk_drops));
65628 }
65629
65630 static int raw_seq_show(struct seq_file *seq, void *v)
65631diff -urNp linux-3.0.3/net/ipv4/route.c linux-3.0.3/net/ipv4/route.c
65632--- linux-3.0.3/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65633+++ linux-3.0.3/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65634@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65635
65636 static inline int rt_genid(struct net *net)
65637 {
65638- return atomic_read(&net->ipv4.rt_genid);
65639+ return atomic_read_unchecked(&net->ipv4.rt_genid);
65640 }
65641
65642 #ifdef CONFIG_PROC_FS
65643@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65644 unsigned char shuffle;
65645
65646 get_random_bytes(&shuffle, sizeof(shuffle));
65647- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65648+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65649 }
65650
65651 /*
65652@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65653 error = rt->dst.error;
65654 if (peer) {
65655 inet_peer_refcheck(rt->peer);
65656- id = atomic_read(&peer->ip_id_count) & 0xffff;
65657+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65658 if (peer->tcp_ts_stamp) {
65659 ts = peer->tcp_ts;
65660 tsage = get_seconds() - peer->tcp_ts_stamp;
65661diff -urNp linux-3.0.3/net/ipv4/tcp.c linux-3.0.3/net/ipv4/tcp.c
65662--- linux-3.0.3/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65663+++ linux-3.0.3/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65664@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65665 int val;
65666 int err = 0;
65667
65668+ pax_track_stack();
65669+
65670 /* These are data/string values, all the others are ints */
65671 switch (optname) {
65672 case TCP_CONGESTION: {
65673@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65674 struct tcp_sock *tp = tcp_sk(sk);
65675 int val, len;
65676
65677+ pax_track_stack();
65678+
65679 if (get_user(len, optlen))
65680 return -EFAULT;
65681
65682diff -urNp linux-3.0.3/net/ipv4/tcp_ipv4.c linux-3.0.3/net/ipv4/tcp_ipv4.c
65683--- linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65684+++ linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65685@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65686 int sysctl_tcp_low_latency __read_mostly;
65687 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65688
65689+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65690+extern int grsec_enable_blackhole;
65691+#endif
65692
65693 #ifdef CONFIG_TCP_MD5SIG
65694 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65695@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65696 return 0;
65697
65698 reset:
65699+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65700+ if (!grsec_enable_blackhole)
65701+#endif
65702 tcp_v4_send_reset(rsk, skb);
65703 discard:
65704 kfree_skb(skb);
65705@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65706 TCP_SKB_CB(skb)->sacked = 0;
65707
65708 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65709- if (!sk)
65710+ if (!sk) {
65711+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65712+ ret = 1;
65713+#endif
65714 goto no_tcp_socket;
65715-
65716+ }
65717 process:
65718- if (sk->sk_state == TCP_TIME_WAIT)
65719+ if (sk->sk_state == TCP_TIME_WAIT) {
65720+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65721+ ret = 2;
65722+#endif
65723 goto do_time_wait;
65724+ }
65725
65726 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65727 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65728@@ -1724,6 +1737,10 @@ no_tcp_socket:
65729 bad_packet:
65730 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65731 } else {
65732+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65733+ if (!grsec_enable_blackhole || (ret == 1 &&
65734+ (skb->dev->flags & IFF_LOOPBACK)))
65735+#endif
65736 tcp_v4_send_reset(NULL, skb);
65737 }
65738
65739@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65740 0, /* non standard timer */
65741 0, /* open_requests have no inode */
65742 atomic_read(&sk->sk_refcnt),
65743+#ifdef CONFIG_GRKERNSEC_HIDESYM
65744+ NULL,
65745+#else
65746 req,
65747+#endif
65748 len);
65749 }
65750
65751@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65752 sock_i_uid(sk),
65753 icsk->icsk_probes_out,
65754 sock_i_ino(sk),
65755- atomic_read(&sk->sk_refcnt), sk,
65756+ atomic_read(&sk->sk_refcnt),
65757+#ifdef CONFIG_GRKERNSEC_HIDESYM
65758+ NULL,
65759+#else
65760+ sk,
65761+#endif
65762 jiffies_to_clock_t(icsk->icsk_rto),
65763 jiffies_to_clock_t(icsk->icsk_ack.ato),
65764 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65765@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65766 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65767 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65768 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65769- atomic_read(&tw->tw_refcnt), tw, len);
65770+ atomic_read(&tw->tw_refcnt),
65771+#ifdef CONFIG_GRKERNSEC_HIDESYM
65772+ NULL,
65773+#else
65774+ tw,
65775+#endif
65776+ len);
65777 }
65778
65779 #define TMPSZ 150
65780diff -urNp linux-3.0.3/net/ipv4/tcp_minisocks.c linux-3.0.3/net/ipv4/tcp_minisocks.c
65781--- linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65782+++ linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65783@@ -27,6 +27,10 @@
65784 #include <net/inet_common.h>
65785 #include <net/xfrm.h>
65786
65787+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65788+extern int grsec_enable_blackhole;
65789+#endif
65790+
65791 int sysctl_tcp_syncookies __read_mostly = 1;
65792 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65793
65794@@ -745,6 +749,10 @@ listen_overflow:
65795
65796 embryonic_reset:
65797 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65798+
65799+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65800+ if (!grsec_enable_blackhole)
65801+#endif
65802 if (!(flg & TCP_FLAG_RST))
65803 req->rsk_ops->send_reset(sk, skb);
65804
65805diff -urNp linux-3.0.3/net/ipv4/tcp_output.c linux-3.0.3/net/ipv4/tcp_output.c
65806--- linux-3.0.3/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65807+++ linux-3.0.3/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65808@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65809 int mss;
65810 int s_data_desired = 0;
65811
65812+ pax_track_stack();
65813+
65814 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65815 s_data_desired = cvp->s_data_desired;
65816 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65817diff -urNp linux-3.0.3/net/ipv4/tcp_probe.c linux-3.0.3/net/ipv4/tcp_probe.c
65818--- linux-3.0.3/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65819+++ linux-3.0.3/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65820@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65821 if (cnt + width >= len)
65822 break;
65823
65824- if (copy_to_user(buf + cnt, tbuf, width))
65825+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65826 return -EFAULT;
65827 cnt += width;
65828 }
65829diff -urNp linux-3.0.3/net/ipv4/tcp_timer.c linux-3.0.3/net/ipv4/tcp_timer.c
65830--- linux-3.0.3/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65831+++ linux-3.0.3/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65832@@ -22,6 +22,10 @@
65833 #include <linux/gfp.h>
65834 #include <net/tcp.h>
65835
65836+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65837+extern int grsec_lastack_retries;
65838+#endif
65839+
65840 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65841 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65842 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65843@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65844 }
65845 }
65846
65847+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65848+ if ((sk->sk_state == TCP_LAST_ACK) &&
65849+ (grsec_lastack_retries > 0) &&
65850+ (grsec_lastack_retries < retry_until))
65851+ retry_until = grsec_lastack_retries;
65852+#endif
65853+
65854 if (retransmits_timed_out(sk, retry_until,
65855 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65856 /* Has it gone just too far? */
65857diff -urNp linux-3.0.3/net/ipv4/udp.c linux-3.0.3/net/ipv4/udp.c
65858--- linux-3.0.3/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65859+++ linux-3.0.3/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65860@@ -86,6 +86,7 @@
65861 #include <linux/types.h>
65862 #include <linux/fcntl.h>
65863 #include <linux/module.h>
65864+#include <linux/security.h>
65865 #include <linux/socket.h>
65866 #include <linux/sockios.h>
65867 #include <linux/igmp.h>
65868@@ -107,6 +108,10 @@
65869 #include <net/xfrm.h>
65870 #include "udp_impl.h"
65871
65872+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65873+extern int grsec_enable_blackhole;
65874+#endif
65875+
65876 struct udp_table udp_table __read_mostly;
65877 EXPORT_SYMBOL(udp_table);
65878
65879@@ -564,6 +569,9 @@ found:
65880 return s;
65881 }
65882
65883+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65884+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65885+
65886 /*
65887 * This routine is called by the ICMP module when it gets some
65888 * sort of error condition. If err < 0 then the socket should
65889@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65890 dport = usin->sin_port;
65891 if (dport == 0)
65892 return -EINVAL;
65893+
65894+ err = gr_search_udp_sendmsg(sk, usin);
65895+ if (err)
65896+ return err;
65897 } else {
65898 if (sk->sk_state != TCP_ESTABLISHED)
65899 return -EDESTADDRREQ;
65900+
65901+ err = gr_search_udp_sendmsg(sk, NULL);
65902+ if (err)
65903+ return err;
65904+
65905 daddr = inet->inet_daddr;
65906 dport = inet->inet_dport;
65907 /* Open fast path for connected socket.
65908@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65909 udp_lib_checksum_complete(skb)) {
65910 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65911 IS_UDPLITE(sk));
65912- atomic_inc(&sk->sk_drops);
65913+ atomic_inc_unchecked(&sk->sk_drops);
65914 __skb_unlink(skb, rcvq);
65915 __skb_queue_tail(&list_kill, skb);
65916 }
65917@@ -1184,6 +1201,10 @@ try_again:
65918 if (!skb)
65919 goto out;
65920
65921+ err = gr_search_udp_recvmsg(sk, skb);
65922+ if (err)
65923+ goto out_free;
65924+
65925 ulen = skb->len - sizeof(struct udphdr);
65926 if (len > ulen)
65927 len = ulen;
65928@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65929
65930 drop:
65931 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65932- atomic_inc(&sk->sk_drops);
65933+ atomic_inc_unchecked(&sk->sk_drops);
65934 kfree_skb(skb);
65935 return -1;
65936 }
65937@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65938 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65939
65940 if (!skb1) {
65941- atomic_inc(&sk->sk_drops);
65942+ atomic_inc_unchecked(&sk->sk_drops);
65943 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65944 IS_UDPLITE(sk));
65945 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65946@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65947 goto csum_error;
65948
65949 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65950+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65951+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65952+#endif
65953 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65954
65955 /*
65956@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65957 sk_wmem_alloc_get(sp),
65958 sk_rmem_alloc_get(sp),
65959 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65960- atomic_read(&sp->sk_refcnt), sp,
65961- atomic_read(&sp->sk_drops), len);
65962+ atomic_read(&sp->sk_refcnt),
65963+#ifdef CONFIG_GRKERNSEC_HIDESYM
65964+ NULL,
65965+#else
65966+ sp,
65967+#endif
65968+ atomic_read_unchecked(&sp->sk_drops), len);
65969 }
65970
65971 int udp4_seq_show(struct seq_file *seq, void *v)
65972diff -urNp linux-3.0.3/net/ipv6/inet6_connection_sock.c linux-3.0.3/net/ipv6/inet6_connection_sock.c
65973--- linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65974+++ linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65975@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65976 #ifdef CONFIG_XFRM
65977 {
65978 struct rt6_info *rt = (struct rt6_info *)dst;
65979- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65980+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65981 }
65982 #endif
65983 }
65984@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65985 #ifdef CONFIG_XFRM
65986 if (dst) {
65987 struct rt6_info *rt = (struct rt6_info *)dst;
65988- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65989+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65990 __sk_dst_reset(sk);
65991 dst = NULL;
65992 }
65993diff -urNp linux-3.0.3/net/ipv6/ipv6_sockglue.c linux-3.0.3/net/ipv6/ipv6_sockglue.c
65994--- linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65995+++ linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65996@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65997 int val, valbool;
65998 int retv = -ENOPROTOOPT;
65999
66000+ pax_track_stack();
66001+
66002 if (optval == NULL)
66003 val=0;
66004 else {
66005@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66006 int len;
66007 int val;
66008
66009+ pax_track_stack();
66010+
66011 if (ip6_mroute_opt(optname))
66012 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66013
66014diff -urNp linux-3.0.3/net/ipv6/raw.c linux-3.0.3/net/ipv6/raw.c
66015--- linux-3.0.3/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
66016+++ linux-3.0.3/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
66017@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66018 {
66019 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66020 skb_checksum_complete(skb)) {
66021- atomic_inc(&sk->sk_drops);
66022+ atomic_inc_unchecked(&sk->sk_drops);
66023 kfree_skb(skb);
66024 return NET_RX_DROP;
66025 }
66026@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66027 struct raw6_sock *rp = raw6_sk(sk);
66028
66029 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66030- atomic_inc(&sk->sk_drops);
66031+ atomic_inc_unchecked(&sk->sk_drops);
66032 kfree_skb(skb);
66033 return NET_RX_DROP;
66034 }
66035@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66036
66037 if (inet->hdrincl) {
66038 if (skb_checksum_complete(skb)) {
66039- atomic_inc(&sk->sk_drops);
66040+ atomic_inc_unchecked(&sk->sk_drops);
66041 kfree_skb(skb);
66042 return NET_RX_DROP;
66043 }
66044@@ -601,7 +601,7 @@ out:
66045 return err;
66046 }
66047
66048-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66049+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66050 struct flowi6 *fl6, struct dst_entry **dstp,
66051 unsigned int flags)
66052 {
66053@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66054 u16 proto;
66055 int err;
66056
66057+ pax_track_stack();
66058+
66059 /* Rough check on arithmetic overflow,
66060 better check is made in ip6_append_data().
66061 */
66062@@ -909,12 +911,15 @@ do_confirm:
66063 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66064 char __user *optval, int optlen)
66065 {
66066+ struct icmp6_filter filter;
66067+
66068 switch (optname) {
66069 case ICMPV6_FILTER:
66070 if (optlen > sizeof(struct icmp6_filter))
66071 optlen = sizeof(struct icmp6_filter);
66072- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66073+ if (copy_from_user(&filter, optval, optlen))
66074 return -EFAULT;
66075+ raw6_sk(sk)->filter = filter;
66076 return 0;
66077 default:
66078 return -ENOPROTOOPT;
66079@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66080 char __user *optval, int __user *optlen)
66081 {
66082 int len;
66083+ struct icmp6_filter filter;
66084
66085 switch (optname) {
66086 case ICMPV6_FILTER:
66087@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66088 len = sizeof(struct icmp6_filter);
66089 if (put_user(len, optlen))
66090 return -EFAULT;
66091- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66092+ filter = raw6_sk(sk)->filter;
66093+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
66094 return -EFAULT;
66095 return 0;
66096 default:
66097@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66098 0, 0L, 0,
66099 sock_i_uid(sp), 0,
66100 sock_i_ino(sp),
66101- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66102+ atomic_read(&sp->sk_refcnt),
66103+#ifdef CONFIG_GRKERNSEC_HIDESYM
66104+ NULL,
66105+#else
66106+ sp,
66107+#endif
66108+ atomic_read_unchecked(&sp->sk_drops));
66109 }
66110
66111 static int raw6_seq_show(struct seq_file *seq, void *v)
66112diff -urNp linux-3.0.3/net/ipv6/tcp_ipv6.c linux-3.0.3/net/ipv6/tcp_ipv6.c
66113--- linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
66114+++ linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
66115@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66116 }
66117 #endif
66118
66119+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66120+extern int grsec_enable_blackhole;
66121+#endif
66122+
66123 static void tcp_v6_hash(struct sock *sk)
66124 {
66125 if (sk->sk_state != TCP_CLOSE) {
66126@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66127 return 0;
66128
66129 reset:
66130+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66131+ if (!grsec_enable_blackhole)
66132+#endif
66133 tcp_v6_send_reset(sk, skb);
66134 discard:
66135 if (opt_skb)
66136@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66137 TCP_SKB_CB(skb)->sacked = 0;
66138
66139 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66140- if (!sk)
66141+ if (!sk) {
66142+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66143+ ret = 1;
66144+#endif
66145 goto no_tcp_socket;
66146+ }
66147
66148 process:
66149- if (sk->sk_state == TCP_TIME_WAIT)
66150+ if (sk->sk_state == TCP_TIME_WAIT) {
66151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66152+ ret = 2;
66153+#endif
66154 goto do_time_wait;
66155+ }
66156
66157 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66158 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66159@@ -1794,6 +1809,10 @@ no_tcp_socket:
66160 bad_packet:
66161 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66162 } else {
66163+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66164+ if (!grsec_enable_blackhole || (ret == 1 &&
66165+ (skb->dev->flags & IFF_LOOPBACK)))
66166+#endif
66167 tcp_v6_send_reset(NULL, skb);
66168 }
66169
66170@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
66171 uid,
66172 0, /* non standard timer */
66173 0, /* open_requests have no inode */
66174- 0, req);
66175+ 0,
66176+#ifdef CONFIG_GRKERNSEC_HIDESYM
66177+ NULL
66178+#else
66179+ req
66180+#endif
66181+ );
66182 }
66183
66184 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66185@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
66186 sock_i_uid(sp),
66187 icsk->icsk_probes_out,
66188 sock_i_ino(sp),
66189- atomic_read(&sp->sk_refcnt), sp,
66190+ atomic_read(&sp->sk_refcnt),
66191+#ifdef CONFIG_GRKERNSEC_HIDESYM
66192+ NULL,
66193+#else
66194+ sp,
66195+#endif
66196 jiffies_to_clock_t(icsk->icsk_rto),
66197 jiffies_to_clock_t(icsk->icsk_ack.ato),
66198 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66199@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
66200 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66201 tw->tw_substate, 0, 0,
66202 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66203- atomic_read(&tw->tw_refcnt), tw);
66204+ atomic_read(&tw->tw_refcnt),
66205+#ifdef CONFIG_GRKERNSEC_HIDESYM
66206+ NULL
66207+#else
66208+ tw
66209+#endif
66210+ );
66211 }
66212
66213 static int tcp6_seq_show(struct seq_file *seq, void *v)
66214diff -urNp linux-3.0.3/net/ipv6/udp.c linux-3.0.3/net/ipv6/udp.c
66215--- linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
66216+++ linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
66217@@ -50,6 +50,10 @@
66218 #include <linux/seq_file.h>
66219 #include "udp_impl.h"
66220
66221+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66222+extern int grsec_enable_blackhole;
66223+#endif
66224+
66225 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66226 {
66227 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66228@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66229
66230 return 0;
66231 drop:
66232- atomic_inc(&sk->sk_drops);
66233+ atomic_inc_unchecked(&sk->sk_drops);
66234 drop_no_sk_drops_inc:
66235 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66236 kfree_skb(skb);
66237@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66238 continue;
66239 }
66240 drop:
66241- atomic_inc(&sk->sk_drops);
66242+ atomic_inc_unchecked(&sk->sk_drops);
66243 UDP6_INC_STATS_BH(sock_net(sk),
66244 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66245 UDP6_INC_STATS_BH(sock_net(sk),
66246@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66247 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66248 proto == IPPROTO_UDPLITE);
66249
66250+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66251+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66252+#endif
66253 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66254
66255 kfree_skb(skb);
66256@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66257 if (!sock_owned_by_user(sk))
66258 udpv6_queue_rcv_skb(sk, skb);
66259 else if (sk_add_backlog(sk, skb)) {
66260- atomic_inc(&sk->sk_drops);
66261+ atomic_inc_unchecked(&sk->sk_drops);
66262 bh_unlock_sock(sk);
66263 sock_put(sk);
66264 goto discard;
66265@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66266 0, 0L, 0,
66267 sock_i_uid(sp), 0,
66268 sock_i_ino(sp),
66269- atomic_read(&sp->sk_refcnt), sp,
66270- atomic_read(&sp->sk_drops));
66271+ atomic_read(&sp->sk_refcnt),
66272+#ifdef CONFIG_GRKERNSEC_HIDESYM
66273+ NULL,
66274+#else
66275+ sp,
66276+#endif
66277+ atomic_read_unchecked(&sp->sk_drops));
66278 }
66279
66280 int udp6_seq_show(struct seq_file *seq, void *v)
66281diff -urNp linux-3.0.3/net/irda/ircomm/ircomm_tty.c linux-3.0.3/net/irda/ircomm/ircomm_tty.c
66282--- linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
66283+++ linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
66284@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
66285 add_wait_queue(&self->open_wait, &wait);
66286
66287 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66288- __FILE__,__LINE__, tty->driver->name, self->open_count );
66289+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66290
66291 /* As far as I can see, we protect open_count - Jean II */
66292 spin_lock_irqsave(&self->spinlock, flags);
66293 if (!tty_hung_up_p(filp)) {
66294 extra_count = 1;
66295- self->open_count--;
66296+ local_dec(&self->open_count);
66297 }
66298 spin_unlock_irqrestore(&self->spinlock, flags);
66299- self->blocked_open++;
66300+ local_inc(&self->blocked_open);
66301
66302 while (1) {
66303 if (tty->termios->c_cflag & CBAUD) {
66304@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
66305 }
66306
66307 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66308- __FILE__,__LINE__, tty->driver->name, self->open_count );
66309+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66310
66311 schedule();
66312 }
66313@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
66314 if (extra_count) {
66315 /* ++ is not atomic, so this should be protected - Jean II */
66316 spin_lock_irqsave(&self->spinlock, flags);
66317- self->open_count++;
66318+ local_inc(&self->open_count);
66319 spin_unlock_irqrestore(&self->spinlock, flags);
66320 }
66321- self->blocked_open--;
66322+ local_dec(&self->blocked_open);
66323
66324 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66325- __FILE__,__LINE__, tty->driver->name, self->open_count);
66326+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66327
66328 if (!retval)
66329 self->flags |= ASYNC_NORMAL_ACTIVE;
66330@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
66331 }
66332 /* ++ is not atomic, so this should be protected - Jean II */
66333 spin_lock_irqsave(&self->spinlock, flags);
66334- self->open_count++;
66335+ local_inc(&self->open_count);
66336
66337 tty->driver_data = self;
66338 self->tty = tty;
66339 spin_unlock_irqrestore(&self->spinlock, flags);
66340
66341 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66342- self->line, self->open_count);
66343+ self->line, local_read(&self->open_count));
66344
66345 /* Not really used by us, but lets do it anyway */
66346 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66347@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66348 return;
66349 }
66350
66351- if ((tty->count == 1) && (self->open_count != 1)) {
66352+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66353 /*
66354 * Uh, oh. tty->count is 1, which means that the tty
66355 * structure will be freed. state->count should always
66356@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66357 */
66358 IRDA_DEBUG(0, "%s(), bad serial port count; "
66359 "tty->count is 1, state->count is %d\n", __func__ ,
66360- self->open_count);
66361- self->open_count = 1;
66362+ local_read(&self->open_count));
66363+ local_set(&self->open_count, 1);
66364 }
66365
66366- if (--self->open_count < 0) {
66367+ if (local_dec_return(&self->open_count) < 0) {
66368 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66369- __func__, self->line, self->open_count);
66370- self->open_count = 0;
66371+ __func__, self->line, local_read(&self->open_count));
66372+ local_set(&self->open_count, 0);
66373 }
66374- if (self->open_count) {
66375+ if (local_read(&self->open_count)) {
66376 spin_unlock_irqrestore(&self->spinlock, flags);
66377
66378 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66379@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66380 tty->closing = 0;
66381 self->tty = NULL;
66382
66383- if (self->blocked_open) {
66384+ if (local_read(&self->blocked_open)) {
66385 if (self->close_delay)
66386 schedule_timeout_interruptible(self->close_delay);
66387 wake_up_interruptible(&self->open_wait);
66388@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66389 spin_lock_irqsave(&self->spinlock, flags);
66390 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66391 self->tty = NULL;
66392- self->open_count = 0;
66393+ local_set(&self->open_count, 0);
66394 spin_unlock_irqrestore(&self->spinlock, flags);
66395
66396 wake_up_interruptible(&self->open_wait);
66397@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66398 seq_putc(m, '\n');
66399
66400 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66401- seq_printf(m, "Open count: %d\n", self->open_count);
66402+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66403 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66404 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66405
66406diff -urNp linux-3.0.3/net/iucv/af_iucv.c linux-3.0.3/net/iucv/af_iucv.c
66407--- linux-3.0.3/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66408+++ linux-3.0.3/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66409@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66410
66411 write_lock_bh(&iucv_sk_list.lock);
66412
66413- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66414+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66415 while (__iucv_get_sock_by_name(name)) {
66416 sprintf(name, "%08x",
66417- atomic_inc_return(&iucv_sk_list.autobind_name));
66418+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66419 }
66420
66421 write_unlock_bh(&iucv_sk_list.lock);
66422diff -urNp linux-3.0.3/net/key/af_key.c linux-3.0.3/net/key/af_key.c
66423--- linux-3.0.3/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66424+++ linux-3.0.3/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66425@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66426 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66427 struct xfrm_kmaddress k;
66428
66429+ pax_track_stack();
66430+
66431 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66432 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66433 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66434@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66435 static u32 get_acqseq(void)
66436 {
66437 u32 res;
66438- static atomic_t acqseq;
66439+ static atomic_unchecked_t acqseq;
66440
66441 do {
66442- res = atomic_inc_return(&acqseq);
66443+ res = atomic_inc_return_unchecked(&acqseq);
66444 } while (!res);
66445 return res;
66446 }
66447diff -urNp linux-3.0.3/net/lapb/lapb_iface.c linux-3.0.3/net/lapb/lapb_iface.c
66448--- linux-3.0.3/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66449+++ linux-3.0.3/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66450@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66451 goto out;
66452
66453 lapb->dev = dev;
66454- lapb->callbacks = *callbacks;
66455+ lapb->callbacks = callbacks;
66456
66457 __lapb_insert_cb(lapb);
66458
66459@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66460
66461 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66462 {
66463- if (lapb->callbacks.connect_confirmation)
66464- lapb->callbacks.connect_confirmation(lapb->dev, reason);
66465+ if (lapb->callbacks->connect_confirmation)
66466+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
66467 }
66468
66469 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66470 {
66471- if (lapb->callbacks.connect_indication)
66472- lapb->callbacks.connect_indication(lapb->dev, reason);
66473+ if (lapb->callbacks->connect_indication)
66474+ lapb->callbacks->connect_indication(lapb->dev, reason);
66475 }
66476
66477 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66478 {
66479- if (lapb->callbacks.disconnect_confirmation)
66480- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66481+ if (lapb->callbacks->disconnect_confirmation)
66482+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66483 }
66484
66485 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66486 {
66487- if (lapb->callbacks.disconnect_indication)
66488- lapb->callbacks.disconnect_indication(lapb->dev, reason);
66489+ if (lapb->callbacks->disconnect_indication)
66490+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
66491 }
66492
66493 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66494 {
66495- if (lapb->callbacks.data_indication)
66496- return lapb->callbacks.data_indication(lapb->dev, skb);
66497+ if (lapb->callbacks->data_indication)
66498+ return lapb->callbacks->data_indication(lapb->dev, skb);
66499
66500 kfree_skb(skb);
66501 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66502@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66503 {
66504 int used = 0;
66505
66506- if (lapb->callbacks.data_transmit) {
66507- lapb->callbacks.data_transmit(lapb->dev, skb);
66508+ if (lapb->callbacks->data_transmit) {
66509+ lapb->callbacks->data_transmit(lapb->dev, skb);
66510 used = 1;
66511 }
66512
66513diff -urNp linux-3.0.3/net/mac80211/debugfs_sta.c linux-3.0.3/net/mac80211/debugfs_sta.c
66514--- linux-3.0.3/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66515+++ linux-3.0.3/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66516@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66517 struct tid_ampdu_rx *tid_rx;
66518 struct tid_ampdu_tx *tid_tx;
66519
66520+ pax_track_stack();
66521+
66522 rcu_read_lock();
66523
66524 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66525@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66526 struct sta_info *sta = file->private_data;
66527 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66528
66529+ pax_track_stack();
66530+
66531 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66532 htc->ht_supported ? "" : "not ");
66533 if (htc->ht_supported) {
66534diff -urNp linux-3.0.3/net/mac80211/ieee80211_i.h linux-3.0.3/net/mac80211/ieee80211_i.h
66535--- linux-3.0.3/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66536+++ linux-3.0.3/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66537@@ -27,6 +27,7 @@
66538 #include <net/ieee80211_radiotap.h>
66539 #include <net/cfg80211.h>
66540 #include <net/mac80211.h>
66541+#include <asm/local.h>
66542 #include "key.h"
66543 #include "sta_info.h"
66544
66545@@ -721,7 +722,7 @@ struct ieee80211_local {
66546 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66547 spinlock_t queue_stop_reason_lock;
66548
66549- int open_count;
66550+ local_t open_count;
66551 int monitors, cooked_mntrs;
66552 /* number of interfaces with corresponding FIF_ flags */
66553 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66554diff -urNp linux-3.0.3/net/mac80211/iface.c linux-3.0.3/net/mac80211/iface.c
66555--- linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66556+++ linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66557@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66558 break;
66559 }
66560
66561- if (local->open_count == 0) {
66562+ if (local_read(&local->open_count) == 0) {
66563 res = drv_start(local);
66564 if (res)
66565 goto err_del_bss;
66566@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66567 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66568
66569 if (!is_valid_ether_addr(dev->dev_addr)) {
66570- if (!local->open_count)
66571+ if (!local_read(&local->open_count))
66572 drv_stop(local);
66573 return -EADDRNOTAVAIL;
66574 }
66575@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66576 mutex_unlock(&local->mtx);
66577
66578 if (coming_up)
66579- local->open_count++;
66580+ local_inc(&local->open_count);
66581
66582 if (hw_reconf_flags) {
66583 ieee80211_hw_config(local, hw_reconf_flags);
66584@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66585 err_del_interface:
66586 drv_remove_interface(local, &sdata->vif);
66587 err_stop:
66588- if (!local->open_count)
66589+ if (!local_read(&local->open_count))
66590 drv_stop(local);
66591 err_del_bss:
66592 sdata->bss = NULL;
66593@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66594 }
66595
66596 if (going_down)
66597- local->open_count--;
66598+ local_dec(&local->open_count);
66599
66600 switch (sdata->vif.type) {
66601 case NL80211_IFTYPE_AP_VLAN:
66602@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66603
66604 ieee80211_recalc_ps(local, -1);
66605
66606- if (local->open_count == 0) {
66607+ if (local_read(&local->open_count) == 0) {
66608 if (local->ops->napi_poll)
66609 napi_disable(&local->napi);
66610 ieee80211_clear_tx_pending(local);
66611diff -urNp linux-3.0.3/net/mac80211/main.c linux-3.0.3/net/mac80211/main.c
66612--- linux-3.0.3/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66613+++ linux-3.0.3/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66614@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66615 local->hw.conf.power_level = power;
66616 }
66617
66618- if (changed && local->open_count) {
66619+ if (changed && local_read(&local->open_count)) {
66620 ret = drv_config(local, changed);
66621 /*
66622 * Goal:
66623diff -urNp linux-3.0.3/net/mac80211/mlme.c linux-3.0.3/net/mac80211/mlme.c
66624--- linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66625+++ linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66626@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66627 bool have_higher_than_11mbit = false;
66628 u16 ap_ht_cap_flags;
66629
66630+ pax_track_stack();
66631+
66632 /* AssocResp and ReassocResp have identical structure */
66633
66634 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66635diff -urNp linux-3.0.3/net/mac80211/pm.c linux-3.0.3/net/mac80211/pm.c
66636--- linux-3.0.3/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66637+++ linux-3.0.3/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66638@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66639 cancel_work_sync(&local->dynamic_ps_enable_work);
66640 del_timer_sync(&local->dynamic_ps_timer);
66641
66642- local->wowlan = wowlan && local->open_count;
66643+ local->wowlan = wowlan && local_read(&local->open_count);
66644 if (local->wowlan) {
66645 int err = drv_suspend(local, wowlan);
66646 if (err) {
66647@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66648 }
66649
66650 /* stop hardware - this must stop RX */
66651- if (local->open_count)
66652+ if (local_read(&local->open_count))
66653 ieee80211_stop_device(local);
66654
66655 suspend:
66656diff -urNp linux-3.0.3/net/mac80211/rate.c linux-3.0.3/net/mac80211/rate.c
66657--- linux-3.0.3/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66658+++ linux-3.0.3/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66659@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66660
66661 ASSERT_RTNL();
66662
66663- if (local->open_count)
66664+ if (local_read(&local->open_count))
66665 return -EBUSY;
66666
66667 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66668diff -urNp linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c
66669--- linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66670+++ linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66671@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66672
66673 spin_unlock_irqrestore(&events->lock, status);
66674
66675- if (copy_to_user(buf, pb, p))
66676+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66677 return -EFAULT;
66678
66679 return p;
66680diff -urNp linux-3.0.3/net/mac80211/util.c linux-3.0.3/net/mac80211/util.c
66681--- linux-3.0.3/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66682+++ linux-3.0.3/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66683@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66684 #endif
66685
66686 /* restart hardware */
66687- if (local->open_count) {
66688+ if (local_read(&local->open_count)) {
66689 /*
66690 * Upon resume hardware can sometimes be goofy due to
66691 * various platform / driver / bus issues, so restarting
66692diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c
66693--- linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66694+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66695@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66696 /* Increase the refcnt counter of the dest */
66697 atomic_inc(&dest->refcnt);
66698
66699- conn_flags = atomic_read(&dest->conn_flags);
66700+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
66701 if (cp->protocol != IPPROTO_UDP)
66702 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66703 /* Bind with the destination and its corresponding transmitter */
66704@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66705 atomic_set(&cp->refcnt, 1);
66706
66707 atomic_set(&cp->n_control, 0);
66708- atomic_set(&cp->in_pkts, 0);
66709+ atomic_set_unchecked(&cp->in_pkts, 0);
66710
66711 atomic_inc(&ipvs->conn_count);
66712 if (flags & IP_VS_CONN_F_NO_CPORT)
66713@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66714
66715 /* Don't drop the entry if its number of incoming packets is not
66716 located in [0, 8] */
66717- i = atomic_read(&cp->in_pkts);
66718+ i = atomic_read_unchecked(&cp->in_pkts);
66719 if (i > 8 || i < 0) return 0;
66720
66721 if (!todrop_rate[i]) return 0;
66722diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c
66723--- linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66724+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66725@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66726 ret = cp->packet_xmit(skb, cp, pd->pp);
66727 /* do not touch skb anymore */
66728
66729- atomic_inc(&cp->in_pkts);
66730+ atomic_inc_unchecked(&cp->in_pkts);
66731 ip_vs_conn_put(cp);
66732 return ret;
66733 }
66734@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66735 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66736 pkts = sysctl_sync_threshold(ipvs);
66737 else
66738- pkts = atomic_add_return(1, &cp->in_pkts);
66739+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66740
66741 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66742 cp->protocol == IPPROTO_SCTP) {
66743diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c
66744--- linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66745+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66746@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66747 ip_vs_rs_hash(ipvs, dest);
66748 write_unlock_bh(&ipvs->rs_lock);
66749 }
66750- atomic_set(&dest->conn_flags, conn_flags);
66751+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
66752
66753 /* bind the service */
66754 if (!dest->svc) {
66755@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66756 " %-7s %-6d %-10d %-10d\n",
66757 &dest->addr.in6,
66758 ntohs(dest->port),
66759- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66760+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66761 atomic_read(&dest->weight),
66762 atomic_read(&dest->activeconns),
66763 atomic_read(&dest->inactconns));
66764@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66765 "%-7s %-6d %-10d %-10d\n",
66766 ntohl(dest->addr.ip),
66767 ntohs(dest->port),
66768- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66769+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66770 atomic_read(&dest->weight),
66771 atomic_read(&dest->activeconns),
66772 atomic_read(&dest->inactconns));
66773@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66774 struct ip_vs_dest_user *udest_compat;
66775 struct ip_vs_dest_user_kern udest;
66776
66777+ pax_track_stack();
66778+
66779 if (!capable(CAP_NET_ADMIN))
66780 return -EPERM;
66781
66782@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66783
66784 entry.addr = dest->addr.ip;
66785 entry.port = dest->port;
66786- entry.conn_flags = atomic_read(&dest->conn_flags);
66787+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66788 entry.weight = atomic_read(&dest->weight);
66789 entry.u_threshold = dest->u_threshold;
66790 entry.l_threshold = dest->l_threshold;
66791@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66792 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66793
66794 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66795- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66796+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66797 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66798 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66799 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66800diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c
66801--- linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66802+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66803@@ -648,7 +648,7 @@ control:
66804 * i.e only increment in_pkts for Templates.
66805 */
66806 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66807- int pkts = atomic_add_return(1, &cp->in_pkts);
66808+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66809
66810 if (pkts % sysctl_sync_period(ipvs) != 1)
66811 return;
66812@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66813
66814 if (opt)
66815 memcpy(&cp->in_seq, opt, sizeof(*opt));
66816- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66817+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66818 cp->state = state;
66819 cp->old_state = cp->state;
66820 /*
66821diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c
66822--- linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66823+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66824@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66825 else
66826 rc = NF_ACCEPT;
66827 /* do not touch skb anymore */
66828- atomic_inc(&cp->in_pkts);
66829+ atomic_inc_unchecked(&cp->in_pkts);
66830 goto out;
66831 }
66832
66833@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66834 else
66835 rc = NF_ACCEPT;
66836 /* do not touch skb anymore */
66837- atomic_inc(&cp->in_pkts);
66838+ atomic_inc_unchecked(&cp->in_pkts);
66839 goto out;
66840 }
66841
66842diff -urNp linux-3.0.3/net/netfilter/Kconfig linux-3.0.3/net/netfilter/Kconfig
66843--- linux-3.0.3/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66844+++ linux-3.0.3/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66845@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66846
66847 To compile it as a module, choose M here. If unsure, say N.
66848
66849+config NETFILTER_XT_MATCH_GRADM
66850+ tristate '"gradm" match support'
66851+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66852+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66853+ ---help---
66854+ The gradm match allows to match on grsecurity RBAC being enabled.
66855+ It is useful when iptables rules are applied early on bootup to
66856+ prevent connections to the machine (except from a trusted host)
66857+ while the RBAC system is disabled.
66858+
66859 config NETFILTER_XT_MATCH_HASHLIMIT
66860 tristate '"hashlimit" match support'
66861 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66862diff -urNp linux-3.0.3/net/netfilter/Makefile linux-3.0.3/net/netfilter/Makefile
66863--- linux-3.0.3/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66864+++ linux-3.0.3/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66865@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66866 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66867 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66868 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66869+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66870 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66871 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66872 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66873diff -urNp linux-3.0.3/net/netfilter/nfnetlink_log.c linux-3.0.3/net/netfilter/nfnetlink_log.c
66874--- linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66875+++ linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66876@@ -70,7 +70,7 @@ struct nfulnl_instance {
66877 };
66878
66879 static DEFINE_SPINLOCK(instances_lock);
66880-static atomic_t global_seq;
66881+static atomic_unchecked_t global_seq;
66882
66883 #define INSTANCE_BUCKETS 16
66884 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66885@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66886 /* global sequence number */
66887 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66888 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66889- htonl(atomic_inc_return(&global_seq)));
66890+ htonl(atomic_inc_return_unchecked(&global_seq)));
66891
66892 if (data_len) {
66893 struct nlattr *nla;
66894diff -urNp linux-3.0.3/net/netfilter/nfnetlink_queue.c linux-3.0.3/net/netfilter/nfnetlink_queue.c
66895--- linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66896+++ linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66897@@ -58,7 +58,7 @@ struct nfqnl_instance {
66898 */
66899 spinlock_t lock;
66900 unsigned int queue_total;
66901- atomic_t id_sequence; /* 'sequence' of pkt ids */
66902+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66903 struct list_head queue_list; /* packets in queue */
66904 };
66905
66906@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66907 nfmsg->version = NFNETLINK_V0;
66908 nfmsg->res_id = htons(queue->queue_num);
66909
66910- entry->id = atomic_inc_return(&queue->id_sequence);
66911+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66912 pmsg.packet_id = htonl(entry->id);
66913 pmsg.hw_protocol = entskb->protocol;
66914 pmsg.hook = entry->hook;
66915@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66916 inst->peer_pid, inst->queue_total,
66917 inst->copy_mode, inst->copy_range,
66918 inst->queue_dropped, inst->queue_user_dropped,
66919- atomic_read(&inst->id_sequence), 1);
66920+ atomic_read_unchecked(&inst->id_sequence), 1);
66921 }
66922
66923 static const struct seq_operations nfqnl_seq_ops = {
66924diff -urNp linux-3.0.3/net/netfilter/xt_gradm.c linux-3.0.3/net/netfilter/xt_gradm.c
66925--- linux-3.0.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66926+++ linux-3.0.3/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66927@@ -0,0 +1,51 @@
66928+/*
66929+ * gradm match for netfilter
66930