]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.3-201108281458.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.3-201108281458.patch
CommitLineData
ab198edb
PK
1diff -urNp linux-3.0.3/arch/alpha/include/asm/elf.h linux-3.0.3/arch/alpha/include/asm/elf.h
2--- linux-3.0.3/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.3/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.3/arch/alpha/include/asm/pgtable.h linux-3.0.3/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.3/arch/alpha/kernel/module.c linux-3.0.3/arch/alpha/kernel/module.c
40--- linux-3.0.3/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.3/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.3/arch/alpha/kernel/osf_sys.c linux-3.0.3/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.3/arch/alpha/mm/fault.c linux-3.0.3/arch/alpha/mm/fault.c
86--- linux-3.0.3/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.3/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.3/arch/arm/include/asm/elf.h linux-3.0.3/arch/arm/include/asm/elf.h
245--- linux-3.0.3/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.3/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.3/arch/arm/include/asm/kmap_types.h linux-3.0.3/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.3/arch/arm/include/asm/uaccess.h linux-3.0.3/arch/arm/include/asm/uaccess.h
286--- linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.3/arch/arm/kernel/armksyms.c linux-3.0.3/arch/arm/kernel/armksyms.c
344--- linux-3.0.3/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.3/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.3/arch/arm/kernel/process.c linux-3.0.3/arch/arm/kernel/process.c
358--- linux-3.0.3/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.3/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.3/arch/arm/kernel/traps.c linux-3.0.3/arch/arm/kernel/traps.c
382--- linux-3.0.3/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.3/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.3/arch/arm/lib/copy_from_user.S linux-3.0.3/arch/arm/lib/copy_from_user.S
404--- linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.3/arch/arm/lib/copy_to_user.S linux-3.0.3/arch/arm/lib/copy_to_user.S
430--- linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.3/arch/arm/lib/uaccess.S linux-3.0.3/arch/arm/lib/uaccess.S
456--- linux-3.0.3/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.3/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.3/arch/arm/mm/fault.c linux-3.0.3/arch/arm/mm/fault.c
536--- linux-3.0.3/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.3/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.3/arch/arm/mm/mmap.c linux-3.0.3/arch/arm/mm/mmap.c
587--- linux-3.0.3/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.3/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.3/arch/avr32/include/asm/elf.h linux-3.0.3/arch/avr32/include/asm/elf.h
639--- linux-3.0.3/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.3/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.3/arch/avr32/include/asm/kmap_types.h linux-3.0.3/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.3/arch/avr32/mm/fault.c linux-3.0.3/arch/avr32/mm/fault.c
671--- linux-3.0.3/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.3/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.3/arch/frv/include/asm/kmap_types.h linux-3.0.3/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.3/arch/frv/mm/elf-fdpic.c linux-3.0.3/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.3/arch/ia64/include/asm/elf.h linux-3.0.3/arch/ia64/include/asm/elf.h
757--- linux-3.0.3/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.3/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.3/arch/ia64/include/asm/pgtable.h linux-3.0.3/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.3/arch/ia64/include/asm/spinlock.h linux-3.0.3/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.3/arch/ia64/include/asm/uaccess.h linux-3.0.3/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.3/arch/ia64/kernel/module.c linux-3.0.3/arch/ia64/kernel/module.c
837--- linux-3.0.3/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.3/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.3/arch/ia64/kernel/sys_ia64.c linux-3.0.3/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.3/arch/ia64/mm/fault.c linux-3.0.3/arch/ia64/mm/fault.c
975--- linux-3.0.3/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.3/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.3/arch/ia64/mm/hugetlbpage.c linux-3.0.3/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.3/arch/ia64/mm/init.c linux-3.0.3/arch/ia64/mm/init.c
1039--- linux-3.0.3/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.3/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.3/arch/m32r/lib/usercopy.c linux-3.0.3/arch/m32r/lib/usercopy.c
1062--- linux-3.0.3/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.3/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.3/arch/mips/include/asm/elf.h linux-3.0.3/arch/mips/include/asm/elf.h
1085--- linux-3.0.3/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.3/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.3/arch/mips/include/asm/page.h linux-3.0.3/arch/mips/include/asm/page.h
1109--- linux-3.0.3/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.3/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.3/arch/mips/include/asm/system.h linux-3.0.3/arch/mips/include/asm/system.h
1121--- linux-3.0.3/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.3/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.3/arch/mips/kernel/process.c linux-3.0.3/arch/mips/kernel/process.c
1166--- linux-3.0.3/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.3/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.3/arch/mips/mm/fault.c linux-3.0.3/arch/mips/mm/fault.c
1185--- linux-3.0.3/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.3/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.3/arch/mips/mm/mmap.c linux-3.0.3/arch/mips/mm/mmap.c
1212--- linux-3.0.3/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.3/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.3/arch/parisc/include/asm/elf.h linux-3.0.3/arch/parisc/include/asm/elf.h
1276--- linux-3.0.3/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.3/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.3/arch/parisc/include/asm/pgtable.h linux-3.0.3/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.3/arch/parisc/kernel/module.c linux-3.0.3/arch/parisc/kernel/module.c
1314--- linux-3.0.3/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.3/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.3/arch/parisc/kernel/sys_parisc.c linux-3.0.3/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.3/arch/parisc/kernel/traps.c linux-3.0.3/arch/parisc/kernel/traps.c
1447--- linux-3.0.3/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.3/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.3/arch/parisc/mm/fault.c linux-3.0.3/arch/parisc/mm/fault.c
1461--- linux-3.0.3/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.3/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.3/arch/powerpc/include/asm/elf.h linux-3.0.3/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.3/arch/powerpc/include/asm/kmap_types.h linux-3.0.3/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.3/arch/powerpc/include/asm/mman.h linux-3.0.3/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.3/arch/powerpc/include/asm/page_64.h linux-3.0.3/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.3/arch/powerpc/include/asm/page.h linux-3.0.3/arch/powerpc/include/asm/page.h
1715--- linux-3.0.3/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.3/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.3/arch/powerpc/include/asm/pgtable.h linux-3.0.3/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.3/arch/powerpc/include/asm/reg.h linux-3.0.3/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.3/arch/powerpc/include/asm/system.h linux-3.0.3/arch/powerpc/include/asm/system.h
1773--- linux-3.0.3/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.3/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.3/arch/powerpc/include/asm/uaccess.h linux-3.0.3/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.3/arch/powerpc/kernel/module_32.c linux-3.0.3/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.3/arch/powerpc/kernel/module.c linux-3.0.3/arch/powerpc/kernel/module.c
2033--- linux-3.0.3/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.3/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.3/arch/powerpc/kernel/process.c linux-3.0.3/arch/powerpc/kernel/process.c
2075--- linux-3.0.3/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.3/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_32.c linux-3.0.3/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_64.c linux-3.0.3/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.3/arch/powerpc/kernel/traps.c linux-3.0.3/arch/powerpc/kernel/traps.c
2194--- linux-3.0.3/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.3/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.3/arch/powerpc/kernel/vdso.c linux-3.0.3/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.3/arch/powerpc/lib/usercopy_64.c linux-3.0.3/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.3/arch/powerpc/mm/fault.c linux-3.0.3/arch/powerpc/mm/fault.c
2278--- linux-3.0.3/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.3/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.3/arch/powerpc/mm/mmap_64.c linux-3.0.3/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.3/arch/powerpc/mm/slice.c linux-3.0.3/arch/powerpc/mm/slice.c
2411--- linux-3.0.3/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.3/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.3/arch/s390/include/asm/elf.h linux-3.0.3/arch/s390/include/asm/elf.h
2480--- linux-3.0.3/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.3/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.3/arch/s390/include/asm/system.h linux-3.0.3/arch/s390/include/asm/system.h
2508--- linux-3.0.3/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.3/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.3/arch/s390/include/asm/uaccess.h linux-3.0.3/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.3/arch/s390/kernel/module.c linux-3.0.3/arch/s390/kernel/module.c
2555--- linux-3.0.3/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.3/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.3/arch/s390/kernel/process.c linux-3.0.3/arch/s390/kernel/process.c
2629--- linux-3.0.3/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.3/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.3/arch/s390/kernel/setup.c linux-3.0.3/arch/s390/kernel/setup.c
2672--- linux-3.0.3/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.3/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.3/arch/s390/mm/mmap.c linux-3.0.3/arch/s390/mm/mmap.c
2684--- linux-3.0.3/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.3/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.3/arch/score/include/asm/system.h linux-3.0.3/arch/score/include/asm/system.h
2733--- linux-3.0.3/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.3/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.3/arch/score/kernel/process.c linux-3.0.3/arch/score/kernel/process.c
2745--- linux-3.0.3/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.3/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.3/arch/sh/mm/mmap.c linux-3.0.3/arch/sh/mm/mmap.c
2757--- linux-3.0.3/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.3/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.3/arch/sparc/include/asm/atomic_64.h linux-3.0.3/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.3/arch/sparc/include/asm/cache.h linux-3.0.3/arch/sparc/include/asm/cache.h
3029--- linux-3.0.3/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.3/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_32.h linux-3.0.3/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_64.h linux-3.0.3/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059+++ linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtable_32.h linux-3.0.3/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.3/arch/sparc/include/asm/spinlock_64.h linux-3.0.3/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_32.h linux-3.0.3/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_64.h linux-3.0.3/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_32.h linux-3.0.3/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_64.h linux-3.0.3/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess.h linux-3.0.3/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.3/arch/sparc/kernel/Makefile linux-3.0.3/arch/sparc/kernel/Makefile
3366--- linux-3.0.3/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.3/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.3/arch/sparc/kernel/process_32.c linux-3.0.3/arch/sparc/kernel/process_32.c
3378--- linux-3.0.3/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.3/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.3/arch/sparc/kernel/process_64.c linux-3.0.3/arch/sparc/kernel/process_64.c
3416--- linux-3.0.3/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.3/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.3/arch/sparc/kernel/traps_32.c linux-3.0.3/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.3/arch/sparc/kernel/traps_64.c linux-3.0.3/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.3/arch/sparc/kernel/unaligned_64.c linux-3.0.3/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798+++ linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.3/arch/sparc/lib/atomic_64.S linux-3.0.3/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.3/arch/sparc/lib/ksyms.c linux-3.0.3/arch/sparc/lib/ksyms.c
4046--- linux-3.0.3/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.3/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.3/arch/sparc/lib/Makefile linux-3.0.3/arch/sparc/lib/Makefile
4068--- linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069+++ linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.3/arch/sparc/Makefile linux-3.0.3/arch/sparc/Makefile
4080--- linux-3.0.3/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.3/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.3/arch/sparc/mm/fault_32.c linux-3.0.3/arch/sparc/mm/fault_32.c
4092--- linux-3.0.3/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.3/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.3/arch/sparc/mm/fault_64.c linux-3.0.3/arch/sparc/mm/fault_64.c
4399--- linux-3.0.3/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.3/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.3/arch/sparc/mm/hugetlbpage.c linux-3.0.3/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.3/arch/sparc/mm/init_32.c linux-3.0.3/arch/sparc/mm/init_32.c
4971--- linux-3.0.3/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.3/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.3/arch/sparc/mm/Makefile linux-3.0.3/arch/sparc/mm/Makefile
5008--- linux-3.0.3/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.3/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.3/arch/sparc/mm/srmmu.c linux-3.0.3/arch/sparc/mm/srmmu.c
5020--- linux-3.0.3/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.3/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.3/arch/um/include/asm/kmap_types.h linux-3.0.3/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.3/arch/um/include/asm/page.h linux-3.0.3/arch/um/include/asm/page.h
5048--- linux-3.0.3/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.3/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.3/arch/um/kernel/process.c linux-3.0.3/arch/um/kernel/process.c
5061--- linux-3.0.3/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.3/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.3/arch/um/sys-i386/syscalls.c linux-3.0.3/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.3/arch/x86/boot/bitops.h linux-3.0.3/arch/x86/boot/bitops.h
5112--- linux-3.0.3/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.3/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.3/arch/x86/boot/boot.h linux-3.0.3/arch/x86/boot/boot.h
5133--- linux-3.0.3/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.3/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_32.S linux-3.0.3/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_64.S linux-3.0.3/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.3/arch/x86/boot/compressed/Makefile linux-3.0.3/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.3/arch/x86/boot/compressed/misc.c linux-3.0.3/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.3/arch/x86/boot/compressed/relocs.c linux-3.0.3/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.3/arch/x86/boot/cpucheck.c linux-3.0.3/arch/x86/boot/cpucheck.c
5435--- linux-3.0.3/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.3/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.3/arch/x86/boot/header.S linux-3.0.3/arch/x86/boot/header.S
5533--- linux-3.0.3/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.3/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.3/arch/x86/boot/Makefile linux-3.0.3/arch/x86/boot/Makefile
5545--- linux-3.0.3/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.3/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.3/arch/x86/boot/memory.c linux-3.0.3/arch/x86/boot/memory.c
5558--- linux-3.0.3/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.3/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.3/arch/x86/boot/video.c linux-3.0.3/arch/x86/boot/video.c
5570--- linux-3.0.3/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.3/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.3/arch/x86/boot/video-vesa.c linux-3.0.3/arch/x86/boot/video-vesa.c
5582--- linux-3.0.3/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.3/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.3/arch/x86/ia32/ia32_aout.c linux-3.0.3/arch/x86/ia32/ia32_aout.c
5593--- linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599+ memset(&dump, 0, sizeof(dump));
5600+
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604diff -urNp linux-3.0.3/arch/x86/ia32/ia32entry.S linux-3.0.3/arch/x86/ia32/ia32entry.S
5605--- linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606+++ linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607@@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611+#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619+ .macro pax_enter_kernel_user
5620+#ifdef CONFIG_PAX_MEMORY_UDEREF
5621+ call pax_enter_kernel_user
5622+#endif
5623+ .endm
5624+
5625+ .macro pax_exit_kernel_user
5626+#ifdef CONFIG_PAX_MEMORY_UDEREF
5627+ call pax_exit_kernel_user
5628+#endif
5629+#ifdef CONFIG_PAX_RANDKSTACK
5630+ pushq %rax
5631+ call pax_randomize_kstack
5632+ popq %rax
5633+#endif
5634+ .endm
5635+
5636+ .macro pax_erase_kstack
5637+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638+ call pax_erase_kstack
5639+#endif
5640+ .endm
5641+
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649- addq $(KERNEL_STACK_OFFSET),%rsp
5650+ pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659+ GET_THREAD_INFO(%r10)
5660+ movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668+
5669+#ifdef CONFIG_PAX_MEMORY_UDEREF
5670+ mov $PAX_USER_SHADOW_BASE,%r10
5671+ add %r10,%rbp
5672+#endif
5673+
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677@@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681+ pax_exit_kernel_user
5682+ pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690+
5691+ pax_erase_kstack
5692+
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696@@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700+
5701+ pax_erase_kstack
5702+
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711+ CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718+
5719+#ifdef CONFIG_PAX_MEMORY_UDEREF
5720+ pax_enter_kernel_user
5721+#endif
5722+
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728- SAVE_ARGS 8,1,1
5729+ SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737+
5738+#ifdef CONFIG_PAX_MEMORY_UDEREF
5739+ mov $PAX_USER_SHADOW_BASE,%r10
5740+ add %r10,%r8
5741+#endif
5742+
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746@@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750+ pax_exit_kernel_user
5751+ pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755@@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759+
5760+ pax_erase_kstack
5761+
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769+ pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773@@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783diff -urNp linux-3.0.3/arch/x86/ia32/ia32_signal.c linux-3.0.3/arch/x86/ia32/ia32_signal.c
5784--- linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785+++ linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790- sp = ((sp + 4) & -16ul) - 4;
5791+ sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808- 0,
5809+ 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817+ else if (current->mm->context.vdso)
5818+ /* Return stub is in 32bit vsyscall page */
5819+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822- rt_sigreturn);
5823+ restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835diff -urNp linux-3.0.3/arch/x86/include/asm/alternative.h linux-3.0.3/arch/x86/include/asm/alternative.h
5836--- linux-3.0.3/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837+++ linux-3.0.3/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842- ".section .altinstr_replacement, \"ax\"\n" \
5843+ ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847diff -urNp linux-3.0.3/arch/x86/include/asm/apic.h linux-3.0.3/arch/x86/include/asm/apic.h
5848--- linux-3.0.3/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849+++ linux-3.0.3/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854-extern unsigned int apic_verbosity;
5855+extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859diff -urNp linux-3.0.3/arch/x86/include/asm/apm.h linux-3.0.3/arch/x86/include/asm/apm.h
5860--- linux-3.0.3/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861+++ linux-3.0.3/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866- "lcall *%%cs:apm_bios_entry\n\t"
5867+ "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875- "lcall *%%cs:apm_bios_entry\n\t"
5876+ "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_32.h linux-3.0.3/arch/x86/include/asm/atomic64_32.h
5881--- linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882+++ linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883@@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887+#ifdef CONFIG_PAX_REFCOUNT
5888+typedef struct {
5889+ u64 __aligned(8) counter;
5890+} atomic64_unchecked_t;
5891+#else
5892+typedef atomic64_t atomic64_unchecked_t;
5893+#endif
5894+
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903+ * @p: pointer to type atomic64_unchecked_t
5904+ * @o: expected value
5905+ * @n: new value
5906+ *
5907+ * Atomically sets @v to @n if it was equal to @o and returns
5908+ * the old value.
5909+ */
5910+
5911+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912+{
5913+ return cmpxchg64(&v->counter, o, n);
5914+}
5915+
5916+/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924+ * atomic64_set_unchecked - set atomic64 variable
5925+ * @v: pointer to type atomic64_unchecked_t
5926+ * @n: value to assign
5927+ *
5928+ * Atomically sets the value of @v to @n.
5929+ */
5930+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931+{
5932+ unsigned high = (unsigned)(i >> 32);
5933+ unsigned low = (unsigned)i;
5934+ asm volatile(ATOMIC64_ALTERNATIVE(set)
5935+ : "+b" (low), "+c" (high)
5936+ : "S" (v)
5937+ : "eax", "edx", "memory"
5938+ );
5939+}
5940+
5941+/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949+ * atomic64_read_unchecked - read atomic64 variable
5950+ * @v: pointer to type atomic64_unchecked_t
5951+ *
5952+ * Atomically reads the value of @v and returns it.
5953+ */
5954+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955+{
5956+ long long r;
5957+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958+ : "=A" (r), "+c" (v)
5959+ : : "memory"
5960+ );
5961+ return r;
5962+ }
5963+
5964+/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972+/**
5973+ * atomic64_add_return_unchecked - add and return
5974+ * @i: integer value to add
5975+ * @v: pointer to type atomic64_unchecked_t
5976+ *
5977+ * Atomically adds @i to @v and returns @i + *@v
5978+ */
5979+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980+{
5981+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982+ : "+A" (i), "+c" (v)
5983+ : : "memory"
5984+ );
5985+ return i;
5986+}
5987+
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996+{
5997+ long long a;
5998+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999+ : "=A" (a)
6000+ : "S" (v)
6001+ : "memory", "ecx"
6002+ );
6003+ return a;
6004+}
6005+
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013+ * atomic64_add_unchecked - add integer to atomic64 variable
6014+ * @i: integer value to add
6015+ * @v: pointer to type atomic64_unchecked_t
6016+ *
6017+ * Atomically adds @i to @v.
6018+ */
6019+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020+{
6021+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022+ : "+A" (i), "+c" (v)
6023+ : : "memory"
6024+ );
6025+ return i;
6026+}
6027+
6028+/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_64.h linux-3.0.3/arch/x86/include/asm/atomic64_64.h
6033--- linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034+++ linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035@@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039- return (*(volatile long *)&(v)->counter);
6040+ return (*(volatile const long *)&(v)->counter);
6041+}
6042+
6043+/**
6044+ * atomic64_read_unchecked - read atomic64 variable
6045+ * @v: pointer of type atomic64_unchecked_t
6046+ *
6047+ * Atomically reads the value of @v.
6048+ * Doesn't imply a read memory barrier.
6049+ */
6050+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051+{
6052+ return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060+ * atomic64_set_unchecked - set atomic64 variable
6061+ * @v: pointer to type atomic64_unchecked_t
6062+ * @i: required value
6063+ *
6064+ * Atomically sets the value of @v to @i.
6065+ */
6066+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067+{
6068+ v->counter = i;
6069+}
6070+
6071+/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080+
6081+#ifdef CONFIG_PAX_REFCOUNT
6082+ "jno 0f\n"
6083+ LOCK_PREFIX "subq %1,%0\n"
6084+ "int $4\n0:\n"
6085+ _ASM_EXTABLE(0b, 0b)
6086+#endif
6087+
6088+ : "=m" (v->counter)
6089+ : "er" (i), "m" (v->counter));
6090+}
6091+
6092+/**
6093+ * atomic64_add_unchecked - add integer to atomic64 variable
6094+ * @i: integer value to add
6095+ * @v: pointer to type atomic64_unchecked_t
6096+ *
6097+ * Atomically adds @i to @v.
6098+ */
6099+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100+{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108- asm volatile(LOCK_PREFIX "subq %1,%0"
6109+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110+
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ "jno 0f\n"
6113+ LOCK_PREFIX "addq %1,%0\n"
6114+ "int $4\n0:\n"
6115+ _ASM_EXTABLE(0b, 0b)
6116+#endif
6117+
6118+ : "=m" (v->counter)
6119+ : "er" (i), "m" (v->counter));
6120+}
6121+
6122+/**
6123+ * atomic64_sub_unchecked - subtract the atomic64 variable
6124+ * @i: integer value to subtract
6125+ * @v: pointer to type atomic64_unchecked_t
6126+ *
6127+ * Atomically subtracts @i from @v.
6128+ */
6129+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130+{
6131+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141+
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "jno 0f\n"
6144+ LOCK_PREFIX "addq %2,%0\n"
6145+ "int $4\n0:\n"
6146+ _ASM_EXTABLE(0b, 0b)
6147+#endif
6148+
6149+ "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157+ asm volatile(LOCK_PREFIX "incq %0\n"
6158+
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ "jno 0f\n"
6161+ LOCK_PREFIX "decq %0\n"
6162+ "int $4\n0:\n"
6163+ _ASM_EXTABLE(0b, 0b)
6164+#endif
6165+
6166+ : "=m" (v->counter)
6167+ : "m" (v->counter));
6168+}
6169+
6170+/**
6171+ * atomic64_inc_unchecked - increment atomic64 variable
6172+ * @v: pointer to type atomic64_unchecked_t
6173+ *
6174+ * Atomically increments @v by 1.
6175+ */
6176+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177+{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185- asm volatile(LOCK_PREFIX "decq %0"
6186+ asm volatile(LOCK_PREFIX "decq %0\n"
6187+
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ "jno 0f\n"
6190+ LOCK_PREFIX "incq %0\n"
6191+ "int $4\n0:\n"
6192+ _ASM_EXTABLE(0b, 0b)
6193+#endif
6194+
6195+ : "=m" (v->counter)
6196+ : "m" (v->counter));
6197+}
6198+
6199+/**
6200+ * atomic64_dec_unchecked - decrement atomic64 variable
6201+ * @v: pointer to type atomic64_t
6202+ *
6203+ * Atomically decrements @v by 1.
6204+ */
6205+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206+{
6207+ asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216+ asm volatile(LOCK_PREFIX "decq %0\n"
6217+
6218+#ifdef CONFIG_PAX_REFCOUNT
6219+ "jno 0f\n"
6220+ LOCK_PREFIX "incq %0\n"
6221+ "int $4\n0:\n"
6222+ _ASM_EXTABLE(0b, 0b)
6223+#endif
6224+
6225+ "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234+ asm volatile(LOCK_PREFIX "incq %0\n"
6235+
6236+#ifdef CONFIG_PAX_REFCOUNT
6237+ "jno 0f\n"
6238+ LOCK_PREFIX "decq %0\n"
6239+ "int $4\n0:\n"
6240+ _ASM_EXTABLE(0b, 0b)
6241+#endif
6242+
6243+ "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253+
6254+#ifdef CONFIG_PAX_REFCOUNT
6255+ "jno 0f\n"
6256+ LOCK_PREFIX "subq %2,%0\n"
6257+ "int $4\n0:\n"
6258+ _ASM_EXTABLE(0b, 0b)
6259+#endif
6260+
6261+ "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271+
6272+#ifdef CONFIG_PAX_REFCOUNT
6273+ "jno 0f\n"
6274+ "movq %0, %1\n"
6275+ "int $4\n0:\n"
6276+ _ASM_EXTABLE(0b, 0b)
6277+#endif
6278+
6279+ : "+r" (i), "+m" (v->counter)
6280+ : : "memory");
6281+ return i + __i;
6282+}
6283+
6284+/**
6285+ * atomic64_add_return_unchecked - add and return
6286+ * @i: integer value to add
6287+ * @v: pointer to type atomic64_unchecked_t
6288+ *
6289+ * Atomically adds @i to @v and returns @i + @v
6290+ */
6291+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292+{
6293+ long __i = i;
6294+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303+{
6304+ return atomic64_add_return_unchecked(1, v);
6305+}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314+{
6315+ return cmpxchg(&v->counter, old, new);
6316+}
6317+
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325- long c, old;
6326+ long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329- if (unlikely(c == (u)))
6330+ if (unlikely(c == u))
6331 break;
6332- old = atomic64_cmpxchg((v), c, c + (a));
6333+
6334+ asm volatile("add %2,%0\n"
6335+
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "jno 0f\n"
6338+ "sub %2,%0\n"
6339+ "int $4\n0:\n"
6340+ _ASM_EXTABLE(0b, 0b)
6341+#endif
6342+
6343+ : "=r" (new)
6344+ : "0" (c), "ir" (a));
6345+
6346+ old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351- return c != (u);
6352+ return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356diff -urNp linux-3.0.3/arch/x86/include/asm/atomic.h linux-3.0.3/arch/x86/include/asm/atomic.h
6357--- linux-3.0.3/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358+++ linux-3.0.3/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359@@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363- return (*(volatile int *)&(v)->counter);
6364+ return (*(volatile const int *)&(v)->counter);
6365+}
6366+
6367+/**
6368+ * atomic_read_unchecked - read atomic variable
6369+ * @v: pointer of type atomic_unchecked_t
6370+ *
6371+ * Atomically reads the value of @v.
6372+ */
6373+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374+{
6375+ return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383+ * atomic_set_unchecked - set atomic variable
6384+ * @v: pointer of type atomic_unchecked_t
6385+ * @i: required value
6386+ *
6387+ * Atomically sets the value of @v to @i.
6388+ */
6389+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390+{
6391+ v->counter = i;
6392+}
6393+
6394+/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "addl %1,%0"
6403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "subl %1,%0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "+m" (v->counter)
6413+ : "ir" (i));
6414+}
6415+
6416+/**
6417+ * atomic_add_unchecked - add integer to atomic variable
6418+ * @i: integer value to add
6419+ * @v: pointer of type atomic_unchecked_t
6420+ *
6421+ * Atomically adds @i to @v.
6422+ */
6423+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424+{
6425+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433- asm volatile(LOCK_PREFIX "subl %1,%0"
6434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435+
6436+#ifdef CONFIG_PAX_REFCOUNT
6437+ "jno 0f\n"
6438+ LOCK_PREFIX "addl %1,%0\n"
6439+ "int $4\n0:\n"
6440+ _ASM_EXTABLE(0b, 0b)
6441+#endif
6442+
6443+ : "+m" (v->counter)
6444+ : "ir" (i));
6445+}
6446+
6447+/**
6448+ * atomic_sub_unchecked - subtract integer from atomic variable
6449+ * @i: integer value to subtract
6450+ * @v: pointer of type atomic_unchecked_t
6451+ *
6452+ * Atomically subtracts @i from @v.
6453+ */
6454+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455+{
6456+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466+
6467+#ifdef CONFIG_PAX_REFCOUNT
6468+ "jno 0f\n"
6469+ LOCK_PREFIX "addl %2,%0\n"
6470+ "int $4\n0:\n"
6471+ _ASM_EXTABLE(0b, 0b)
6472+#endif
6473+
6474+ "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482- asm volatile(LOCK_PREFIX "incl %0"
6483+ asm volatile(LOCK_PREFIX "incl %0\n"
6484+
6485+#ifdef CONFIG_PAX_REFCOUNT
6486+ "jno 0f\n"
6487+ LOCK_PREFIX "decl %0\n"
6488+ "int $4\n0:\n"
6489+ _ASM_EXTABLE(0b, 0b)
6490+#endif
6491+
6492+ : "+m" (v->counter));
6493+}
6494+
6495+/**
6496+ * atomic_inc_unchecked - increment atomic variable
6497+ * @v: pointer of type atomic_unchecked_t
6498+ *
6499+ * Atomically increments @v by 1.
6500+ */
6501+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502+{
6503+ asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511- asm volatile(LOCK_PREFIX "decl %0"
6512+ asm volatile(LOCK_PREFIX "decl %0\n"
6513+
6514+#ifdef CONFIG_PAX_REFCOUNT
6515+ "jno 0f\n"
6516+ LOCK_PREFIX "incl %0\n"
6517+ "int $4\n0:\n"
6518+ _ASM_EXTABLE(0b, 0b)
6519+#endif
6520+
6521+ : "+m" (v->counter));
6522+}
6523+
6524+/**
6525+ * atomic_dec_unchecked - decrement atomic variable
6526+ * @v: pointer of type atomic_unchecked_t
6527+ *
6528+ * Atomically decrements @v by 1.
6529+ */
6530+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531+{
6532+ asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541+ asm volatile(LOCK_PREFIX "decl %0\n"
6542+
6543+#ifdef CONFIG_PAX_REFCOUNT
6544+ "jno 0f\n"
6545+ LOCK_PREFIX "incl %0\n"
6546+ "int $4\n0:\n"
6547+ _ASM_EXTABLE(0b, 0b)
6548+#endif
6549+
6550+ "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559+ asm volatile(LOCK_PREFIX "incl %0\n"
6560+
6561+#ifdef CONFIG_PAX_REFCOUNT
6562+ "jno 0f\n"
6563+ LOCK_PREFIX "decl %0\n"
6564+ "int $4\n0:\n"
6565+ _ASM_EXTABLE(0b, 0b)
6566+#endif
6567+
6568+ "sete %1\n"
6569+ : "+m" (v->counter), "=qm" (c)
6570+ : : "memory");
6571+ return c != 0;
6572+}
6573+
6574+/**
6575+ * atomic_inc_and_test_unchecked - increment and test
6576+ * @v: pointer of type atomic_unchecked_t
6577+ *
6578+ * Atomically increments @v by 1
6579+ * and returns true if the result is zero, or false for all
6580+ * other cases.
6581+ */
6582+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583+{
6584+ unsigned char c;
6585+
6586+ asm volatile(LOCK_PREFIX "incl %0\n"
6587+ "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597+
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "jno 0f\n"
6600+ LOCK_PREFIX "subl %2,%0\n"
6601+ "int $4\n0:\n"
6602+ _ASM_EXTABLE(0b, 0b)
6603+#endif
6604+
6605+ "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614+
6615+#ifdef CONFIG_PAX_REFCOUNT
6616+ "jno 0f\n"
6617+ "movl %0, %1\n"
6618+ "int $4\n0:\n"
6619+ _ASM_EXTABLE(0b, 0b)
6620+#endif
6621+
6622+ : "+r" (i), "+m" (v->counter)
6623+ : : "memory");
6624+ return i + __i;
6625+
6626+#ifdef CONFIG_M386
6627+no_xadd: /* Legacy 386 processor */
6628+ local_irq_save(flags);
6629+ __i = atomic_read(v);
6630+ atomic_set(v, i + __i);
6631+ local_irq_restore(flags);
6632+ return i + __i;
6633+#endif
6634+}
6635+
6636+/**
6637+ * atomic_add_return_unchecked - add integer and return
6638+ * @v: pointer of type atomic_unchecked_t
6639+ * @i: integer value to add
6640+ *
6641+ * Atomically adds @i to @v and returns @i + @v
6642+ */
6643+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644+{
6645+ int __i;
6646+#ifdef CONFIG_M386
6647+ unsigned long flags;
6648+ if (unlikely(boot_cpu_data.x86 <= 3))
6649+ goto no_xadd;
6650+#endif
6651+ /* Modern 486+ processor */
6652+ __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661+{
6662+ return atomic_add_return_unchecked(1, v);
6663+}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672+{
6673+ return cmpxchg(&v->counter, old, new);
6674+}
6675+
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682+{
6683+ return xchg(&v->counter, new);
6684+}
6685+
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693- int c, old;
6694+ int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697- if (unlikely(c == (u)))
6698+ if (unlikely(c == u))
6699 break;
6700- old = atomic_cmpxchg((v), c, c + (a));
6701+
6702+ asm volatile("addl %2,%0\n"
6703+
6704+#ifdef CONFIG_PAX_REFCOUNT
6705+ "jno 0f\n"
6706+ "subl %2,%0\n"
6707+ "int $4\n0:\n"
6708+ _ASM_EXTABLE(0b, 0b)
6709+#endif
6710+
6711+ : "=r" (new)
6712+ : "0" (c), "ir" (a));
6713+
6714+ old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719- return c != (u);
6720+ return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725+/**
6726+ * atomic_inc_not_zero_hint - increment if not null
6727+ * @v: pointer of type atomic_t
6728+ * @hint: probable value of the atomic before the increment
6729+ *
6730+ * This version of atomic_inc_not_zero() gives a hint of probable
6731+ * value of the atomic. This helps processor to not read the memory
6732+ * before doing the atomic read/modify/write cycle, lowering
6733+ * number of bus transactions on some arches.
6734+ *
6735+ * Returns: 0 if increment was not done, 1 otherwise.
6736+ */
6737+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739+{
6740+ int val, c = hint, new;
6741+
6742+ /* sanity test, should be removed by compiler if hint is a constant */
6743+ if (!hint)
6744+ return atomic_inc_not_zero(v);
6745+
6746+ do {
6747+ asm volatile("incl %0\n"
6748+
6749+#ifdef CONFIG_PAX_REFCOUNT
6750+ "jno 0f\n"
6751+ "decl %0\n"
6752+ "int $4\n0:\n"
6753+ _ASM_EXTABLE(0b, 0b)
6754+#endif
6755+
6756+ : "=r" (new)
6757+ : "0" (c));
6758+
6759+ val = atomic_cmpxchg(v, c, new);
6760+ if (val == c)
6761+ return 1;
6762+ c = val;
6763+ } while (c);
6764+
6765+ return 0;
6766+}
6767+
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771diff -urNp linux-3.0.3/arch/x86/include/asm/bitops.h linux-3.0.3/arch/x86/include/asm/bitops.h
6772--- linux-3.0.3/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773+++ linux-3.0.3/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774@@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783diff -urNp linux-3.0.3/arch/x86/include/asm/boot.h linux-3.0.3/arch/x86/include/asm/boot.h
6784--- linux-3.0.3/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785+++ linux-3.0.3/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786@@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795+#ifndef __ASSEMBLY__
6796+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798+#endif
6799+
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803diff -urNp linux-3.0.3/arch/x86/include/asm/cacheflush.h linux-3.0.3/arch/x86/include/asm/cacheflush.h
6804--- linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805+++ linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810- return -1;
6811+ return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815diff -urNp linux-3.0.3/arch/x86/include/asm/cache.h linux-3.0.3/arch/x86/include/asm/cache.h
6816--- linux-3.0.3/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817+++ linux-3.0.3/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818@@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826+#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834diff -urNp linux-3.0.3/arch/x86/include/asm/checksum_32.h linux-3.0.3/arch/x86/include/asm/checksum_32.h
6835--- linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836+++ linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842+ int len, __wsum sum,
6843+ int *src_err_ptr, int *dst_err_ptr);
6844+
6845+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846+ int len, __wsum sum,
6847+ int *src_err_ptr, int *dst_err_ptr);
6848+
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856- return csum_partial_copy_generic((__force void *)src, dst,
6857+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865- return csum_partial_copy_generic(src, (__force void *)dst,
6866+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870diff -urNp linux-3.0.3/arch/x86/include/asm/cpufeature.h linux-3.0.3/arch/x86/include/asm/cpufeature.h
6871--- linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872+++ linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877- ".section .altinstr_replacement,\"ax\"\n"
6878+ ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882diff -urNp linux-3.0.3/arch/x86/include/asm/desc_defs.h linux-3.0.3/arch/x86/include/asm/desc_defs.h
6883--- linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884+++ linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885@@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889+ struct {
6890+ u16 offset_low;
6891+ u16 seg;
6892+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893+ unsigned offset_high: 16;
6894+ } gate;
6895 };
6896 } __attribute__((packed));
6897
6898diff -urNp linux-3.0.3/arch/x86/include/asm/desc.h linux-3.0.3/arch/x86/include/asm/desc.h
6899--- linux-3.0.3/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900+++ linux-3.0.3/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901@@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905+#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913+ desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921-extern gate_desc idt_table[];
6922-
6923-struct gdt_page {
6924- struct desc_struct gdt[GDT_ENTRIES];
6925-} __attribute__((aligned(PAGE_SIZE)));
6926-
6927-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928+extern gate_desc idt_table[256];
6929
6930+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933- return per_cpu(gdt_page, cpu).gdt;
6934+ return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942- gate->a = (seg << 16) | (base & 0xffff);
6943- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944+ gate->gate.offset_low = base;
6945+ gate->gate.seg = seg;
6946+ gate->gate.reserved = 0;
6947+ gate->gate.type = type;
6948+ gate->gate.s = 0;
6949+ gate->gate.dpl = dpl;
6950+ gate->gate.p = 1;
6951+ gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959+ pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961+ pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966+ pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968+ pax_close_kernel();
6969 }
6970
6971 static inline void
6972@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976+ pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978+ pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986+ pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988+ pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996+ pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999+ pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007-static inline void _set_gate(int gate, unsigned type, void *addr,
7008+static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016-static inline void set_intr_gate(unsigned int n, void *addr)
7017+static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025-static inline void set_system_intr_gate(unsigned int n, void *addr)
7026+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032-static inline void set_system_trap_gate(unsigned int n, void *addr)
7033+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039-static inline void set_trap_gate(unsigned int n, void *addr)
7040+static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066+#ifdef CONFIG_X86_32
7067+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068+{
7069+ struct desc_struct d;
7070+
7071+ if (likely(limit))
7072+ limit = (limit - 1UL) >> PAGE_SHIFT;
7073+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075+}
7076+#endif
7077+
7078 #endif /* _ASM_X86_DESC_H */
7079diff -urNp linux-3.0.3/arch/x86/include/asm/e820.h linux-3.0.3/arch/x86/include/asm/e820.h
7080--- linux-3.0.3/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081+++ linux-3.0.3/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082@@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086-#define BIOS_BEGIN 0x000a0000
7087+#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091diff -urNp linux-3.0.3/arch/x86/include/asm/elf.h linux-3.0.3/arch/x86/include/asm/elf.h
7092--- linux-3.0.3/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093+++ linux-3.0.3/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094@@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098+#ifdef CONFIG_PAX_SEGMEXEC
7099+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100+#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102+#endif
7103+
7104+#ifdef CONFIG_PAX_ASLR
7105+#ifdef CONFIG_X86_32
7106+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107+
7108+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110+#else
7111+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112+
7113+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115+#endif
7116+#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120@@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124- if (vdso_enabled) \
7125- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126- (unsigned long)current->mm->context.vdso); \
7127+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131@@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145-#define arch_randomize_brk arch_randomize_brk
7146-
7147 #endif /* _ASM_X86_ELF_H */
7148diff -urNp linux-3.0.3/arch/x86/include/asm/emergency-restart.h linux-3.0.3/arch/x86/include/asm/emergency-restart.h
7149--- linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150+++ linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151@@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155-extern void machine_emergency_restart(void);
7156+extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159diff -urNp linux-3.0.3/arch/x86/include/asm/futex.h linux-3.0.3/arch/x86/include/asm/futex.h
7160--- linux-3.0.3/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161+++ linux-3.0.3/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162@@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166+ typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178+ typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182@@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186- "+m" (*uaddr), "=&r" (tem) \
7187+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220diff -urNp linux-3.0.3/arch/x86/include/asm/hw_irq.h linux-3.0.3/arch/x86/include/asm/hw_irq.h
7221--- linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222+++ linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227-extern atomic_t irq_err_count;
7228-extern atomic_t irq_mis_count;
7229+extern atomic_unchecked_t irq_err_count;
7230+extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234diff -urNp linux-3.0.3/arch/x86/include/asm/i387.h linux-3.0.3/arch/x86/include/asm/i387.h
7235--- linux-3.0.3/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236+++ linux-3.0.3/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244+#endif
7245+
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256+#endif
7257+
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265- in L1 during context switch. The best choices are unfortunately
7266- different for UP and SMP */
7267-#ifdef CONFIG_SMP
7268-#define safe_address (__per_cpu_offset[0])
7269-#else
7270-#define safe_address (kstat_cpu(0).cpustat.user)
7271-#endif
7272+ in L1 during context switch. */
7273+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281- __save_init_fpu(me->task);
7282+ __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286diff -urNp linux-3.0.3/arch/x86/include/asm/io.h linux-3.0.3/arch/x86/include/asm/io.h
7287--- linux-3.0.3/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288+++ linux-3.0.3/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295+{
7296+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297+}
7298+
7299+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300+{
7301+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302+}
7303+
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307diff -urNp linux-3.0.3/arch/x86/include/asm/irqflags.h linux-3.0.3/arch/x86/include/asm/irqflags.h
7308--- linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309+++ linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318+
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322diff -urNp linux-3.0.3/arch/x86/include/asm/kprobes.h linux-3.0.3/arch/x86/include/asm/kprobes.h
7323--- linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324+++ linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329-#define MAX_STACK_SIZE 64
7330-#define MIN_STACK_SIZE(ADDR) \
7331- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332- THREAD_SIZE - (unsigned long)(ADDR))) \
7333- ? (MAX_STACK_SIZE) \
7334- : (((unsigned long)current_thread_info()) + \
7335- THREAD_SIZE - (unsigned long)(ADDR)))
7336+#define MAX_STACK_SIZE 64UL
7337+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341diff -urNp linux-3.0.3/arch/x86/include/asm/kvm_host.h linux-3.0.3/arch/x86/include/asm/kvm_host.h
7342--- linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343+++ linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344@@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348- atomic_t invlpg_counter;
7349+ atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357-};
7358+} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362diff -urNp linux-3.0.3/arch/x86/include/asm/local.h linux-3.0.3/arch/x86/include/asm/local.h
7363--- linux-3.0.3/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364+++ linux-3.0.3/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365@@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369- asm volatile(_ASM_INC "%0"
7370+ asm volatile(_ASM_INC "%0\n"
7371+
7372+#ifdef CONFIG_PAX_REFCOUNT
7373+ "jno 0f\n"
7374+ _ASM_DEC "%0\n"
7375+ "int $4\n0:\n"
7376+ _ASM_EXTABLE(0b, 0b)
7377+#endif
7378+
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384- asm volatile(_ASM_DEC "%0"
7385+ asm volatile(_ASM_DEC "%0\n"
7386+
7387+#ifdef CONFIG_PAX_REFCOUNT
7388+ "jno 0f\n"
7389+ _ASM_INC "%0\n"
7390+ "int $4\n0:\n"
7391+ _ASM_EXTABLE(0b, 0b)
7392+#endif
7393+
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399- asm volatile(_ASM_ADD "%1,%0"
7400+ asm volatile(_ASM_ADD "%1,%0\n"
7401+
7402+#ifdef CONFIG_PAX_REFCOUNT
7403+ "jno 0f\n"
7404+ _ASM_SUB "%1,%0\n"
7405+ "int $4\n0:\n"
7406+ _ASM_EXTABLE(0b, 0b)
7407+#endif
7408+
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415- asm volatile(_ASM_SUB "%1,%0"
7416+ asm volatile(_ASM_SUB "%1,%0\n"
7417+
7418+#ifdef CONFIG_PAX_REFCOUNT
7419+ "jno 0f\n"
7420+ _ASM_ADD "%1,%0\n"
7421+ "int $4\n0:\n"
7422+ _ASM_EXTABLE(0b, 0b)
7423+#endif
7424+
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432- asm volatile(_ASM_SUB "%2,%0; sete %1"
7433+ asm volatile(_ASM_SUB "%2,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ _ASM_ADD "%2,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450- asm volatile(_ASM_DEC "%0; sete %1"
7451+ asm volatile(_ASM_DEC "%0\n"
7452+
7453+#ifdef CONFIG_PAX_REFCOUNT
7454+ "jno 0f\n"
7455+ _ASM_INC "%0\n"
7456+ "int $4\n0:\n"
7457+ _ASM_EXTABLE(0b, 0b)
7458+#endif
7459+
7460+ "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468- asm volatile(_ASM_INC "%0; sete %1"
7469+ asm volatile(_ASM_INC "%0\n"
7470+
7471+#ifdef CONFIG_PAX_REFCOUNT
7472+ "jno 0f\n"
7473+ _ASM_DEC "%0\n"
7474+ "int $4\n0:\n"
7475+ _ASM_EXTABLE(0b, 0b)
7476+#endif
7477+
7478+ "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486- asm volatile(_ASM_ADD "%2,%0; sets %1"
7487+ asm volatile(_ASM_ADD "%2,%0\n"
7488+
7489+#ifdef CONFIG_PAX_REFCOUNT
7490+ "jno 0f\n"
7491+ _ASM_SUB "%2,%0\n"
7492+ "int $4\n0:\n"
7493+ _ASM_EXTABLE(0b, 0b)
7494+#endif
7495+
7496+ "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500@@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504- asm volatile(_ASM_XADD "%0, %1;"
7505+ asm volatile(_ASM_XADD "%0, %1\n"
7506+
7507+#ifdef CONFIG_PAX_REFCOUNT
7508+ "jno 0f\n"
7509+ _ASM_MOV "%0,%1\n"
7510+ "int $4\n0:\n"
7511+ _ASM_EXTABLE(0b, 0b)
7512+#endif
7513+
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517diff -urNp linux-3.0.3/arch/x86/include/asm/mman.h linux-3.0.3/arch/x86/include/asm/mman.h
7518--- linux-3.0.3/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519+++ linux-3.0.3/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520@@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524+#ifdef __KERNEL__
7525+#ifndef __ASSEMBLY__
7526+#ifdef CONFIG_X86_32
7527+#define arch_mmap_check i386_mmap_check
7528+int i386_mmap_check(unsigned long addr, unsigned long len,
7529+ unsigned long flags);
7530+#endif
7531+#endif
7532+#endif
7533+
7534 #endif /* _ASM_X86_MMAN_H */
7535diff -urNp linux-3.0.3/arch/x86/include/asm/mmu_context.h linux-3.0.3/arch/x86/include/asm/mmu_context.h
7536--- linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537+++ linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542+
7543+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544+ unsigned int i;
7545+ pgd_t *pgd;
7546+
7547+ pax_open_kernel();
7548+ pgd = get_cpu_pgd(smp_processor_id());
7549+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550+ set_pgd_batched(pgd+i, native_make_pgd(0));
7551+ pax_close_kernel();
7552+#endif
7553+
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562+ int tlbstate = TLBSTATE_OK;
7563+#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568+ tlbstate = percpu_read(cpu_tlbstate.state);
7569+#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576+#ifdef CONFIG_PAX_PER_CPU_PGD
7577+ pax_open_kernel();
7578+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580+ pax_close_kernel();
7581+ load_cr3(get_cpu_pgd(cpu));
7582+#else
7583 load_cr3(next->pgd);
7584+#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592- }
7593+
7594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595+ if (!(__supported_pte_mask & _PAGE_NX)) {
7596+ smp_mb__before_clear_bit();
7597+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598+ smp_mb__after_clear_bit();
7599+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7600+ }
7601+#endif
7602+
7603+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605+ prev->context.user_cs_limit != next->context.user_cs_limit))
7606+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608+ else if (unlikely(tlbstate != TLBSTATE_OK))
7609+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610+#endif
7611+#endif
7612+
7613+ }
7614 else {
7615+
7616+#ifdef CONFIG_PAX_PER_CPU_PGD
7617+ pax_open_kernel();
7618+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620+ pax_close_kernel();
7621+ load_cr3(get_cpu_pgd(cpu));
7622+#endif
7623+
7624+#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632+
7633+#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635+#endif
7636+
7637 load_LDT_nolock(&next->context);
7638+
7639+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640+ if (!(__supported_pte_mask & _PAGE_NX))
7641+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7642+#endif
7643+
7644+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645+#ifdef CONFIG_PAX_PAGEEXEC
7646+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647+#endif
7648+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649+#endif
7650+
7651 }
7652- }
7653 #endif
7654+ }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658diff -urNp linux-3.0.3/arch/x86/include/asm/mmu.h linux-3.0.3/arch/x86/include/asm/mmu.h
7659--- linux-3.0.3/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660+++ linux-3.0.3/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661@@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665- void *ldt;
7666+ struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670@@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674- void *vdso;
7675+ unsigned long vdso;
7676+
7677+#ifdef CONFIG_X86_32
7678+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679+ unsigned long user_cs_base;
7680+ unsigned long user_cs_limit;
7681+
7682+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683+ cpumask_t cpu_user_cs_mask;
7684+#endif
7685+
7686+#endif
7687+#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691diff -urNp linux-3.0.3/arch/x86/include/asm/module.h linux-3.0.3/arch/x86/include/asm/module.h
7692--- linux-3.0.3/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693+++ linux-3.0.3/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694@@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698+#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702@@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706-#ifdef CONFIG_X86_32
7707-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708+#ifdef CONFIG_PAX_MEMORY_UDEREF
7709+#define MODULE_PAX_UDEREF "UDEREF "
7710+#else
7711+#define MODULE_PAX_UDEREF ""
7712+#endif
7713+
7714+#ifdef CONFIG_PAX_KERNEXEC
7715+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716+#else
7717+#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722+#else
7723+#define MODULE_PAX_REFCOUNT ""
7724+#endif
7725+
7726+#ifdef CONFIG_GRKERNSEC
7727+#define MODULE_GRSEC "GRSECURITY "
7728+#else
7729+#define MODULE_GRSEC ""
7730+#endif
7731+
7732+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733+
7734 #endif /* _ASM_X86_MODULE_H */
7735diff -urNp linux-3.0.3/arch/x86/include/asm/page_64_types.h linux-3.0.3/arch/x86/include/asm/page_64_types.h
7736--- linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737+++ linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742-extern unsigned long phys_base;
7743+extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt.h linux-3.0.3/arch/x86/include/asm/paravirt.h
7748--- linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749+++ linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755+{
7756+ pgdval_t val = native_pgd_val(pgd);
7757+
7758+ if (sizeof(pgdval_t) > sizeof(long))
7759+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760+ val, (u64)val >> 32);
7761+ else
7762+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763+ val);
7764+}
7765+
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773+#ifdef CONFIG_PAX_KERNEXEC
7774+static inline unsigned long pax_open_kernel(void)
7775+{
7776+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777+}
7778+
7779+static inline unsigned long pax_close_kernel(void)
7780+{
7781+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782+}
7783+#else
7784+static inline unsigned long pax_open_kernel(void) { return 0; }
7785+static inline unsigned long pax_close_kernel(void) { return 0; }
7786+#endif
7787+
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791@@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795-#define PARA_INDIRECT(addr) *%cs:addr
7796+#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804+
7805+#define GET_CR0_INTO_RDI \
7806+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807+ mov %rax,%rdi
7808+
7809+#define SET_RDI_INTO_CR0 \
7810+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811+
7812+#define GET_CR3_INTO_RDI \
7813+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814+ mov %rax,%rdi
7815+
7816+#define SET_RDI_INTO_CR3 \
7817+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818+
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt_types.h linux-3.0.3/arch/x86/include/asm/paravirt_types.h
7823--- linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824+++ linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825@@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829-};
7830+} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837-};
7838+} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843-};
7844+} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848@@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852-};
7853+} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857@@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861-};
7862+} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866@@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874@@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878+
7879+#ifdef CONFIG_PAX_KERNEXEC
7880+ unsigned long (*pax_open_kernel)(void);
7881+ unsigned long (*pax_close_kernel)(void);
7882+#endif
7883+
7884 };
7885
7886 struct arch_spinlock;
7887@@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891-};
7892+} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896diff -urNp linux-3.0.3/arch/x86/include/asm/pgalloc.h linux-3.0.3/arch/x86/include/asm/pgalloc.h
7897--- linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898+++ linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904+}
7905+
7906+static inline void pmd_populate_user(struct mm_struct *mm,
7907+ pmd_t *pmd, pte_t *pte)
7908+{
7909+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-2level.h linux-3.0.3/arch/x86/include/asm/pgtable-2level.h
7914--- linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915+++ linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920+ pax_open_kernel();
7921 *pmdp = pmd;
7922+ pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32.h linux-3.0.3/arch/x86/include/asm/pgtable_32.h
7927--- linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928+++ linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929@@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933-extern pgd_t swapper_pg_dir[1024];
7934-extern pgd_t initial_page_table[1024];
7935-
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944+extern pgd_t initial_page_table[PTRS_PER_PGD];
7945+#ifdef CONFIG_X86_PAE
7946+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947+#endif
7948+
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956+ pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958+ pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962@@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966+#define HAVE_ARCH_UNMAPPED_AREA
7967+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968+
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h
7973--- linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974+++ linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975@@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979-# define PMD_SIZE (1UL << PMD_SHIFT)
7980+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988+#ifdef CONFIG_PAX_KERNEXEC
7989+#ifndef __ASSEMBLY__
7990+extern unsigned char MODULES_EXEC_VADDR[];
7991+extern unsigned char MODULES_EXEC_END[];
7992+#endif
7993+#include <asm/boot.h>
7994+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996+#else
7997+#define ktla_ktva(addr) (addr)
7998+#define ktva_ktla(addr) (addr)
7999+#endif
8000+
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-3level.h linux-3.0.3/arch/x86/include/asm/pgtable-3level.h
8005--- linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006+++ linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011+ pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013+ pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018+ pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020+ pax_close_kernel();
8021 }
8022
8023 /*
8024diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64.h linux-3.0.3/arch/x86/include/asm/pgtable_64.h
8025--- linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026+++ linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027@@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031+extern pud_t level3_vmalloc_pgt[512];
8032+extern pud_t level3_vmemmap_pgt[512];
8033+extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036-extern pmd_t level2_ident_pgt[512];
8037-extern pgd_t init_level4_pgt[];
8038+extern pmd_t level2_ident_pgt[512*2];
8039+extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047+ pax_open_kernel();
8048 *pmdp = pmd;
8049+ pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057+ pax_open_kernel();
8058+ *pgdp = pgd;
8059+ pax_close_kernel();
8060+}
8061+
8062+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063+{
8064 *pgdp = pgd;
8065 }
8066
8067diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h
8068--- linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069+++ linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074+#define MODULES_EXEC_VADDR MODULES_VADDR
8075+#define MODULES_EXEC_END MODULES_END
8076+
8077+#define ktla_ktva(addr) (addr)
8078+#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable.h linux-3.0.3/arch/x86/include/asm/pgtable.h
8082--- linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083+++ linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096+#define pax_open_kernel() native_pax_open_kernel()
8097+#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102+
8103+#ifdef CONFIG_PAX_KERNEXEC
8104+static inline unsigned long native_pax_open_kernel(void)
8105+{
8106+ unsigned long cr0;
8107+
8108+ preempt_disable();
8109+ barrier();
8110+ cr0 = read_cr0() ^ X86_CR0_WP;
8111+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112+ write_cr0(cr0);
8113+ return cr0 ^ X86_CR0_WP;
8114+}
8115+
8116+static inline unsigned long native_pax_close_kernel(void)
8117+{
8118+ unsigned long cr0;
8119+
8120+ cr0 = read_cr0() ^ X86_CR0_WP;
8121+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122+ write_cr0(cr0);
8123+ barrier();
8124+ preempt_enable_no_resched();
8125+ return cr0 ^ X86_CR0_WP;
8126+}
8127+#else
8128+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130+#endif
8131+
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136+static inline int pte_user(pte_t pte)
8137+{
8138+ return pte_val(pte) & _PAGE_USER;
8139+}
8140+
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148+static inline pte_t pte_mkread(pte_t pte)
8149+{
8150+ return __pte(pte_val(pte) | _PAGE_USER);
8151+}
8152+
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155- return pte_clear_flags(pte, _PAGE_NX);
8156+#ifdef CONFIG_X86_PAE
8157+ if (__supported_pte_mask & _PAGE_NX)
8158+ return pte_clear_flags(pte, _PAGE_NX);
8159+ else
8160+#endif
8161+ return pte_set_flags(pte, _PAGE_USER);
8162+}
8163+
8164+static inline pte_t pte_exprotect(pte_t pte)
8165+{
8166+#ifdef CONFIG_X86_PAE
8167+ if (__supported_pte_mask & _PAGE_NX)
8168+ return pte_set_flags(pte, _PAGE_NX);
8169+ else
8170+#endif
8171+ return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179+
8180+#ifdef CONFIG_PAX_PER_CPU_PGD
8181+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183+{
8184+ return cpu_pgd[cpu];
8185+}
8186+#endif
8187+
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206+
8207+#ifdef CONFIG_PAX_PER_CPU_PGD
8208+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209+#endif
8210+
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218+#ifdef CONFIG_X86_32
8219+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220+#else
8221+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223+
8224+#ifdef CONFIG_PAX_MEMORY_UDEREF
8225+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226+#else
8227+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228+#endif
8229+
8230+#endif
8231+
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242- memcpy(dst, src, count * sizeof(pgd_t));
8243+ pax_open_kernel();
8244+ while (count--)
8245+ *dst++ = *src++;
8246+ pax_close_kernel();
8247 }
8248
8249+#ifdef CONFIG_PAX_PER_CPU_PGD
8250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251+#endif
8252+
8253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255+#else
8256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257+#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_types.h linux-3.0.3/arch/x86/include/asm/pgtable_types.h
8262--- linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263+++ linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264@@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281@@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289@@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293-#else
8294+#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296+#else
8297+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301@@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307+
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311@@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322@@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337+#endif
8338
8339+#if PAGETABLE_LEVELS == 3
8340+#include <asm-generic/pgtable-nopud.h>
8341+#endif
8342+
8343+#if PAGETABLE_LEVELS == 2
8344+#include <asm-generic/pgtable-nopmd.h>
8345+#endif
8346+
8347+#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355-#include <asm-generic/pgtable-nopud.h>
8356-
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364-#include <asm-generic/pgtable-nopmd.h>
8365-
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373-extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377diff -urNp linux-3.0.3/arch/x86/include/asm/processor.h linux-3.0.3/arch/x86/include/asm/processor.h
8378--- linux-3.0.3/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379+++ linux-3.0.3/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380@@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385+extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393+
8394+#ifdef CONFIG_PAX_SEGMEXEC
8395+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397+#else
8398 #define STACK_TOP TASK_SIZE
8399-#define STACK_TOP_MAX STACK_TOP
8400+#endif
8401+
8402+#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423-#define KSTK_TOP(info) \
8424-({ \
8425- unsigned long *__ptr = (unsigned long *)(info); \
8426- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427-})
8428+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452- 0xc0000000 : 0xFFFFe000)
8453+ 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475+#ifdef CONFIG_PAX_SEGMEXEC
8476+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477+#endif
8478+
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482diff -urNp linux-3.0.3/arch/x86/include/asm/ptrace.h linux-3.0.3/arch/x86/include/asm/ptrace.h
8483--- linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484+++ linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489- * user_mode_vm(regs) determines whether a register set came from user mode.
8490+ * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496+ * be used.
8497 */
8498-static inline int user_mode(struct pt_regs *regs)
8499+static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504- return !!(regs->cs & 3);
8505+ return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509-static inline int user_mode_vm(struct pt_regs *regs)
8510+static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516- return user_mode(regs);
8517+ return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521diff -urNp linux-3.0.3/arch/x86/include/asm/reboot.h linux-3.0.3/arch/x86/include/asm/reboot.h
8522--- linux-3.0.3/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523+++ linux-3.0.3/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524@@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528- void (*restart)(char *cmd);
8529- void (*halt)(void);
8530- void (*power_off)(void);
8531+ void (* __noreturn restart)(char *cmd);
8532+ void (* __noreturn halt)(void);
8533+ void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536- void (*emergency_restart)(void);
8537-};
8538+ void (* __noreturn emergency_restart)(void);
8539+} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545-void machine_real_restart(unsigned int type);
8546+void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550diff -urNp linux-3.0.3/arch/x86/include/asm/rwsem.h linux-3.0.3/arch/x86/include/asm/rwsem.h
8551--- linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552+++ linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ "jno 0f\n"
8560+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8561+ "int $4\n0:\n"
8562+ _ASM_EXTABLE(0b, 0b)
8563+#endif
8564+
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "sub %3,%2\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+ "jno 0f\n"
8590+ "mov %1,(%2)\n"
8591+ "int $4\n0:\n"
8592+ _ASM_EXTABLE(0b, 0b)
8593+#endif
8594+
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602+
8603+#ifdef CONFIG_PAX_REFCOUNT
8604+ "jno 0f\n"
8605+ "mov %1,(%2)\n"
8606+ "int $4\n0:\n"
8607+ _ASM_EXTABLE(0b, 0b)
8608+#endif
8609+
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617+
8618+#ifdef CONFIG_PAX_REFCOUNT
8619+ "jno 0f\n"
8620+ "mov %1,(%2)\n"
8621+ "int $4\n0:\n"
8622+ _ASM_EXTABLE(0b, 0b)
8623+#endif
8624+
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632+
8633+#ifdef CONFIG_PAX_REFCOUNT
8634+ "jno 0f\n"
8635+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636+ "int $4\n0:\n"
8637+ _ASM_EXTABLE(0b, 0b)
8638+#endif
8639+
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649+
8650+#ifdef CONFIG_PAX_REFCOUNT
8651+ "jno 0f\n"
8652+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653+ "int $4\n0:\n"
8654+ _ASM_EXTABLE(0b, 0b)
8655+#endif
8656+
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664- asm volatile(LOCK_PREFIX "xadd %0,%1"
8665+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666+
8667+#ifdef CONFIG_PAX_REFCOUNT
8668+ "jno 0f\n"
8669+ "mov %0,%1\n"
8670+ "int $4\n0:\n"
8671+ _ASM_EXTABLE(0b, 0b)
8672+#endif
8673+
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677diff -urNp linux-3.0.3/arch/x86/include/asm/segment.h linux-3.0.3/arch/x86/include/asm/segment.h
8678--- linux-3.0.3/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679+++ linux-3.0.3/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680@@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684- * 29 - unused
8685- * 30 - unused
8686+ * 29 - PCI BIOS CS
8687+ * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691@@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696+
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700@@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706+
8707+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709+
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713@@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722@@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727+
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731@@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739diff -urNp linux-3.0.3/arch/x86/include/asm/smp.h linux-3.0.3/arch/x86/include/asm/smp.h
8740--- linux-3.0.3/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741+++ linux-3.0.3/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746-DECLARE_PER_CPU(int, cpu_number);
8747+DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751@@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755-};
8756+} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764-#define raw_smp_processor_id() (percpu_read(cpu_number))
8765-
8766-#define stack_smp_processor_id() \
8767-({ \
8768- struct thread_info *ti; \
8769- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770- ti->cpu; \
8771-})
8772+#define raw_smp_processor_id() (percpu_read(cpu_number))
8773+#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777diff -urNp linux-3.0.3/arch/x86/include/asm/spinlock.h linux-3.0.3/arch/x86/include/asm/spinlock.h
8778--- linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779+++ linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784+
8785+#ifdef CONFIG_PAX_REFCOUNT
8786+ "jno 0f\n"
8787+ LOCK_PREFIX " addl $1,(%0)\n"
8788+ "int $4\n0:\n"
8789+ _ASM_EXTABLE(0b, 0b)
8790+#endif
8791+
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799+
8800+#ifdef CONFIG_PAX_REFCOUNT
8801+ "jno 0f\n"
8802+ LOCK_PREFIX " addl %1,(%0)\n"
8803+ "int $4\n0:\n"
8804+ _ASM_EXTABLE(0b, 0b)
8805+#endif
8806+
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815+ asm volatile(LOCK_PREFIX "incl %0\n"
8816+
8817+#ifdef CONFIG_PAX_REFCOUNT
8818+ "jno 0f\n"
8819+ LOCK_PREFIX "decl %0\n"
8820+ "int $4\n0:\n"
8821+ _ASM_EXTABLE(0b, 0b)
8822+#endif
8823+
8824+ :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829- asm volatile(LOCK_PREFIX "addl %1, %0"
8830+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831+
8832+#ifdef CONFIG_PAX_REFCOUNT
8833+ "jno 0f\n"
8834+ LOCK_PREFIX "subl %1, %0\n"
8835+ "int $4\n0:\n"
8836+ _ASM_EXTABLE(0b, 0b)
8837+#endif
8838+
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842diff -urNp linux-3.0.3/arch/x86/include/asm/stackprotector.h linux-3.0.3/arch/x86/include/asm/stackprotector.h
8843--- linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844+++ linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845@@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858-#ifdef CONFIG_X86_32
8859+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863diff -urNp linux-3.0.3/arch/x86/include/asm/stacktrace.h linux-3.0.3/arch/x86/include/asm/stacktrace.h
8864--- linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865+++ linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866@@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870-struct thread_info;
8871+struct task_struct;
8872 struct stacktrace_ops;
8873
8874-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875- unsigned long *stack,
8876- unsigned long bp,
8877- const struct stacktrace_ops *ops,
8878- void *data,
8879- unsigned long *end,
8880- int *graph);
8881-
8882-extern unsigned long
8883-print_context_stack(struct thread_info *tinfo,
8884- unsigned long *stack, unsigned long bp,
8885- const struct stacktrace_ops *ops, void *data,
8886- unsigned long *end, int *graph);
8887-
8888-extern unsigned long
8889-print_context_stack_bp(struct thread_info *tinfo,
8890- unsigned long *stack, unsigned long bp,
8891- const struct stacktrace_ops *ops, void *data,
8892- unsigned long *end, int *graph);
8893+typedef unsigned long walk_stack_t(struct task_struct *task,
8894+ void *stack_start,
8895+ unsigned long *stack,
8896+ unsigned long bp,
8897+ const struct stacktrace_ops *ops,
8898+ void *data,
8899+ unsigned long *end,
8900+ int *graph);
8901+
8902+extern walk_stack_t print_context_stack;
8903+extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907@@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911- walk_stack_t walk_stack;
8912+ walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916diff -urNp linux-3.0.3/arch/x86/include/asm/system.h linux-3.0.3/arch/x86/include/asm/system.h
8917--- linux-3.0.3/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918+++ linux-3.0.3/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919@@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923- "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928@@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932- [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933+ [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941- return __limit + 1;
8942+ return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946@@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950-extern unsigned long arch_align_stack(unsigned long sp);
8951+#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956-void stop_this_cpu(void *dummy);
8957+void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961diff -urNp linux-3.0.3/arch/x86/include/asm/thread_info.h linux-3.0.3/arch/x86/include/asm/thread_info.h
8962--- linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963+++ linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964@@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968+#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972@@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976- struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980@@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984-#ifdef CONFIG_X86_32
8985- unsigned long previous_esp; /* ESP of the previous stack in
8986- case of nested (IRQ) stacks
8987- */
8988- __u8 supervisor_stack[0];
8989-#endif
8990+ unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994-#define INIT_THREAD_INFO(tsk) \
8995+#define INIT_THREAD_INFO \
8996 { \
8997- .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001@@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005-#define init_thread_info (init_thread_union.thread_info)
9006+#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010@@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014+#ifdef __ASSEMBLY__
9015+/* how to get the thread information struct from ASM */
9016+#define GET_THREAD_INFO(reg) \
9017+ mov PER_CPU_VAR(current_tinfo), reg
9018+
9019+/* use this one if reg already contains %esp */
9020+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021+#else
9022+/* how to get the thread information struct from C */
9023+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024+
9025+static __always_inline struct thread_info *current_thread_info(void)
9026+{
9027+ return percpu_read_stable(current_tinfo);
9028+}
9029+#endif
9030+
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034@@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038-
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042-/* how to get the thread information struct from C */
9043-static inline struct thread_info *current_thread_info(void)
9044-{
9045- return (struct thread_info *)
9046- (current_stack_pointer & ~(THREAD_SIZE - 1));
9047-}
9048-
9049-#else /* !__ASSEMBLY__ */
9050-
9051-/* how to get the thread information struct from ASM */
9052-#define GET_THREAD_INFO(reg) \
9053- movl $-THREAD_SIZE, reg; \
9054- andl %esp, reg
9055-
9056-/* use this one if reg already contains %esp */
9057-#define GET_THREAD_INFO_WITH_ESP(reg) \
9058- andl $-THREAD_SIZE, reg
9059-
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064-#include <asm/percpu.h>
9065-#define KERNEL_STACK_OFFSET (5*8)
9066-
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074-static inline struct thread_info *current_thread_info(void)
9075-{
9076- struct thread_info *ti;
9077- ti = (void *)(percpu_read_stable(kernel_stack) +
9078- KERNEL_STACK_OFFSET - THREAD_SIZE);
9079- return ti;
9080-}
9081-
9082-#else /* !__ASSEMBLY__ */
9083-
9084-/* how to get the thread information struct from ASM */
9085-#define GET_THREAD_INFO(reg) \
9086- movq PER_CPU_VAR(kernel_stack),reg ; \
9087- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088-
9089+/* how to get the current stack pointer from C */
9090+register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098+
9099+#define __HAVE_THREAD_FUNCTIONS
9100+#define task_thread_info(task) (&(task)->tinfo)
9101+#define task_stack_page(task) ((task)->stack)
9102+#define setup_thread_stack(p, org) do {} while (0)
9103+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104+
9105+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106+extern struct task_struct *alloc_task_struct_node(int node);
9107+extern void free_task_struct(struct task_struct *);
9108+
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_32.h linux-3.0.3/arch/x86/include/asm/uaccess_32.h
9112--- linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113+++ linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118+ pax_track_stack();
9119+
9120+ if ((long)n < 0)
9121+ return n;
9122+
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130+ if (!__builtin_constant_p(n))
9131+ check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139+
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146+ if ((long)n < 0)
9147+ return n;
9148+
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152@@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156+
9157+ pax_track_stack();
9158+
9159+ if ((long)n < 0)
9160+ return n;
9161+
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169+ if (!__builtin_constant_p(n))
9170+ check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178+
9179+ if ((long)n < 0)
9180+ return n;
9181+
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185@@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189- return __copy_from_user_ll_nocache_nozero(to, from, n);
9190-}
9191+ if ((long)n < 0)
9192+ return n;
9193
9194-unsigned long __must_check copy_to_user(void __user *to,
9195- const void *from, unsigned long n);
9196-unsigned long __must_check _copy_from_user(void *to,
9197- const void __user *from,
9198- unsigned long n);
9199+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9200+}
9201
9202+extern void copy_to_user_overflow(void)
9203+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9205+#else
9206+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207+#endif
9208+;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216-static inline unsigned long __must_check copy_from_user(void *to,
9217- const void __user *from,
9218- unsigned long n)
9219+/**
9220+ * copy_to_user: - Copy a block of data into user space.
9221+ * @to: Destination address, in user space.
9222+ * @from: Source address, in kernel space.
9223+ * @n: Number of bytes to copy.
9224+ *
9225+ * Context: User context only. This function may sleep.
9226+ *
9227+ * Copy data from kernel space to user space.
9228+ *
9229+ * Returns number of bytes that could not be copied.
9230+ * On success, this will be zero.
9231+ */
9232+static inline unsigned long __must_check
9233+copy_to_user(void __user *to, const void *from, unsigned long n)
9234+{
9235+ int sz = __compiletime_object_size(from);
9236+
9237+ if (unlikely(sz != -1 && sz < n))
9238+ copy_to_user_overflow();
9239+ else if (access_ok(VERIFY_WRITE, to, n))
9240+ n = __copy_to_user(to, from, n);
9241+ return n;
9242+}
9243+
9244+/**
9245+ * copy_from_user: - Copy a block of data from user space.
9246+ * @to: Destination address, in kernel space.
9247+ * @from: Source address, in user space.
9248+ * @n: Number of bytes to copy.
9249+ *
9250+ * Context: User context only. This function may sleep.
9251+ *
9252+ * Copy data from user space to kernel space.
9253+ *
9254+ * Returns number of bytes that could not be copied.
9255+ * On success, this will be zero.
9256+ *
9257+ * If some data could not be copied, this function will pad the copied
9258+ * data to the requested size using zero bytes.
9259+ */
9260+static inline unsigned long __must_check
9261+copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265- if (likely(sz == -1 || sz >= n))
9266- n = _copy_from_user(to, from, n);
9267- else
9268+ if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270-
9271+ else if (access_ok(VERIFY_READ, from, n))
9272+ n = __copy_from_user(to, from, n);
9273+ else if ((long)n > 0) {
9274+ if (!__builtin_constant_p(n))
9275+ check_object_size(to, n, false);
9276+ memset(to, 0, n);
9277+ }
9278 return n;
9279 }
9280
9281diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_64.h linux-3.0.3/arch/x86/include/asm/uaccess_64.h
9282--- linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283+++ linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284@@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288+#include <asm/pgtable.h>
9289+
9290+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298-__must_check unsigned long
9299-_copy_to_user(void __user *to, const void *from, unsigned len);
9300-__must_check unsigned long
9301-_copy_from_user(void *to, const void __user *from, unsigned len);
9302+static __always_inline __must_check unsigned long
9303+__copy_to_user(void __user *to, const void *from, unsigned len);
9304+static __always_inline __must_check unsigned long
9305+__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311- unsigned long n)
9312+ unsigned n)
9313 {
9314- int sz = __compiletime_object_size(to);
9315-
9316 might_fault();
9317- if (likely(sz == -1 || sz >= n))
9318- n = _copy_from_user(to, from, n);
9319-#ifdef CONFIG_DEBUG_VM
9320- else
9321- WARN(1, "Buffer overflow detected!\n");
9322-#endif
9323+
9324+ if (access_ok(VERIFY_READ, from, n))
9325+ n = __copy_from_user(to, from, n);
9326+ else if ((int)n > 0) {
9327+ if (!__builtin_constant_p(n))
9328+ check_object_size(to, n, false);
9329+ memset(to, 0, n);
9330+ }
9331 return n;
9332 }
9333
9334@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338- return _copy_to_user(dst, src, size);
9339+ if (access_ok(VERIFY_WRITE, dst, size))
9340+ size = __copy_to_user(dst, src, size);
9341+ return size;
9342 }
9343
9344 static __always_inline __must_check
9345-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348- int ret = 0;
9349+ int sz = __compiletime_object_size(dst);
9350+ unsigned ret = 0;
9351
9352 might_fault();
9353- if (!__builtin_constant_p(size))
9354- return copy_user_generic(dst, (__force void *)src, size);
9355+
9356+ pax_track_stack();
9357+
9358+ if ((int)size < 0)
9359+ return size;
9360+
9361+#ifdef CONFIG_PAX_MEMORY_UDEREF
9362+ if (!__access_ok(VERIFY_READ, src, size))
9363+ return size;
9364+#endif
9365+
9366+ if (unlikely(sz != -1 && sz < size)) {
9367+#ifdef CONFIG_DEBUG_VM
9368+ WARN(1, "Buffer overflow detected!\n");
9369+#endif
9370+ return size;
9371+ }
9372+
9373+ if (!__builtin_constant_p(size)) {
9374+ check_object_size(dst, size, false);
9375+
9376+#ifdef CONFIG_PAX_MEMORY_UDEREF
9377+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378+ src += PAX_USER_SHADOW_BASE;
9379+#endif
9380+
9381+ return copy_user_generic(dst, (__force const void *)src, size);
9382+ }
9383 switch (size) {
9384- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407- (u16 __user *)(8 + (char __user *)src),
9408+ (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418- (u64 __user *)(8 + (char __user *)src),
9419+ (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423- return copy_user_generic(dst, (__force void *)src, size);
9424+
9425+#ifdef CONFIG_PAX_MEMORY_UDEREF
9426+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427+ src += PAX_USER_SHADOW_BASE;
9428+#endif
9429+
9430+ return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438- int ret = 0;
9439+ int sz = __compiletime_object_size(src);
9440+ unsigned ret = 0;
9441
9442 might_fault();
9443- if (!__builtin_constant_p(size))
9444+
9445+ pax_track_stack();
9446+
9447+ if ((int)size < 0)
9448+ return size;
9449+
9450+#ifdef CONFIG_PAX_MEMORY_UDEREF
9451+ if (!__access_ok(VERIFY_WRITE, dst, size))
9452+ return size;
9453+#endif
9454+
9455+ if (unlikely(sz != -1 && sz < size)) {
9456+#ifdef CONFIG_DEBUG_VM
9457+ WARN(1, "Buffer overflow detected!\n");
9458+#endif
9459+ return size;
9460+ }
9461+
9462+ if (!__builtin_constant_p(size)) {
9463+ check_object_size(src, size, true);
9464+
9465+#ifdef CONFIG_PAX_MEMORY_UDEREF
9466+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467+ dst += PAX_USER_SHADOW_BASE;
9468+#endif
9469+
9470 return copy_user_generic((__force void *)dst, src, size);
9471+ }
9472 switch (size) {
9473- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512+
9513+#ifdef CONFIG_PAX_MEMORY_UDEREF
9514+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515+ dst += PAX_USER_SHADOW_BASE;
9516+#endif
9517+
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526- int ret = 0;
9527+ unsigned ret = 0;
9528
9529 might_fault();
9530- if (!__builtin_constant_p(size))
9531+
9532+ if ((int)size < 0)
9533+ return size;
9534+
9535+#ifdef CONFIG_PAX_MEMORY_UDEREF
9536+ if (!__access_ok(VERIFY_READ, src, size))
9537+ return size;
9538+ if (!__access_ok(VERIFY_WRITE, dst, size))
9539+ return size;
9540+#endif
9541+
9542+ if (!__builtin_constant_p(size)) {
9543+
9544+#ifdef CONFIG_PAX_MEMORY_UDEREF
9545+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546+ src += PAX_USER_SHADOW_BASE;
9547+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548+ dst += PAX_USER_SHADOW_BASE;
9549+#endif
9550+
9551 return copy_user_generic((__force void *)dst,
9552- (__force void *)src, size);
9553+ (__force const void *)src, size);
9554+ }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558- __get_user_asm(tmp, (u8 __user *)src,
9559+ __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567- __get_user_asm(tmp, (u16 __user *)src,
9568+ __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576- __get_user_asm(tmp, (u32 __user *)src,
9577+ __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585- __get_user_asm(tmp, (u64 __user *)src,
9586+ __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594+
9595+#ifdef CONFIG_PAX_MEMORY_UDEREF
9596+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597+ src += PAX_USER_SHADOW_BASE;
9598+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599+ dst += PAX_USER_SHADOW_BASE;
9600+#endif
9601+
9602 return copy_user_generic((__force void *)dst,
9603- (__force void *)src, size);
9604+ (__force const void *)src, size);
9605 }
9606 }
9607
9608@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612+ pax_track_stack();
9613+
9614+ if ((int)size < 0)
9615+ return size;
9616+
9617+#ifdef CONFIG_PAX_MEMORY_UDEREF
9618+ if (!__access_ok(VERIFY_READ, src, size))
9619+ return size;
9620+
9621+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622+ src += PAX_USER_SHADOW_BASE;
9623+#endif
9624+
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628-static __must_check __always_inline int
9629+static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632+ if ((int)size < 0)
9633+ return size;
9634+
9635+#ifdef CONFIG_PAX_MEMORY_UDEREF
9636+ if (!__access_ok(VERIFY_WRITE, dst, size))
9637+ return size;
9638+
9639+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640+ dst += PAX_USER_SHADOW_BASE;
9641+#endif
9642+
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646-extern long __copy_user_nocache(void *dst, const void __user *src,
9647+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650-static inline int
9651-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655+
9656+ if ((int)size < 0)
9657+ return size;
9658+
9659+#ifdef CONFIG_PAX_MEMORY_UDEREF
9660+ if (!__access_ok(VERIFY_READ, src, size))
9661+ return size;
9662+#endif
9663+
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667-static inline int
9668-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672+ if ((int)size < 0)
9673+ return size;
9674+
9675+#ifdef CONFIG_PAX_MEMORY_UDEREF
9676+ if (!__access_ok(VERIFY_READ, src, size))
9677+ return size;
9678+#endif
9679+
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683-unsigned long
9684+extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess.h linux-3.0.3/arch/x86/include/asm/uaccess.h
9689--- linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690+++ linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691@@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695+#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703+
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707@@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712+void __set_fs(mm_segment_t x);
9713+void set_fs(mm_segment_t x);
9714+#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716+#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720@@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726+#define access_ok(type, addr, size) \
9727+({ \
9728+ long __size = size; \
9729+ unsigned long __addr = (unsigned long)addr; \
9730+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9731+ unsigned long __end_ao = __addr + __size - 1; \
9732+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734+ while(__addr_ao <= __end_ao) { \
9735+ char __c_ao; \
9736+ __addr_ao += PAGE_SIZE; \
9737+ if (__size > PAGE_SIZE) \
9738+ cond_resched(); \
9739+ if (__get_user(__c_ao, (char __user *)__addr)) \
9740+ break; \
9741+ if (type != VERIFY_WRITE) { \
9742+ __addr = __addr_ao; \
9743+ continue; \
9744+ } \
9745+ if (__put_user(__c_ao, (char __user *)__addr)) \
9746+ break; \
9747+ __addr = __addr_ao; \
9748+ } \
9749+ } \
9750+ __ret_ao; \
9751+})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759-
9760+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761+#define __copyuser_seg "gs;"
9762+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764+#else
9765+#define __copyuser_seg
9766+#define __COPYUSER_SET_ES
9767+#define __COPYUSER_RESTORE_ES
9768+#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772- asm volatile("1: movl %%eax,0(%2)\n" \
9773- "2: movl %%edx,4(%2)\n" \
9774+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783- asm volatile("1: movl %%eax,0(%1)\n" \
9784- "2: movl %%edx,4(%1)\n" \
9785+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790@@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799@@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803- : "=r" (err), ltype(x) \
9804+ : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808@@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817@@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822+ (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828-#define __m(x) (*(struct __large_struct __user *)(x))
9829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830+#define ____m(x) \
9831+({ \
9832+ unsigned long ____x = (unsigned long)(x); \
9833+ if (____x < PAX_USER_SHADOW_BASE) \
9834+ ____x += PAX_USER_SHADOW_BASE; \
9835+ (void __user *)____x; \
9836+})
9837+#else
9838+#define ____m(x) (x)
9839+#endif
9840+#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871+#define __get_user(x, ptr) get_user((x), (ptr))
9872+#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875+#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884+#define __put_user(x, ptr) put_user((x), (ptr))
9885+#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888+#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896- (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897+ (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901diff -urNp linux-3.0.3/arch/x86/include/asm/vgtod.h linux-3.0.3/arch/x86/include/asm/vgtod.h
9902--- linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903+++ linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908+ char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912diff -urNp linux-3.0.3/arch/x86/include/asm/x86_init.h linux-3.0.3/arch/x86/include/asm/x86_init.h
9913--- linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914+++ linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915@@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919-};
9920+} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924@@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928-};
9929+} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933@@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937-};
9938+} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942@@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946-};
9947+} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951@@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955-};
9956+} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960@@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964-};
9965+} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969@@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973-};
9974+} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978@@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982-};
9983+} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987@@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991-};
9992+} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996@@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000-};
10001+} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005@@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009-};
10010+} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014@@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018-};
10019+} __no_const;
10020
10021 struct pci_dev;
10022
10023@@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027-};
10028+} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032diff -urNp linux-3.0.3/arch/x86/include/asm/xsave.h linux-3.0.3/arch/x86/include/asm/xsave.h
10033--- linux-3.0.3/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034+++ linux-3.0.3/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042+#endif
10043+
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054+#endif
10055+
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059diff -urNp linux-3.0.3/arch/x86/Kconfig linux-3.0.3/arch/x86/Kconfig
10060--- linux-3.0.3/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061+++ linux-3.0.3/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062@@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066- depends on X86_32 && !CC_STACKPROTECTOR
10067+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071@@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075- depends on !X86_NUMAQ
10076+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084- depends on !X86_NUMAQ
10085+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093- default 0x78000000 if VMSPLIT_2G_OPT
10094+ default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098@@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102- depends on ACPI
10103+ depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107@@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111+ depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119+ range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127+ range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135- def_bool y
10136+ def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143diff -urNp linux-3.0.3/arch/x86/Kconfig.cpu linux-3.0.3/arch/x86/Kconfig.cpu
10144--- linux-3.0.3/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145+++ linux-3.0.3/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150- depends on M586MMX || M586TSC || M586 || M486 || M386
10151+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155@@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173diff -urNp linux-3.0.3/arch/x86/Kconfig.debug linux-3.0.3/arch/x86/Kconfig.debug
10174--- linux-3.0.3/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175+++ linux-3.0.3/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176@@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180- depends on DEBUG_KERNEL
10181+ depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189- depends on MODULES
10190+ depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile
10195--- linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196+++ linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201+ifdef CONSTIFY_PLUGIN
10202+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203+endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S
10208--- linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209+++ linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210@@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10215+ call verify_cpu
10216+
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220@@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224+# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228diff -urNp linux-3.0.3/arch/x86/kernel/acpi/sleep.c linux-3.0.3/arch/x86/kernel/acpi/sleep.c
10229--- linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230+++ linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235+
10236+ pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239+ pax_close_kernel();
10240+
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244diff -urNp linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S
10245--- linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246+++ linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247@@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251- movl %cs:saved_magic, %eax
10252- cmpl $0x12345678, %eax
10253+ cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257- movl saved_eip, %eax
10258- jmp *%eax
10259+ jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263diff -urNp linux-3.0.3/arch/x86/kernel/alternative.c linux-3.0.3/arch/x86/kernel/alternative.c
10264--- linux-3.0.3/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265+++ linux-3.0.3/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270- if (*ptr == 0x3e)
10271+ if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279- if (*ptr == 0xf0)
10280+ if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288- memcpy(insnbuf, p->instr, p->len);
10289+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297- (unsigned long)__smp_locks_end);
10298+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307+void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312- memcpy(addr, opcode, len);
10313+
10314+ pax_open_kernel();
10315+ memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317+ pax_close_kernel();
10318+
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326- unsigned long flags;
10327- char *vaddr;
10328+ unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330- int i;
10331+ size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334- pages[0] = vmalloc_to_page(addr);
10335- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336+ pages[0] = vmalloc_to_page(vaddr);
10337+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339- pages[0] = virt_to_page(addr);
10340+ pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342- pages[1] = virt_to_page(addr + PAGE_SIZE);
10343+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346- local_irq_save(flags);
10347- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348- if (pages[1])
10349- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352- clear_fixmap(FIX_TEXT_POKE0);
10353- if (pages[1])
10354- clear_fixmap(FIX_TEXT_POKE1);
10355- local_flush_tlb();
10356- sync_core();
10357- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358- that causes hangs on some VIA CPUs. */
10359+ text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362- local_irq_restore(flags);
10363+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367diff -urNp linux-3.0.3/arch/x86/kernel/apic/apic.c linux-3.0.3/arch/x86/kernel/apic/apic.c
10368--- linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369+++ linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374-unsigned int apic_verbosity;
10375+int apic_verbosity;
10376
10377 int pic_mode;
10378
10379@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383- atomic_inc(&irq_err_count);
10384+ atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392+ pax_track_stack();
10393+
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397diff -urNp linux-3.0.3/arch/x86/kernel/apic/io_apic.c linux-3.0.3/arch/x86/kernel/apic/io_apic.c
10398--- linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399+++ linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404-void lock_vector_lock(void)
10405+void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413-void unlock_vector_lock(void)
10414+void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422-atomic_t irq_mis_count;
10423+atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431- atomic_inc(&irq_mis_count);
10432+ atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436diff -urNp linux-3.0.3/arch/x86/kernel/apm_32.c linux-3.0.3/arch/x86/kernel/apm_32.c
10437--- linux-3.0.3/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438+++ linux-3.0.3/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452+
10453+ pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455+ pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463+
10464+ pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466+ pax_close_kernel();
10467+
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475+
10476+ pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478+ pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486+
10487+ pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489+ pax_close_kernel();
10490+
10491 put_cpu();
10492 return error;
10493 }
10494@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498+
10499+ pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506+ pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets_64.c linux-3.0.3/arch/x86/kernel/asm-offsets_64.c
10511--- linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512+++ linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513@@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517+ DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets.c linux-3.0.3/arch/x86/kernel/asm-offsets.c
10522--- linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523+++ linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524@@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533@@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537+
10538+#ifdef CONFIG_PAX_KERNEXEC
10539+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540+#endif
10541+
10542+#ifdef CONFIG_PAX_MEMORY_UDEREF
10543+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545+#ifdef CONFIG_X86_64
10546+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547+#endif
10548 #endif
10549
10550+#endif
10551+
10552+ BLANK();
10553+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556+
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560diff -urNp linux-3.0.3/arch/x86/kernel/cpu/amd.c linux-3.0.3/arch/x86/kernel/cpu/amd.c
10561--- linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562+++ linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567- if ((c->x86 == 6)) {
10568+ if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572diff -urNp linux-3.0.3/arch/x86/kernel/cpu/common.c linux-3.0.3/arch/x86/kernel/cpu/common.c
10573--- linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574+++ linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580-#ifdef CONFIG_X86_64
10581- /*
10582- * We need valid kernel segments for data and code in long mode too
10583- * IRET will check the segment types kkeil 2000/10/28
10584- * Also sysret mandates a special GDT layout
10585- *
10586- * TLS descriptors are currently at a different place compared to i386.
10587- * Hopefully nobody expects them at a fixed place (Wine?)
10588- */
10589- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595-#else
10596- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600- /*
10601- * Segments used for calling PnP BIOS have byte granularity.
10602- * They code segments and data segments have fixed 64k limits,
10603- * the transfer segment sizes are set at run time.
10604- */
10605- /* 32-bit code */
10606- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607- /* 16-bit code */
10608- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609- /* 16-bit data */
10610- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611- /* 16-bit data */
10612- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613- /* 16-bit data */
10614- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615- /*
10616- * The APM segments have byte granularity and their bases
10617- * are set at run time. All have 64k limits.
10618- */
10619- /* 32-bit code */
10620- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621- /* 16-bit code */
10622- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623- /* data */
10624- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625-
10626- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628- GDT_STACK_CANARY_INIT
10629-#endif
10630-} };
10631-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632-
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10651+#endif
10652+
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662+
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679- regs->gs = __KERNEL_STACK_CANARY;
10680+ savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688- t = &per_cpu(init_tss, cpu);
10689+ t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697- load_idt((const struct desc_ptr *)&idt_descr);
10698+ load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706- x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714- struct tss_struct *t = &per_cpu(init_tss, cpu);
10715+ struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719diff -urNp linux-3.0.3/arch/x86/kernel/cpu/intel.c linux-3.0.3/arch/x86/kernel/cpu/intel.c
10720--- linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:44:40.000000000 -0400
10721+++ linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-26 19:49:56.000000000 -0400
10722@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731@@ -466,7 +466,7 @@ static void __cpuinit init_intel(struct
10732
10733 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
10734 if ((epb & 0xF) == 0) {
10735- printk_once(KERN_WARNING, "x86: updated energy_perf_bias"
10736+ printk_once(KERN_WARNING "x86: updated energy_perf_bias"
10737 " to 'normal' from 'performance'\n"
10738 "You can view and update epb via utility,"
10739 " such as x86_energy_perf_policy(8)\n");
10740diff -urNp linux-3.0.3/arch/x86/kernel/cpu/Makefile linux-3.0.3/arch/x86/kernel/cpu/Makefile
10741--- linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10742+++ linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10743@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10744 CFLAGS_REMOVE_perf_event.o = -pg
10745 endif
10746
10747-# Make sure load_percpu_segment has no stackprotector
10748-nostackp := $(call cc-option, -fno-stack-protector)
10749-CFLAGS_common.o := $(nostackp)
10750-
10751 obj-y := intel_cacheinfo.o scattered.o topology.o
10752 obj-y += proc.o capflags.o powerflags.o common.o
10753 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10754diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c
10755--- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10756+++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10757@@ -46,6 +46,7 @@
10758 #include <asm/ipi.h>
10759 #include <asm/mce.h>
10760 #include <asm/msr.h>
10761+#include <asm/local.h>
10762
10763 #include "mce-internal.h"
10764
10765@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10766 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10767 m->cs, m->ip);
10768
10769- if (m->cs == __KERNEL_CS)
10770+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10771 print_symbol("{%s}", m->ip);
10772 pr_cont("\n");
10773 }
10774@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10775
10776 #define PANIC_TIMEOUT 5 /* 5 seconds */
10777
10778-static atomic_t mce_paniced;
10779+static atomic_unchecked_t mce_paniced;
10780
10781 static int fake_panic;
10782-static atomic_t mce_fake_paniced;
10783+static atomic_unchecked_t mce_fake_paniced;
10784
10785 /* Panic in progress. Enable interrupts and wait for final IPI */
10786 static void wait_for_panic(void)
10787@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10788 /*
10789 * Make sure only one CPU runs in machine check panic
10790 */
10791- if (atomic_inc_return(&mce_paniced) > 1)
10792+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10793 wait_for_panic();
10794 barrier();
10795
10796@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10797 console_verbose();
10798 } else {
10799 /* Don't log too much for fake panic */
10800- if (atomic_inc_return(&mce_fake_paniced) > 1)
10801+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10802 return;
10803 }
10804 /* First print corrected ones that are still unlogged */
10805@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10806 * might have been modified by someone else.
10807 */
10808 rmb();
10809- if (atomic_read(&mce_paniced))
10810+ if (atomic_read_unchecked(&mce_paniced))
10811 wait_for_panic();
10812 if (!monarch_timeout)
10813 goto out;
10814@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10815 */
10816
10817 static DEFINE_SPINLOCK(mce_state_lock);
10818-static int open_count; /* #times opened */
10819+static local_t open_count; /* #times opened */
10820 static int open_exclu; /* already open exclusive? */
10821
10822 static int mce_open(struct inode *inode, struct file *file)
10823 {
10824 spin_lock(&mce_state_lock);
10825
10826- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10827+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10828 spin_unlock(&mce_state_lock);
10829
10830 return -EBUSY;
10831@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10832
10833 if (file->f_flags & O_EXCL)
10834 open_exclu = 1;
10835- open_count++;
10836+ local_inc(&open_count);
10837
10838 spin_unlock(&mce_state_lock);
10839
10840@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10841 {
10842 spin_lock(&mce_state_lock);
10843
10844- open_count--;
10845+ local_dec(&open_count);
10846 open_exclu = 0;
10847
10848 spin_unlock(&mce_state_lock);
10849@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10850 static void mce_reset(void)
10851 {
10852 cpu_missing = 0;
10853- atomic_set(&mce_fake_paniced, 0);
10854+ atomic_set_unchecked(&mce_fake_paniced, 0);
10855 atomic_set(&mce_executing, 0);
10856 atomic_set(&mce_callin, 0);
10857 atomic_set(&global_nwo, 0);
10858diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c
10859--- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10860+++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10861@@ -215,7 +215,9 @@ static int inject_init(void)
10862 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10863 return -ENOMEM;
10864 printk(KERN_INFO "Machine check injector initialized\n");
10865- mce_chrdev_ops.write = mce_write;
10866+ pax_open_kernel();
10867+ *(void **)&mce_chrdev_ops.write = mce_write;
10868+ pax_close_kernel();
10869 register_die_notifier(&mce_raise_nb);
10870 return 0;
10871 }
10872diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c
10873--- linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-07-21 22:17:23.000000000 -0400
10874+++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-08-23 21:47:55.000000000 -0400
10875@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10876 u64 size_or_mask, size_and_mask;
10877 static bool mtrr_aps_delayed_init;
10878
10879-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10880+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10881
10882 const struct mtrr_ops *mtrr_if;
10883
10884diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h
10885--- linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10886+++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10887@@ -25,7 +25,7 @@ struct mtrr_ops {
10888 int (*validate_add_page)(unsigned long base, unsigned long size,
10889 unsigned int type);
10890 int (*have_wrcomb)(void);
10891-};
10892+} __do_const;
10893
10894 extern int generic_get_free_region(unsigned long base, unsigned long size,
10895 int replace_reg);
10896diff -urNp linux-3.0.3/arch/x86/kernel/cpu/perf_event.c linux-3.0.3/arch/x86/kernel/cpu/perf_event.c
10897--- linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10898+++ linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10899@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10900 int i, j, w, wmax, num = 0;
10901 struct hw_perf_event *hwc;
10902
10903+ pax_track_stack();
10904+
10905 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10906
10907 for (i = 0; i < n; i++) {
10908@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10909 break;
10910
10911 perf_callchain_store(entry, frame.return_address);
10912- fp = frame.next_frame;
10913+ fp = (__force const void __user *)frame.next_frame;
10914 }
10915 }
10916
10917diff -urNp linux-3.0.3/arch/x86/kernel/crash.c linux-3.0.3/arch/x86/kernel/crash.c
10918--- linux-3.0.3/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10919+++ linux-3.0.3/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10920@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10921 regs = args->regs;
10922
10923 #ifdef CONFIG_X86_32
10924- if (!user_mode_vm(regs)) {
10925+ if (!user_mode(regs)) {
10926 crash_fixup_ss_esp(&fixed_regs, regs);
10927 regs = &fixed_regs;
10928 }
10929diff -urNp linux-3.0.3/arch/x86/kernel/doublefault_32.c linux-3.0.3/arch/x86/kernel/doublefault_32.c
10930--- linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10931+++ linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10932@@ -11,7 +11,7 @@
10933
10934 #define DOUBLEFAULT_STACKSIZE (1024)
10935 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10936-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10937+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10938
10939 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10940
10941@@ -21,7 +21,7 @@ static void doublefault_fn(void)
10942 unsigned long gdt, tss;
10943
10944 store_gdt(&gdt_desc);
10945- gdt = gdt_desc.address;
10946+ gdt = (unsigned long)gdt_desc.address;
10947
10948 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10949
10950@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10951 /* 0x2 bit is always set */
10952 .flags = X86_EFLAGS_SF | 0x2,
10953 .sp = STACK_START,
10954- .es = __USER_DS,
10955+ .es = __KERNEL_DS,
10956 .cs = __KERNEL_CS,
10957 .ss = __KERNEL_DS,
10958- .ds = __USER_DS,
10959+ .ds = __KERNEL_DS,
10960 .fs = __KERNEL_PERCPU,
10961
10962 .__cr3 = __pa_nodebug(swapper_pg_dir),
10963diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_32.c linux-3.0.3/arch/x86/kernel/dumpstack_32.c
10964--- linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10965+++ linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10966@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10967 bp = stack_frame(task, regs);
10968
10969 for (;;) {
10970- struct thread_info *context;
10971+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10972
10973- context = (struct thread_info *)
10974- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10975- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10976+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10977
10978- stack = (unsigned long *)context->previous_esp;
10979- if (!stack)
10980+ if (stack_start == task_stack_page(task))
10981 break;
10982+ stack = *(unsigned long **)stack_start;
10983 if (ops->stack(data, "IRQ") < 0)
10984 break;
10985 touch_nmi_watchdog();
10986@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10987 * When in-kernel, we also print out the stack and code at the
10988 * time of the fault..
10989 */
10990- if (!user_mode_vm(regs)) {
10991+ if (!user_mode(regs)) {
10992 unsigned int code_prologue = code_bytes * 43 / 64;
10993 unsigned int code_len = code_bytes;
10994 unsigned char c;
10995 u8 *ip;
10996+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10997
10998 printk(KERN_EMERG "Stack:\n");
10999 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11000
11001 printk(KERN_EMERG "Code: ");
11002
11003- ip = (u8 *)regs->ip - code_prologue;
11004+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11005 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11006 /* try starting at IP */
11007- ip = (u8 *)regs->ip;
11008+ ip = (u8 *)regs->ip + cs_base;
11009 code_len = code_len - code_prologue + 1;
11010 }
11011 for (i = 0; i < code_len; i++, ip++) {
11012@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11013 printk(" Bad EIP value.");
11014 break;
11015 }
11016- if (ip == (u8 *)regs->ip)
11017+ if (ip == (u8 *)regs->ip + cs_base)
11018 printk("<%02x> ", c);
11019 else
11020 printk("%02x ", c);
11021@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11022 {
11023 unsigned short ud2;
11024
11025+ ip = ktla_ktva(ip);
11026 if (ip < PAGE_OFFSET)
11027 return 0;
11028 if (probe_kernel_address((unsigned short *)ip, ud2))
11029diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_64.c linux-3.0.3/arch/x86/kernel/dumpstack_64.c
11030--- linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11031+++ linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11032@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11033 unsigned long *irq_stack_end =
11034 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11035 unsigned used = 0;
11036- struct thread_info *tinfo;
11037 int graph = 0;
11038 unsigned long dummy;
11039+ void *stack_start;
11040
11041 if (!task)
11042 task = current;
11043@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11044 * current stack address. If the stacks consist of nested
11045 * exceptions
11046 */
11047- tinfo = task_thread_info(task);
11048 for (;;) {
11049 char *id;
11050 unsigned long *estack_end;
11051+
11052 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11053 &used, &id);
11054
11055@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11056 if (ops->stack(data, id) < 0)
11057 break;
11058
11059- bp = ops->walk_stack(tinfo, stack, bp, ops,
11060+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11061 data, estack_end, &graph);
11062 ops->stack(data, "<EOE>");
11063 /*
11064@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11065 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11066 if (ops->stack(data, "IRQ") < 0)
11067 break;
11068- bp = ops->walk_stack(tinfo, stack, bp,
11069+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11070 ops, data, irq_stack_end, &graph);
11071 /*
11072 * We link to the next stack (which would be
11073@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11074 /*
11075 * This handles the process stack:
11076 */
11077- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11078+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11079+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11080 put_cpu();
11081 }
11082 EXPORT_SYMBOL(dump_trace);
11083diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack.c linux-3.0.3/arch/x86/kernel/dumpstack.c
11084--- linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11085+++ linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11086@@ -2,6 +2,9 @@
11087 * Copyright (C) 1991, 1992 Linus Torvalds
11088 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11089 */
11090+#ifdef CONFIG_GRKERNSEC_HIDESYM
11091+#define __INCLUDED_BY_HIDESYM 1
11092+#endif
11093 #include <linux/kallsyms.h>
11094 #include <linux/kprobes.h>
11095 #include <linux/uaccess.h>
11096@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11097 static void
11098 print_ftrace_graph_addr(unsigned long addr, void *data,
11099 const struct stacktrace_ops *ops,
11100- struct thread_info *tinfo, int *graph)
11101+ struct task_struct *task, int *graph)
11102 {
11103- struct task_struct *task = tinfo->task;
11104 unsigned long ret_addr;
11105 int index = task->curr_ret_stack;
11106
11107@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11108 static inline void
11109 print_ftrace_graph_addr(unsigned long addr, void *data,
11110 const struct stacktrace_ops *ops,
11111- struct thread_info *tinfo, int *graph)
11112+ struct task_struct *task, int *graph)
11113 { }
11114 #endif
11115
11116@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11117 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11118 */
11119
11120-static inline int valid_stack_ptr(struct thread_info *tinfo,
11121- void *p, unsigned int size, void *end)
11122+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11123 {
11124- void *t = tinfo;
11125 if (end) {
11126 if (p < end && p >= (end-THREAD_SIZE))
11127 return 1;
11128@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11129 }
11130
11131 unsigned long
11132-print_context_stack(struct thread_info *tinfo,
11133+print_context_stack(struct task_struct *task, void *stack_start,
11134 unsigned long *stack, unsigned long bp,
11135 const struct stacktrace_ops *ops, void *data,
11136 unsigned long *end, int *graph)
11137 {
11138 struct stack_frame *frame = (struct stack_frame *)bp;
11139
11140- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11141+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11142 unsigned long addr;
11143
11144 addr = *stack;
11145@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11146 } else {
11147 ops->address(data, addr, 0);
11148 }
11149- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11150+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11151 }
11152 stack++;
11153 }
11154@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11155 EXPORT_SYMBOL_GPL(print_context_stack);
11156
11157 unsigned long
11158-print_context_stack_bp(struct thread_info *tinfo,
11159+print_context_stack_bp(struct task_struct *task, void *stack_start,
11160 unsigned long *stack, unsigned long bp,
11161 const struct stacktrace_ops *ops, void *data,
11162 unsigned long *end, int *graph)
11163@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11164 struct stack_frame *frame = (struct stack_frame *)bp;
11165 unsigned long *ret_addr = &frame->return_address;
11166
11167- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11168+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11169 unsigned long addr = *ret_addr;
11170
11171 if (!__kernel_text_address(addr))
11172@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11173 ops->address(data, addr, 1);
11174 frame = frame->next_frame;
11175 ret_addr = &frame->return_address;
11176- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11177+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11178 }
11179
11180 return (unsigned long)frame;
11181@@ -186,7 +186,7 @@ void dump_stack(void)
11182
11183 bp = stack_frame(current, NULL);
11184 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11185- current->pid, current->comm, print_tainted(),
11186+ task_pid_nr(current), current->comm, print_tainted(),
11187 init_utsname()->release,
11188 (int)strcspn(init_utsname()->version, " "),
11189 init_utsname()->version);
11190@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11191 }
11192 EXPORT_SYMBOL_GPL(oops_begin);
11193
11194+extern void gr_handle_kernel_exploit(void);
11195+
11196 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11197 {
11198 if (regs && kexec_should_crash(current))
11199@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11200 panic("Fatal exception in interrupt");
11201 if (panic_on_oops)
11202 panic("Fatal exception");
11203- do_exit(signr);
11204+
11205+ gr_handle_kernel_exploit();
11206+
11207+ do_group_exit(signr);
11208 }
11209
11210 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11211@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11212
11213 show_registers(regs);
11214 #ifdef CONFIG_X86_32
11215- if (user_mode_vm(regs)) {
11216+ if (user_mode(regs)) {
11217 sp = regs->sp;
11218 ss = regs->ss & 0xffff;
11219 } else {
11220@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11221 unsigned long flags = oops_begin();
11222 int sig = SIGSEGV;
11223
11224- if (!user_mode_vm(regs))
11225+ if (!user_mode(regs))
11226 report_bug(regs->ip, regs);
11227
11228 if (__die(str, regs, err))
11229diff -urNp linux-3.0.3/arch/x86/kernel/early_printk.c linux-3.0.3/arch/x86/kernel/early_printk.c
11230--- linux-3.0.3/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11231+++ linux-3.0.3/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11232@@ -7,6 +7,7 @@
11233 #include <linux/pci_regs.h>
11234 #include <linux/pci_ids.h>
11235 #include <linux/errno.h>
11236+#include <linux/sched.h>
11237 #include <asm/io.h>
11238 #include <asm/processor.h>
11239 #include <asm/fcntl.h>
11240@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11241 int n;
11242 va_list ap;
11243
11244+ pax_track_stack();
11245+
11246 va_start(ap, fmt);
11247 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11248 early_console->write(early_console, buf, n);
11249diff -urNp linux-3.0.3/arch/x86/kernel/entry_32.S linux-3.0.3/arch/x86/kernel/entry_32.S
11250--- linux-3.0.3/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11251+++ linux-3.0.3/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11252@@ -185,13 +185,146 @@
11253 /*CFI_REL_OFFSET gs, PT_GS*/
11254 .endm
11255 .macro SET_KERNEL_GS reg
11256+
11257+#ifdef CONFIG_CC_STACKPROTECTOR
11258 movl $(__KERNEL_STACK_CANARY), \reg
11259+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11260+ movl $(__USER_DS), \reg
11261+#else
11262+ xorl \reg, \reg
11263+#endif
11264+
11265 movl \reg, %gs
11266 .endm
11267
11268 #endif /* CONFIG_X86_32_LAZY_GS */
11269
11270-.macro SAVE_ALL
11271+.macro pax_enter_kernel
11272+#ifdef CONFIG_PAX_KERNEXEC
11273+ call pax_enter_kernel
11274+#endif
11275+.endm
11276+
11277+.macro pax_exit_kernel
11278+#ifdef CONFIG_PAX_KERNEXEC
11279+ call pax_exit_kernel
11280+#endif
11281+.endm
11282+
11283+#ifdef CONFIG_PAX_KERNEXEC
11284+ENTRY(pax_enter_kernel)
11285+#ifdef CONFIG_PARAVIRT
11286+ pushl %eax
11287+ pushl %ecx
11288+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11289+ mov %eax, %esi
11290+#else
11291+ mov %cr0, %esi
11292+#endif
11293+ bts $16, %esi
11294+ jnc 1f
11295+ mov %cs, %esi
11296+ cmp $__KERNEL_CS, %esi
11297+ jz 3f
11298+ ljmp $__KERNEL_CS, $3f
11299+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11300+2:
11301+#ifdef CONFIG_PARAVIRT
11302+ mov %esi, %eax
11303+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11304+#else
11305+ mov %esi, %cr0
11306+#endif
11307+3:
11308+#ifdef CONFIG_PARAVIRT
11309+ popl %ecx
11310+ popl %eax
11311+#endif
11312+ ret
11313+ENDPROC(pax_enter_kernel)
11314+
11315+ENTRY(pax_exit_kernel)
11316+#ifdef CONFIG_PARAVIRT
11317+ pushl %eax
11318+ pushl %ecx
11319+#endif
11320+ mov %cs, %esi
11321+ cmp $__KERNEXEC_KERNEL_CS, %esi
11322+ jnz 2f
11323+#ifdef CONFIG_PARAVIRT
11324+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11325+ mov %eax, %esi
11326+#else
11327+ mov %cr0, %esi
11328+#endif
11329+ btr $16, %esi
11330+ ljmp $__KERNEL_CS, $1f
11331+1:
11332+#ifdef CONFIG_PARAVIRT
11333+ mov %esi, %eax
11334+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11335+#else
11336+ mov %esi, %cr0
11337+#endif
11338+2:
11339+#ifdef CONFIG_PARAVIRT
11340+ popl %ecx
11341+ popl %eax
11342+#endif
11343+ ret
11344+ENDPROC(pax_exit_kernel)
11345+#endif
11346+
11347+.macro pax_erase_kstack
11348+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11349+ call pax_erase_kstack
11350+#endif
11351+.endm
11352+
11353+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11354+/*
11355+ * ebp: thread_info
11356+ * ecx, edx: can be clobbered
11357+ */
11358+ENTRY(pax_erase_kstack)
11359+ pushl %edi
11360+ pushl %eax
11361+
11362+ mov TI_lowest_stack(%ebp), %edi
11363+ mov $-0xBEEF, %eax
11364+ std
11365+
11366+1: mov %edi, %ecx
11367+ and $THREAD_SIZE_asm - 1, %ecx
11368+ shr $2, %ecx
11369+ repne scasl
11370+ jecxz 2f
11371+
11372+ cmp $2*16, %ecx
11373+ jc 2f
11374+
11375+ mov $2*16, %ecx
11376+ repe scasl
11377+ jecxz 2f
11378+ jne 1b
11379+
11380+2: cld
11381+ mov %esp, %ecx
11382+ sub %edi, %ecx
11383+ shr $2, %ecx
11384+ rep stosl
11385+
11386+ mov TI_task_thread_sp0(%ebp), %edi
11387+ sub $128, %edi
11388+ mov %edi, TI_lowest_stack(%ebp)
11389+
11390+ popl %eax
11391+ popl %edi
11392+ ret
11393+ENDPROC(pax_erase_kstack)
11394+#endif
11395+
11396+.macro __SAVE_ALL _DS
11397 cld
11398 PUSH_GS
11399 pushl_cfi %fs
11400@@ -214,7 +347,7 @@
11401 CFI_REL_OFFSET ecx, 0
11402 pushl_cfi %ebx
11403 CFI_REL_OFFSET ebx, 0
11404- movl $(__USER_DS), %edx
11405+ movl $\_DS, %edx
11406 movl %edx, %ds
11407 movl %edx, %es
11408 movl $(__KERNEL_PERCPU), %edx
11409@@ -222,6 +355,15 @@
11410 SET_KERNEL_GS %edx
11411 .endm
11412
11413+.macro SAVE_ALL
11414+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11415+ __SAVE_ALL __KERNEL_DS
11416+ pax_enter_kernel
11417+#else
11418+ __SAVE_ALL __USER_DS
11419+#endif
11420+.endm
11421+
11422 .macro RESTORE_INT_REGS
11423 popl_cfi %ebx
11424 CFI_RESTORE ebx
11425@@ -332,7 +474,15 @@ check_userspace:
11426 movb PT_CS(%esp), %al
11427 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11428 cmpl $USER_RPL, %eax
11429+
11430+#ifdef CONFIG_PAX_KERNEXEC
11431+ jae resume_userspace
11432+
11433+ PAX_EXIT_KERNEL
11434+ jmp resume_kernel
11435+#else
11436 jb resume_kernel # not returning to v8086 or userspace
11437+#endif
11438
11439 ENTRY(resume_userspace)
11440 LOCKDEP_SYS_EXIT
11441@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11442 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11443 # int/exception return?
11444 jne work_pending
11445- jmp restore_all
11446+ jmp restore_all_pax
11447 END(ret_from_exception)
11448
11449 #ifdef CONFIG_PREEMPT
11450@@ -394,23 +544,34 @@ sysenter_past_esp:
11451 /*CFI_REL_OFFSET cs, 0*/
11452 /*
11453 * Push current_thread_info()->sysenter_return to the stack.
11454- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11455- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11456 */
11457- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11458+ pushl_cfi $0
11459 CFI_REL_OFFSET eip, 0
11460
11461 pushl_cfi %eax
11462 SAVE_ALL
11463+ GET_THREAD_INFO(%ebp)
11464+ movl TI_sysenter_return(%ebp),%ebp
11465+ movl %ebp,PT_EIP(%esp)
11466 ENABLE_INTERRUPTS(CLBR_NONE)
11467
11468 /*
11469 * Load the potential sixth argument from user stack.
11470 * Careful about security.
11471 */
11472+ movl PT_OLDESP(%esp),%ebp
11473+
11474+#ifdef CONFIG_PAX_MEMORY_UDEREF
11475+ mov PT_OLDSS(%esp),%ds
11476+1: movl %ds:(%ebp),%ebp
11477+ push %ss
11478+ pop %ds
11479+#else
11480 cmpl $__PAGE_OFFSET-3,%ebp
11481 jae syscall_fault
11482 1: movl (%ebp),%ebp
11483+#endif
11484+
11485 movl %ebp,PT_EBP(%esp)
11486 .section __ex_table,"a"
11487 .align 4
11488@@ -433,12 +594,23 @@ sysenter_do_call:
11489 testl $_TIF_ALLWORK_MASK, %ecx
11490 jne sysexit_audit
11491 sysenter_exit:
11492+
11493+#ifdef CONFIG_PAX_RANDKSTACK
11494+ pushl_cfi %eax
11495+ call pax_randomize_kstack
11496+ popl_cfi %eax
11497+#endif
11498+
11499+ pax_erase_kstack
11500+
11501 /* if something modifies registers it must also disable sysexit */
11502 movl PT_EIP(%esp), %edx
11503 movl PT_OLDESP(%esp), %ecx
11504 xorl %ebp,%ebp
11505 TRACE_IRQS_ON
11506 1: mov PT_FS(%esp), %fs
11507+2: mov PT_DS(%esp), %ds
11508+3: mov PT_ES(%esp), %es
11509 PTGS_TO_GS
11510 ENABLE_INTERRUPTS_SYSEXIT
11511
11512@@ -455,6 +627,9 @@ sysenter_audit:
11513 movl %eax,%edx /* 2nd arg: syscall number */
11514 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11515 call audit_syscall_entry
11516+
11517+ pax_erase_kstack
11518+
11519 pushl_cfi %ebx
11520 movl PT_EAX(%esp),%eax /* reload syscall number */
11521 jmp sysenter_do_call
11522@@ -481,11 +656,17 @@ sysexit_audit:
11523
11524 CFI_ENDPROC
11525 .pushsection .fixup,"ax"
11526-2: movl $0,PT_FS(%esp)
11527+4: movl $0,PT_FS(%esp)
11528+ jmp 1b
11529+5: movl $0,PT_DS(%esp)
11530+ jmp 1b
11531+6: movl $0,PT_ES(%esp)
11532 jmp 1b
11533 .section __ex_table,"a"
11534 .align 4
11535- .long 1b,2b
11536+ .long 1b,4b
11537+ .long 2b,5b
11538+ .long 3b,6b
11539 .popsection
11540 PTGS_TO_GS_EX
11541 ENDPROC(ia32_sysenter_target)
11542@@ -518,6 +699,14 @@ syscall_exit:
11543 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11544 jne syscall_exit_work
11545
11546+restore_all_pax:
11547+
11548+#ifdef CONFIG_PAX_RANDKSTACK
11549+ call pax_randomize_kstack
11550+#endif
11551+
11552+ pax_erase_kstack
11553+
11554 restore_all:
11555 TRACE_IRQS_IRET
11556 restore_all_notrace:
11557@@ -577,14 +766,34 @@ ldt_ss:
11558 * compensating for the offset by changing to the ESPFIX segment with
11559 * a base address that matches for the difference.
11560 */
11561-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11562+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11563 mov %esp, %edx /* load kernel esp */
11564 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11565 mov %dx, %ax /* eax: new kernel esp */
11566 sub %eax, %edx /* offset (low word is 0) */
11567+#ifdef CONFIG_SMP
11568+ movl PER_CPU_VAR(cpu_number), %ebx
11569+ shll $PAGE_SHIFT_asm, %ebx
11570+ addl $cpu_gdt_table, %ebx
11571+#else
11572+ movl $cpu_gdt_table, %ebx
11573+#endif
11574 shr $16, %edx
11575- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11576- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11577+
11578+#ifdef CONFIG_PAX_KERNEXEC
11579+ mov %cr0, %esi
11580+ btr $16, %esi
11581+ mov %esi, %cr0
11582+#endif
11583+
11584+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11585+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11586+
11587+#ifdef CONFIG_PAX_KERNEXEC
11588+ bts $16, %esi
11589+ mov %esi, %cr0
11590+#endif
11591+
11592 pushl_cfi $__ESPFIX_SS
11593 pushl_cfi %eax /* new kernel esp */
11594 /* Disable interrupts, but do not irqtrace this section: we
11595@@ -613,29 +822,23 @@ work_resched:
11596 movl TI_flags(%ebp), %ecx
11597 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11598 # than syscall tracing?
11599- jz restore_all
11600+ jz restore_all_pax
11601 testb $_TIF_NEED_RESCHED, %cl
11602 jnz work_resched
11603
11604 work_notifysig: # deal with pending signals and
11605 # notify-resume requests
11606+ movl %esp, %eax
11607 #ifdef CONFIG_VM86
11608 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11609- movl %esp, %eax
11610- jne work_notifysig_v86 # returning to kernel-space or
11611+ jz 1f # returning to kernel-space or
11612 # vm86-space
11613- xorl %edx, %edx
11614- call do_notify_resume
11615- jmp resume_userspace_sig
11616
11617- ALIGN
11618-work_notifysig_v86:
11619 pushl_cfi %ecx # save ti_flags for do_notify_resume
11620 call save_v86_state # %eax contains pt_regs pointer
11621 popl_cfi %ecx
11622 movl %eax, %esp
11623-#else
11624- movl %esp, %eax
11625+1:
11626 #endif
11627 xorl %edx, %edx
11628 call do_notify_resume
11629@@ -648,6 +851,9 @@ syscall_trace_entry:
11630 movl $-ENOSYS,PT_EAX(%esp)
11631 movl %esp, %eax
11632 call syscall_trace_enter
11633+
11634+ pax_erase_kstack
11635+
11636 /* What it returned is what we'll actually use. */
11637 cmpl $(nr_syscalls), %eax
11638 jnae syscall_call
11639@@ -670,6 +876,10 @@ END(syscall_exit_work)
11640
11641 RING0_INT_FRAME # can't unwind into user space anyway
11642 syscall_fault:
11643+#ifdef CONFIG_PAX_MEMORY_UDEREF
11644+ push %ss
11645+ pop %ds
11646+#endif
11647 GET_THREAD_INFO(%ebp)
11648 movl $-EFAULT,PT_EAX(%esp)
11649 jmp resume_userspace
11650@@ -752,6 +962,36 @@ ptregs_clone:
11651 CFI_ENDPROC
11652 ENDPROC(ptregs_clone)
11653
11654+ ALIGN;
11655+ENTRY(kernel_execve)
11656+ CFI_STARTPROC
11657+ pushl_cfi %ebp
11658+ sub $PT_OLDSS+4,%esp
11659+ pushl_cfi %edi
11660+ pushl_cfi %ecx
11661+ pushl_cfi %eax
11662+ lea 3*4(%esp),%edi
11663+ mov $PT_OLDSS/4+1,%ecx
11664+ xorl %eax,%eax
11665+ rep stosl
11666+ popl_cfi %eax
11667+ popl_cfi %ecx
11668+ popl_cfi %edi
11669+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11670+ pushl_cfi %esp
11671+ call sys_execve
11672+ add $4,%esp
11673+ CFI_ADJUST_CFA_OFFSET -4
11674+ GET_THREAD_INFO(%ebp)
11675+ test %eax,%eax
11676+ jz syscall_exit
11677+ add $PT_OLDSS+4,%esp
11678+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11679+ popl_cfi %ebp
11680+ ret
11681+ CFI_ENDPROC
11682+ENDPROC(kernel_execve)
11683+
11684 .macro FIXUP_ESPFIX_STACK
11685 /*
11686 * Switch back for ESPFIX stack to the normal zerobased stack
11687@@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11688 * normal stack and adjusts ESP with the matching offset.
11689 */
11690 /* fixup the stack */
11691- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11692- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11693+#ifdef CONFIG_SMP
11694+ movl PER_CPU_VAR(cpu_number), %ebx
11695+ shll $PAGE_SHIFT_asm, %ebx
11696+ addl $cpu_gdt_table, %ebx
11697+#else
11698+ movl $cpu_gdt_table, %ebx
11699+#endif
11700+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11701+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11702 shl $16, %eax
11703 addl %esp, %eax /* the adjusted stack pointer */
11704 pushl_cfi $__KERNEL_DS
11705@@ -1213,7 +1460,6 @@ return_to_handler:
11706 jmp *%ecx
11707 #endif
11708
11709-.section .rodata,"a"
11710 #include "syscall_table_32.S"
11711
11712 syscall_table_size=(.-sys_call_table)
11713@@ -1259,9 +1505,12 @@ error_code:
11714 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11715 REG_TO_PTGS %ecx
11716 SET_KERNEL_GS %ecx
11717- movl $(__USER_DS), %ecx
11718+ movl $(__KERNEL_DS), %ecx
11719 movl %ecx, %ds
11720 movl %ecx, %es
11721+
11722+ pax_enter_kernel
11723+
11724 TRACE_IRQS_OFF
11725 movl %esp,%eax # pt_regs pointer
11726 call *%edi
11727@@ -1346,6 +1595,9 @@ nmi_stack_correct:
11728 xorl %edx,%edx # zero error code
11729 movl %esp,%eax # pt_regs pointer
11730 call do_nmi
11731+
11732+ pax_exit_kernel
11733+
11734 jmp restore_all_notrace
11735 CFI_ENDPROC
11736
11737@@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11738 FIXUP_ESPFIX_STACK # %eax == %esp
11739 xorl %edx,%edx # zero error code
11740 call do_nmi
11741+
11742+ pax_exit_kernel
11743+
11744 RESTORE_REGS
11745 lss 12+4(%esp), %esp # back to espfix stack
11746 CFI_ADJUST_CFA_OFFSET -24
11747diff -urNp linux-3.0.3/arch/x86/kernel/entry_64.S linux-3.0.3/arch/x86/kernel/entry_64.S
11748--- linux-3.0.3/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11749+++ linux-3.0.3/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11750@@ -53,6 +53,7 @@
11751 #include <asm/paravirt.h>
11752 #include <asm/ftrace.h>
11753 #include <asm/percpu.h>
11754+#include <asm/pgtable.h>
11755
11756 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11757 #include <linux/elf-em.h>
11758@@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11759 ENDPROC(native_usergs_sysret64)
11760 #endif /* CONFIG_PARAVIRT */
11761
11762+ .macro ljmpq sel, off
11763+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11764+ .byte 0x48; ljmp *1234f(%rip)
11765+ .pushsection .rodata
11766+ .align 16
11767+ 1234: .quad \off; .word \sel
11768+ .popsection
11769+#else
11770+ pushq $\sel
11771+ pushq $\off
11772+ lretq
11773+#endif
11774+ .endm
11775+
11776+ .macro pax_enter_kernel
11777+#ifdef CONFIG_PAX_KERNEXEC
11778+ call pax_enter_kernel
11779+#endif
11780+ .endm
11781+
11782+ .macro pax_exit_kernel
11783+#ifdef CONFIG_PAX_KERNEXEC
11784+ call pax_exit_kernel
11785+#endif
11786+ .endm
11787+
11788+#ifdef CONFIG_PAX_KERNEXEC
11789+ENTRY(pax_enter_kernel)
11790+ pushq %rdi
11791+
11792+#ifdef CONFIG_PARAVIRT
11793+ PV_SAVE_REGS(CLBR_RDI)
11794+#endif
11795+
11796+ GET_CR0_INTO_RDI
11797+ bts $16,%rdi
11798+ jnc 1f
11799+ mov %cs,%edi
11800+ cmp $__KERNEL_CS,%edi
11801+ jz 3f
11802+ ljmpq __KERNEL_CS,3f
11803+1: ljmpq __KERNEXEC_KERNEL_CS,2f
11804+2: SET_RDI_INTO_CR0
11805+3:
11806+
11807+#ifdef CONFIG_PARAVIRT
11808+ PV_RESTORE_REGS(CLBR_RDI)
11809+#endif
11810+
11811+ popq %rdi
11812+ retq
11813+ENDPROC(pax_enter_kernel)
11814+
11815+ENTRY(pax_exit_kernel)
11816+ pushq %rdi
11817+
11818+#ifdef CONFIG_PARAVIRT
11819+ PV_SAVE_REGS(CLBR_RDI)
11820+#endif
11821+
11822+ mov %cs,%rdi
11823+ cmp $__KERNEXEC_KERNEL_CS,%edi
11824+ jnz 2f
11825+ GET_CR0_INTO_RDI
11826+ btr $16,%rdi
11827+ ljmpq __KERNEL_CS,1f
11828+1: SET_RDI_INTO_CR0
11829+2:
11830+
11831+#ifdef CONFIG_PARAVIRT
11832+ PV_RESTORE_REGS(CLBR_RDI);
11833+#endif
11834+
11835+ popq %rdi
11836+ retq
11837+ENDPROC(pax_exit_kernel)
11838+#endif
11839+
11840+ .macro pax_enter_kernel_user
11841+#ifdef CONFIG_PAX_MEMORY_UDEREF
11842+ call pax_enter_kernel_user
11843+#endif
11844+ .endm
11845+
11846+ .macro pax_exit_kernel_user
11847+#ifdef CONFIG_PAX_MEMORY_UDEREF
11848+ call pax_exit_kernel_user
11849+#endif
11850+#ifdef CONFIG_PAX_RANDKSTACK
11851+ push %rax
11852+ call pax_randomize_kstack
11853+ pop %rax
11854+#endif
11855+ .endm
11856+
11857+#ifdef CONFIG_PAX_MEMORY_UDEREF
11858+ENTRY(pax_enter_kernel_user)
11859+ pushq %rdi
11860+ pushq %rbx
11861+
11862+#ifdef CONFIG_PARAVIRT
11863+ PV_SAVE_REGS(CLBR_RDI)
11864+#endif
11865+
11866+ GET_CR3_INTO_RDI
11867+ mov %rdi,%rbx
11868+ add $__START_KERNEL_map,%rbx
11869+ sub phys_base(%rip),%rbx
11870+
11871+#ifdef CONFIG_PARAVIRT
11872+ pushq %rdi
11873+ cmpl $0, pv_info+PARAVIRT_enabled
11874+ jz 1f
11875+ i = 0
11876+ .rept USER_PGD_PTRS
11877+ mov i*8(%rbx),%rsi
11878+ mov $0,%sil
11879+ lea i*8(%rbx),%rdi
11880+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11881+ i = i + 1
11882+ .endr
11883+ jmp 2f
11884+1:
11885+#endif
11886+
11887+ i = 0
11888+ .rept USER_PGD_PTRS
11889+ movb $0,i*8(%rbx)
11890+ i = i + 1
11891+ .endr
11892+
11893+#ifdef CONFIG_PARAVIRT
11894+2: popq %rdi
11895+#endif
11896+ SET_RDI_INTO_CR3
11897+
11898+#ifdef CONFIG_PAX_KERNEXEC
11899+ GET_CR0_INTO_RDI
11900+ bts $16,%rdi
11901+ SET_RDI_INTO_CR0
11902+#endif
11903+
11904+#ifdef CONFIG_PARAVIRT
11905+ PV_RESTORE_REGS(CLBR_RDI)
11906+#endif
11907+
11908+ popq %rbx
11909+ popq %rdi
11910+ retq
11911+ENDPROC(pax_enter_kernel_user)
11912+
11913+ENTRY(pax_exit_kernel_user)
11914+ push %rdi
11915+
11916+#ifdef CONFIG_PARAVIRT
11917+ pushq %rbx
11918+ PV_SAVE_REGS(CLBR_RDI)
11919+#endif
11920+
11921+#ifdef CONFIG_PAX_KERNEXEC
11922+ GET_CR0_INTO_RDI
11923+ btr $16,%rdi
11924+ SET_RDI_INTO_CR0
11925+#endif
11926+
11927+ GET_CR3_INTO_RDI
11928+ add $__START_KERNEL_map,%rdi
11929+ sub phys_base(%rip),%rdi
11930+
11931+#ifdef CONFIG_PARAVIRT
11932+ cmpl $0, pv_info+PARAVIRT_enabled
11933+ jz 1f
11934+ mov %rdi,%rbx
11935+ i = 0
11936+ .rept USER_PGD_PTRS
11937+ mov i*8(%rbx),%rsi
11938+ mov $0x67,%sil
11939+ lea i*8(%rbx),%rdi
11940+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11941+ i = i + 1
11942+ .endr
11943+ jmp 2f
11944+1:
11945+#endif
11946+
11947+ i = 0
11948+ .rept USER_PGD_PTRS
11949+ movb $0x67,i*8(%rdi)
11950+ i = i + 1
11951+ .endr
11952+
11953+#ifdef CONFIG_PARAVIRT
11954+2: PV_RESTORE_REGS(CLBR_RDI)
11955+ popq %rbx
11956+#endif
11957+
11958+ popq %rdi
11959+ retq
11960+ENDPROC(pax_exit_kernel_user)
11961+#endif
11962+
11963+ .macro pax_erase_kstack
11964+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11965+ call pax_erase_kstack
11966+#endif
11967+ .endm
11968+
11969+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11970+/*
11971+ * r10: thread_info
11972+ * rcx, rdx: can be clobbered
11973+ */
11974+ENTRY(pax_erase_kstack)
11975+ pushq %rdi
11976+ pushq %rax
11977+ pushq %r10
11978+
11979+ GET_THREAD_INFO(%r10)
11980+ mov TI_lowest_stack(%r10), %rdi
11981+ mov $-0xBEEF, %rax
11982+ std
11983+
11984+1: mov %edi, %ecx
11985+ and $THREAD_SIZE_asm - 1, %ecx
11986+ shr $3, %ecx
11987+ repne scasq
11988+ jecxz 2f
11989+
11990+ cmp $2*8, %ecx
11991+ jc 2f
11992+
11993+ mov $2*8, %ecx
11994+ repe scasq
11995+ jecxz 2f
11996+ jne 1b
11997+
11998+2: cld
11999+ mov %esp, %ecx
12000+ sub %edi, %ecx
12001+
12002+ cmp $THREAD_SIZE_asm, %rcx
12003+ jb 3f
12004+ ud2
12005+3:
12006+
12007+ shr $3, %ecx
12008+ rep stosq
12009+
12010+ mov TI_task_thread_sp0(%r10), %rdi
12011+ sub $256, %rdi
12012+ mov %rdi, TI_lowest_stack(%r10)
12013+
12014+ popq %r10
12015+ popq %rax
12016+ popq %rdi
12017+ ret
12018+ENDPROC(pax_erase_kstack)
12019+#endif
12020
12021 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12022 #ifdef CONFIG_TRACE_IRQFLAGS
12023@@ -318,7 +577,7 @@ ENTRY(save_args)
12024 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12025 movq_cfi rbp, 8 /* push %rbp */
12026 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12027- testl $3, CS(%rdi)
12028+ testb $3, CS(%rdi)
12029 je 1f
12030 SWAPGS
12031 /*
12032@@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12033
12034 RESTORE_REST
12035
12036- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12037+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12038 je int_ret_from_sys_call
12039
12040 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12041@@ -455,7 +714,7 @@ END(ret_from_fork)
12042 ENTRY(system_call)
12043 CFI_STARTPROC simple
12044 CFI_SIGNAL_FRAME
12045- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12046+ CFI_DEF_CFA rsp,0
12047 CFI_REGISTER rip,rcx
12048 /*CFI_REGISTER rflags,r11*/
12049 SWAPGS_UNSAFE_STACK
12050@@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12051
12052 movq %rsp,PER_CPU_VAR(old_rsp)
12053 movq PER_CPU_VAR(kernel_stack),%rsp
12054+ pax_enter_kernel_user
12055 /*
12056 * No need to follow this irqs off/on section - it's straight
12057 * and short:
12058 */
12059 ENABLE_INTERRUPTS(CLBR_NONE)
12060- SAVE_ARGS 8,1
12061+ SAVE_ARGS 8*6,1
12062 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12063 movq %rcx,RIP-ARGOFFSET(%rsp)
12064 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12065@@ -502,6 +762,8 @@ sysret_check:
12066 andl %edi,%edx
12067 jnz sysret_careful
12068 CFI_REMEMBER_STATE
12069+ pax_exit_kernel_user
12070+ pax_erase_kstack
12071 /*
12072 * sysretq will re-enable interrupts:
12073 */
12074@@ -560,6 +822,9 @@ auditsys:
12075 movq %rax,%rsi /* 2nd arg: syscall number */
12076 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12077 call audit_syscall_entry
12078+
12079+ pax_erase_kstack
12080+
12081 LOAD_ARGS 0 /* reload call-clobbered registers */
12082 jmp system_call_fastpath
12083
12084@@ -590,6 +855,9 @@ tracesys:
12085 FIXUP_TOP_OF_STACK %rdi
12086 movq %rsp,%rdi
12087 call syscall_trace_enter
12088+
12089+ pax_erase_kstack
12090+
12091 /*
12092 * Reload arg registers from stack in case ptrace changed them.
12093 * We don't reload %rax because syscall_trace_enter() returned
12094@@ -611,7 +879,7 @@ tracesys:
12095 GLOBAL(int_ret_from_sys_call)
12096 DISABLE_INTERRUPTS(CLBR_NONE)
12097 TRACE_IRQS_OFF
12098- testl $3,CS-ARGOFFSET(%rsp)
12099+ testb $3,CS-ARGOFFSET(%rsp)
12100 je retint_restore_args
12101 movl $_TIF_ALLWORK_MASK,%edi
12102 /* edi: mask to check */
12103@@ -793,6 +1061,16 @@ END(interrupt)
12104 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12105 call save_args
12106 PARTIAL_FRAME 0
12107+#ifdef CONFIG_PAX_MEMORY_UDEREF
12108+ testb $3, CS(%rdi)
12109+ jnz 1f
12110+ pax_enter_kernel
12111+ jmp 2f
12112+1: pax_enter_kernel_user
12113+2:
12114+#else
12115+ pax_enter_kernel
12116+#endif
12117 call \func
12118 .endm
12119
12120@@ -825,7 +1103,7 @@ ret_from_intr:
12121 CFI_ADJUST_CFA_OFFSET -8
12122 exit_intr:
12123 GET_THREAD_INFO(%rcx)
12124- testl $3,CS-ARGOFFSET(%rsp)
12125+ testb $3,CS-ARGOFFSET(%rsp)
12126 je retint_kernel
12127
12128 /* Interrupt came from user space */
12129@@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12130 * The iretq could re-enable interrupts:
12131 */
12132 DISABLE_INTERRUPTS(CLBR_ANY)
12133+ pax_exit_kernel_user
12134+ pax_erase_kstack
12135 TRACE_IRQS_IRETQ
12136 SWAPGS
12137 jmp restore_args
12138
12139 retint_restore_args: /* return to kernel space */
12140 DISABLE_INTERRUPTS(CLBR_ANY)
12141+ pax_exit_kernel
12142 /*
12143 * The iretq could re-enable interrupts:
12144 */
12145@@ -1027,6 +1308,16 @@ ENTRY(\sym)
12146 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12147 call error_entry
12148 DEFAULT_FRAME 0
12149+#ifdef CONFIG_PAX_MEMORY_UDEREF
12150+ testb $3, CS(%rsp)
12151+ jnz 1f
12152+ pax_enter_kernel
12153+ jmp 2f
12154+1: pax_enter_kernel_user
12155+2:
12156+#else
12157+ pax_enter_kernel
12158+#endif
12159 movq %rsp,%rdi /* pt_regs pointer */
12160 xorl %esi,%esi /* no error code */
12161 call \do_sym
12162@@ -1044,6 +1335,16 @@ ENTRY(\sym)
12163 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12164 call save_paranoid
12165 TRACE_IRQS_OFF
12166+#ifdef CONFIG_PAX_MEMORY_UDEREF
12167+ testb $3, CS(%rsp)
12168+ jnz 1f
12169+ pax_enter_kernel
12170+ jmp 2f
12171+1: pax_enter_kernel_user
12172+2:
12173+#else
12174+ pax_enter_kernel
12175+#endif
12176 movq %rsp,%rdi /* pt_regs pointer */
12177 xorl %esi,%esi /* no error code */
12178 call \do_sym
12179@@ -1052,7 +1353,7 @@ ENTRY(\sym)
12180 END(\sym)
12181 .endm
12182
12183-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12184+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12185 .macro paranoidzeroentry_ist sym do_sym ist
12186 ENTRY(\sym)
12187 INTR_FRAME
12188@@ -1062,8 +1363,24 @@ ENTRY(\sym)
12189 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12190 call save_paranoid
12191 TRACE_IRQS_OFF
12192+#ifdef CONFIG_PAX_MEMORY_UDEREF
12193+ testb $3, CS(%rsp)
12194+ jnz 1f
12195+ pax_enter_kernel
12196+ jmp 2f
12197+1: pax_enter_kernel_user
12198+2:
12199+#else
12200+ pax_enter_kernel
12201+#endif
12202 movq %rsp,%rdi /* pt_regs pointer */
12203 xorl %esi,%esi /* no error code */
12204+#ifdef CONFIG_SMP
12205+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12206+ lea init_tss(%r12), %r12
12207+#else
12208+ lea init_tss(%rip), %r12
12209+#endif
12210 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12211 call \do_sym
12212 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12213@@ -1080,6 +1397,16 @@ ENTRY(\sym)
12214 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12215 call error_entry
12216 DEFAULT_FRAME 0
12217+#ifdef CONFIG_PAX_MEMORY_UDEREF
12218+ testb $3, CS(%rsp)
12219+ jnz 1f
12220+ pax_enter_kernel
12221+ jmp 2f
12222+1: pax_enter_kernel_user
12223+2:
12224+#else
12225+ pax_enter_kernel
12226+#endif
12227 movq %rsp,%rdi /* pt_regs pointer */
12228 movq ORIG_RAX(%rsp),%rsi /* get error code */
12229 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12230@@ -1099,6 +1426,16 @@ ENTRY(\sym)
12231 call save_paranoid
12232 DEFAULT_FRAME 0
12233 TRACE_IRQS_OFF
12234+#ifdef CONFIG_PAX_MEMORY_UDEREF
12235+ testb $3, CS(%rsp)
12236+ jnz 1f
12237+ pax_enter_kernel
12238+ jmp 2f
12239+1: pax_enter_kernel_user
12240+2:
12241+#else
12242+ pax_enter_kernel
12243+#endif
12244 movq %rsp,%rdi /* pt_regs pointer */
12245 movq ORIG_RAX(%rsp),%rsi /* get error code */
12246 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12247@@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12248 TRACE_IRQS_OFF
12249 testl %ebx,%ebx /* swapgs needed? */
12250 jnz paranoid_restore
12251- testl $3,CS(%rsp)
12252+ testb $3,CS(%rsp)
12253 jnz paranoid_userspace
12254+#ifdef CONFIG_PAX_MEMORY_UDEREF
12255+ pax_exit_kernel
12256+ TRACE_IRQS_IRETQ 0
12257+ SWAPGS_UNSAFE_STACK
12258+ RESTORE_ALL 8
12259+ jmp irq_return
12260+#endif
12261 paranoid_swapgs:
12262+#ifdef CONFIG_PAX_MEMORY_UDEREF
12263+ pax_exit_kernel_user
12264+#else
12265+ pax_exit_kernel
12266+#endif
12267 TRACE_IRQS_IRETQ 0
12268 SWAPGS_UNSAFE_STACK
12269 RESTORE_ALL 8
12270 jmp irq_return
12271 paranoid_restore:
12272+ pax_exit_kernel
12273 TRACE_IRQS_IRETQ 0
12274 RESTORE_ALL 8
12275 jmp irq_return
12276@@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12277 movq_cfi r14, R14+8
12278 movq_cfi r15, R15+8
12279 xorl %ebx,%ebx
12280- testl $3,CS+8(%rsp)
12281+ testb $3,CS+8(%rsp)
12282 je error_kernelspace
12283 error_swapgs:
12284 SWAPGS
12285@@ -1490,6 +1840,16 @@ ENTRY(nmi)
12286 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12287 call save_paranoid
12288 DEFAULT_FRAME 0
12289+#ifdef CONFIG_PAX_MEMORY_UDEREF
12290+ testb $3, CS(%rsp)
12291+ jnz 1f
12292+ pax_enter_kernel
12293+ jmp 2f
12294+1: pax_enter_kernel_user
12295+2:
12296+#else
12297+ pax_enter_kernel
12298+#endif
12299 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12300 movq %rsp,%rdi
12301 movq $-1,%rsi
12302@@ -1500,11 +1860,25 @@ ENTRY(nmi)
12303 DISABLE_INTERRUPTS(CLBR_NONE)
12304 testl %ebx,%ebx /* swapgs needed? */
12305 jnz nmi_restore
12306- testl $3,CS(%rsp)
12307+ testb $3,CS(%rsp)
12308 jnz nmi_userspace
12309+#ifdef CONFIG_PAX_MEMORY_UDEREF
12310+ pax_exit_kernel
12311+ SWAPGS_UNSAFE_STACK
12312+ RESTORE_ALL 8
12313+ jmp irq_return
12314+#endif
12315 nmi_swapgs:
12316+#ifdef CONFIG_PAX_MEMORY_UDEREF
12317+ pax_exit_kernel_user
12318+#else
12319+ pax_exit_kernel
12320+#endif
12321 SWAPGS_UNSAFE_STACK
12322+ RESTORE_ALL 8
12323+ jmp irq_return
12324 nmi_restore:
12325+ pax_exit_kernel
12326 RESTORE_ALL 8
12327 jmp irq_return
12328 nmi_userspace:
12329diff -urNp linux-3.0.3/arch/x86/kernel/ftrace.c linux-3.0.3/arch/x86/kernel/ftrace.c
12330--- linux-3.0.3/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12331+++ linux-3.0.3/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12332@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12333 static const void *mod_code_newcode; /* holds the text to write to the IP */
12334
12335 static unsigned nmi_wait_count;
12336-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12337+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12338
12339 int ftrace_arch_read_dyn_info(char *buf, int size)
12340 {
12341@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12342
12343 r = snprintf(buf, size, "%u %u",
12344 nmi_wait_count,
12345- atomic_read(&nmi_update_count));
12346+ atomic_read_unchecked(&nmi_update_count));
12347 return r;
12348 }
12349
12350@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12351
12352 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12353 smp_rmb();
12354+ pax_open_kernel();
12355 ftrace_mod_code();
12356- atomic_inc(&nmi_update_count);
12357+ pax_close_kernel();
12358+ atomic_inc_unchecked(&nmi_update_count);
12359 }
12360 /* Must have previous changes seen before executions */
12361 smp_mb();
12362@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12363 {
12364 unsigned char replaced[MCOUNT_INSN_SIZE];
12365
12366+ ip = ktla_ktva(ip);
12367+
12368 /*
12369 * Note: Due to modules and __init, code can
12370 * disappear and change, we need to protect against faulting
12371@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12372 unsigned char old[MCOUNT_INSN_SIZE], *new;
12373 int ret;
12374
12375- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12376+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12377 new = ftrace_call_replace(ip, (unsigned long)func);
12378 ret = ftrace_modify_code(ip, old, new);
12379
12380@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12381 {
12382 unsigned char code[MCOUNT_INSN_SIZE];
12383
12384+ ip = ktla_ktva(ip);
12385+
12386 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12387 return -EFAULT;
12388
12389diff -urNp linux-3.0.3/arch/x86/kernel/head32.c linux-3.0.3/arch/x86/kernel/head32.c
12390--- linux-3.0.3/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12391+++ linux-3.0.3/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12392@@ -19,6 +19,7 @@
12393 #include <asm/io_apic.h>
12394 #include <asm/bios_ebda.h>
12395 #include <asm/tlbflush.h>
12396+#include <asm/boot.h>
12397
12398 static void __init i386_default_early_setup(void)
12399 {
12400@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12401 {
12402 memblock_init();
12403
12404- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12405+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12406
12407 #ifdef CONFIG_BLK_DEV_INITRD
12408 /* Reserve INITRD */
12409diff -urNp linux-3.0.3/arch/x86/kernel/head_32.S linux-3.0.3/arch/x86/kernel/head_32.S
12410--- linux-3.0.3/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12411+++ linux-3.0.3/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12412@@ -25,6 +25,12 @@
12413 /* Physical address */
12414 #define pa(X) ((X) - __PAGE_OFFSET)
12415
12416+#ifdef CONFIG_PAX_KERNEXEC
12417+#define ta(X) (X)
12418+#else
12419+#define ta(X) ((X) - __PAGE_OFFSET)
12420+#endif
12421+
12422 /*
12423 * References to members of the new_cpu_data structure.
12424 */
12425@@ -54,11 +60,7 @@
12426 * and small than max_low_pfn, otherwise will waste some page table entries
12427 */
12428
12429-#if PTRS_PER_PMD > 1
12430-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12431-#else
12432-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12433-#endif
12434+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12435
12436 /* Number of possible pages in the lowmem region */
12437 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12438@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12439 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12440
12441 /*
12442+ * Real beginning of normal "text" segment
12443+ */
12444+ENTRY(stext)
12445+ENTRY(_stext)
12446+
12447+/*
12448 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12449 * %esi points to the real-mode code as a 32-bit pointer.
12450 * CS and DS must be 4 GB flat segments, but we don't depend on
12451@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12452 * can.
12453 */
12454 __HEAD
12455+
12456+#ifdef CONFIG_PAX_KERNEXEC
12457+ jmp startup_32
12458+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12459+.fill PAGE_SIZE-5,1,0xcc
12460+#endif
12461+
12462 ENTRY(startup_32)
12463 movl pa(stack_start),%ecx
12464
12465@@ -105,6 +120,57 @@ ENTRY(startup_32)
12466 2:
12467 leal -__PAGE_OFFSET(%ecx),%esp
12468
12469+#ifdef CONFIG_SMP
12470+ movl $pa(cpu_gdt_table),%edi
12471+ movl $__per_cpu_load,%eax
12472+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12473+ rorl $16,%eax
12474+ movb %al,__KERNEL_PERCPU + 4(%edi)
12475+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12476+ movl $__per_cpu_end - 1,%eax
12477+ subl $__per_cpu_start,%eax
12478+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12479+#endif
12480+
12481+#ifdef CONFIG_PAX_MEMORY_UDEREF
12482+ movl $NR_CPUS,%ecx
12483+ movl $pa(cpu_gdt_table),%edi
12484+1:
12485+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12486+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12487+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12488+ addl $PAGE_SIZE_asm,%edi
12489+ loop 1b
12490+#endif
12491+
12492+#ifdef CONFIG_PAX_KERNEXEC
12493+ movl $pa(boot_gdt),%edi
12494+ movl $__LOAD_PHYSICAL_ADDR,%eax
12495+ movw %ax,__BOOT_CS + 2(%edi)
12496+ rorl $16,%eax
12497+ movb %al,__BOOT_CS + 4(%edi)
12498+ movb %ah,__BOOT_CS + 7(%edi)
12499+ rorl $16,%eax
12500+
12501+ ljmp $(__BOOT_CS),$1f
12502+1:
12503+
12504+ movl $NR_CPUS,%ecx
12505+ movl $pa(cpu_gdt_table),%edi
12506+ addl $__PAGE_OFFSET,%eax
12507+1:
12508+ movw %ax,__KERNEL_CS + 2(%edi)
12509+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12510+ rorl $16,%eax
12511+ movb %al,__KERNEL_CS + 4(%edi)
12512+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12513+ movb %ah,__KERNEL_CS + 7(%edi)
12514+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12515+ rorl $16,%eax
12516+ addl $PAGE_SIZE_asm,%edi
12517+ loop 1b
12518+#endif
12519+
12520 /*
12521 * Clear BSS first so that there are no surprises...
12522 */
12523@@ -195,8 +261,11 @@ ENTRY(startup_32)
12524 movl %eax, pa(max_pfn_mapped)
12525
12526 /* Do early initialization of the fixmap area */
12527- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12528- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12529+#ifdef CONFIG_COMPAT_VDSO
12530+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12531+#else
12532+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12533+#endif
12534 #else /* Not PAE */
12535
12536 page_pde_offset = (__PAGE_OFFSET >> 20);
12537@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12538 movl %eax, pa(max_pfn_mapped)
12539
12540 /* Do early initialization of the fixmap area */
12541- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542- movl %eax,pa(initial_page_table+0xffc)
12543+#ifdef CONFIG_COMPAT_VDSO
12544+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12545+#else
12546+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12547+#endif
12548 #endif
12549
12550 #ifdef CONFIG_PARAVIRT
12551@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 cmpl $num_subarch_entries, %eax
12553 jae bad_subarch
12554
12555- movl pa(subarch_entries)(,%eax,4), %eax
12556- subl $__PAGE_OFFSET, %eax
12557- jmp *%eax
12558+ jmp *pa(subarch_entries)(,%eax,4)
12559
12560 bad_subarch:
12561 WEAK(lguest_entry)
12562@@ -255,10 +325,10 @@ WEAK(xen_entry)
12563 __INITDATA
12564
12565 subarch_entries:
12566- .long default_entry /* normal x86/PC */
12567- .long lguest_entry /* lguest hypervisor */
12568- .long xen_entry /* Xen hypervisor */
12569- .long default_entry /* Moorestown MID */
12570+ .long ta(default_entry) /* normal x86/PC */
12571+ .long ta(lguest_entry) /* lguest hypervisor */
12572+ .long ta(xen_entry) /* Xen hypervisor */
12573+ .long ta(default_entry) /* Moorestown MID */
12574 num_subarch_entries = (. - subarch_entries) / 4
12575 .previous
12576 #else
12577@@ -312,6 +382,7 @@ default_entry:
12578 orl %edx,%eax
12579 movl %eax,%cr4
12580
12581+#ifdef CONFIG_X86_PAE
12582 testb $X86_CR4_PAE, %al # check if PAE is enabled
12583 jz 6f
12584
12585@@ -340,6 +411,9 @@ default_entry:
12586 /* Make changes effective */
12587 wrmsr
12588
12589+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12590+#endif
12591+
12592 6:
12593
12594 /*
12595@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12596 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12597 movl %eax,%ss # after changing gdt.
12598
12599- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12600+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12601 movl %eax,%ds
12602 movl %eax,%es
12603
12604@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12605 */
12606 cmpb $0,ready
12607 jne 1f
12608- movl $gdt_page,%eax
12609+ movl $cpu_gdt_table,%eax
12610 movl $stack_canary,%ecx
12611+#ifdef CONFIG_SMP
12612+ addl $__per_cpu_load,%ecx
12613+#endif
12614 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12615 shrl $16, %ecx
12616 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12617 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12618 1:
12619-#endif
12620 movl $(__KERNEL_STACK_CANARY),%eax
12621+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12622+ movl $(__USER_DS),%eax
12623+#else
12624+ xorl %eax,%eax
12625+#endif
12626 movl %eax,%gs
12627
12628 xorl %eax,%eax # Clear LDT
12629@@ -558,22 +639,22 @@ early_page_fault:
12630 jmp early_fault
12631
12632 early_fault:
12633- cld
12634 #ifdef CONFIG_PRINTK
12635+ cmpl $1,%ss:early_recursion_flag
12636+ je hlt_loop
12637+ incl %ss:early_recursion_flag
12638+ cld
12639 pusha
12640 movl $(__KERNEL_DS),%eax
12641 movl %eax,%ds
12642 movl %eax,%es
12643- cmpl $2,early_recursion_flag
12644- je hlt_loop
12645- incl early_recursion_flag
12646 movl %cr2,%eax
12647 pushl %eax
12648 pushl %edx /* trapno */
12649 pushl $fault_msg
12650 call printk
12651+; call dump_stack
12652 #endif
12653- call dump_stack
12654 hlt_loop:
12655 hlt
12656 jmp hlt_loop
12657@@ -581,8 +662,11 @@ hlt_loop:
12658 /* This is the default interrupt "handler" :-) */
12659 ALIGN
12660 ignore_int:
12661- cld
12662 #ifdef CONFIG_PRINTK
12663+ cmpl $2,%ss:early_recursion_flag
12664+ je hlt_loop
12665+ incl %ss:early_recursion_flag
12666+ cld
12667 pushl %eax
12668 pushl %ecx
12669 pushl %edx
12670@@ -591,9 +675,6 @@ ignore_int:
12671 movl $(__KERNEL_DS),%eax
12672 movl %eax,%ds
12673 movl %eax,%es
12674- cmpl $2,early_recursion_flag
12675- je hlt_loop
12676- incl early_recursion_flag
12677 pushl 16(%esp)
12678 pushl 24(%esp)
12679 pushl 32(%esp)
12680@@ -622,29 +703,43 @@ ENTRY(initial_code)
12681 /*
12682 * BSS section
12683 */
12684-__PAGE_ALIGNED_BSS
12685- .align PAGE_SIZE
12686 #ifdef CONFIG_X86_PAE
12687+.section .initial_pg_pmd,"a",@progbits
12688 initial_pg_pmd:
12689 .fill 1024*KPMDS,4,0
12690 #else
12691+.section .initial_page_table,"a",@progbits
12692 ENTRY(initial_page_table)
12693 .fill 1024,4,0
12694 #endif
12695+.section .initial_pg_fixmap,"a",@progbits
12696 initial_pg_fixmap:
12697 .fill 1024,4,0
12698+.section .empty_zero_page,"a",@progbits
12699 ENTRY(empty_zero_page)
12700 .fill 4096,1,0
12701+.section .swapper_pg_dir,"a",@progbits
12702 ENTRY(swapper_pg_dir)
12703+#ifdef CONFIG_X86_PAE
12704+ .fill 4,8,0
12705+#else
12706 .fill 1024,4,0
12707+#endif
12708+
12709+/*
12710+ * The IDT has to be page-aligned to simplify the Pentium
12711+ * F0 0F bug workaround.. We have a special link segment
12712+ * for this.
12713+ */
12714+.section .idt,"a",@progbits
12715+ENTRY(idt_table)
12716+ .fill 256,8,0
12717
12718 /*
12719 * This starts the data section.
12720 */
12721 #ifdef CONFIG_X86_PAE
12722-__PAGE_ALIGNED_DATA
12723- /* Page-aligned for the benefit of paravirt? */
12724- .align PAGE_SIZE
12725+.section .initial_page_table,"a",@progbits
12726 ENTRY(initial_page_table)
12727 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12728 # if KPMDS == 3
12729@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12730 # error "Kernel PMDs should be 1, 2 or 3"
12731 # endif
12732 .align PAGE_SIZE /* needs to be page-sized too */
12733+
12734+#ifdef CONFIG_PAX_PER_CPU_PGD
12735+ENTRY(cpu_pgd)
12736+ .rept NR_CPUS
12737+ .fill 4,8,0
12738+ .endr
12739+#endif
12740+
12741 #endif
12742
12743 .data
12744 .balign 4
12745 ENTRY(stack_start)
12746- .long init_thread_union+THREAD_SIZE
12747+ .long init_thread_union+THREAD_SIZE-8
12748+
12749+ready: .byte 0
12750
12751+.section .rodata,"a",@progbits
12752 early_recursion_flag:
12753 .long 0
12754
12755-ready: .byte 0
12756-
12757 int_msg:
12758 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12759
12760@@ -707,7 +811,7 @@ fault_msg:
12761 .word 0 # 32 bit align gdt_desc.address
12762 boot_gdt_descr:
12763 .word __BOOT_DS+7
12764- .long boot_gdt - __PAGE_OFFSET
12765+ .long pa(boot_gdt)
12766
12767 .word 0 # 32-bit align idt_desc.address
12768 idt_descr:
12769@@ -718,7 +822,7 @@ idt_descr:
12770 .word 0 # 32 bit align gdt_desc.address
12771 ENTRY(early_gdt_descr)
12772 .word GDT_ENTRIES*8-1
12773- .long gdt_page /* Overwritten for secondary CPUs */
12774+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
12775
12776 /*
12777 * The boot_gdt must mirror the equivalent in setup.S and is
12778@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12779 .align L1_CACHE_BYTES
12780 ENTRY(boot_gdt)
12781 .fill GDT_ENTRY_BOOT_CS,8,0
12782- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12783- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12784+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12785+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12786+
12787+ .align PAGE_SIZE_asm
12788+ENTRY(cpu_gdt_table)
12789+ .rept NR_CPUS
12790+ .quad 0x0000000000000000 /* NULL descriptor */
12791+ .quad 0x0000000000000000 /* 0x0b reserved */
12792+ .quad 0x0000000000000000 /* 0x13 reserved */
12793+ .quad 0x0000000000000000 /* 0x1b reserved */
12794+
12795+#ifdef CONFIG_PAX_KERNEXEC
12796+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12797+#else
12798+ .quad 0x0000000000000000 /* 0x20 unused */
12799+#endif
12800+
12801+ .quad 0x0000000000000000 /* 0x28 unused */
12802+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12803+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12804+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12805+ .quad 0x0000000000000000 /* 0x4b reserved */
12806+ .quad 0x0000000000000000 /* 0x53 reserved */
12807+ .quad 0x0000000000000000 /* 0x5b reserved */
12808+
12809+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12810+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12811+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12812+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12813+
12814+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12815+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12816+
12817+ /*
12818+ * Segments used for calling PnP BIOS have byte granularity.
12819+ * The code segments and data segments have fixed 64k limits,
12820+ * the transfer segment sizes are set at run time.
12821+ */
12822+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
12823+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
12824+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
12825+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
12826+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
12827+
12828+ /*
12829+ * The APM segments have byte granularity and their bases
12830+ * are set at run time. All have 64k limits.
12831+ */
12832+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12833+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12834+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
12835+
12836+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12837+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12838+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12839+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12840+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12841+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12842+
12843+ /* Be sure this is zeroed to avoid false validations in Xen */
12844+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12845+ .endr
12846diff -urNp linux-3.0.3/arch/x86/kernel/head_64.S linux-3.0.3/arch/x86/kernel/head_64.S
12847--- linux-3.0.3/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12848+++ linux-3.0.3/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12849@@ -19,6 +19,7 @@
12850 #include <asm/cache.h>
12851 #include <asm/processor-flags.h>
12852 #include <asm/percpu.h>
12853+#include <asm/cpufeature.h>
12854
12855 #ifdef CONFIG_PARAVIRT
12856 #include <asm/asm-offsets.h>
12857@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12858 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12859 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12860 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12861+L4_VMALLOC_START = pgd_index(VMALLOC_START)
12862+L3_VMALLOC_START = pud_index(VMALLOC_START)
12863+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12864+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12865
12866 .text
12867 __HEAD
12868@@ -85,35 +90,22 @@ startup_64:
12869 */
12870 addq %rbp, init_level4_pgt + 0(%rip)
12871 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12872+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12873+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12874 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12875
12876 addq %rbp, level3_ident_pgt + 0(%rip)
12877+#ifndef CONFIG_XEN
12878+ addq %rbp, level3_ident_pgt + 8(%rip)
12879+#endif
12880
12881- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12882- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12883+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12884
12885- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12886+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12887+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12888
12889- /* Add an Identity mapping if I am above 1G */
12890- leaq _text(%rip), %rdi
12891- andq $PMD_PAGE_MASK, %rdi
12892-
12893- movq %rdi, %rax
12894- shrq $PUD_SHIFT, %rax
12895- andq $(PTRS_PER_PUD - 1), %rax
12896- jz ident_complete
12897-
12898- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12899- leaq level3_ident_pgt(%rip), %rbx
12900- movq %rdx, 0(%rbx, %rax, 8)
12901-
12902- movq %rdi, %rax
12903- shrq $PMD_SHIFT, %rax
12904- andq $(PTRS_PER_PMD - 1), %rax
12905- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12906- leaq level2_spare_pgt(%rip), %rbx
12907- movq %rdx, 0(%rbx, %rax, 8)
12908-ident_complete:
12909+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12910+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12911
12912 /*
12913 * Fixup the kernel text+data virtual addresses. Note that
12914@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12915 * after the boot processor executes this code.
12916 */
12917
12918- /* Enable PAE mode and PGE */
12919- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12920+ /* Enable PAE mode and PSE/PGE */
12921+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12922 movq %rax, %cr4
12923
12924 /* Setup early boot stage 4 level pagetables. */
12925@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12926 movl $MSR_EFER, %ecx
12927 rdmsr
12928 btsl $_EFER_SCE, %eax /* Enable System Call */
12929- btl $20,%edi /* No Execute supported? */
12930+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12931 jnc 1f
12932 btsl $_EFER_NX, %eax
12933+ leaq init_level4_pgt(%rip), %rdi
12934+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12935+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12936+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12937+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12938 1: wrmsr /* Make changes effective */
12939
12940 /* Setup cr0 */
12941@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12942 bad_address:
12943 jmp bad_address
12944
12945- .section ".init.text","ax"
12946+ __INIT
12947 #ifdef CONFIG_EARLY_PRINTK
12948 .globl early_idt_handlers
12949 early_idt_handlers:
12950@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12951 #endif /* EARLY_PRINTK */
12952 1: hlt
12953 jmp 1b
12954+ .previous
12955
12956 #ifdef CONFIG_EARLY_PRINTK
12957+ __INITDATA
12958 early_recursion_flag:
12959 .long 0
12960+ .previous
12961
12962+ .section .rodata,"a",@progbits
12963 early_idt_msg:
12964 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12965 early_idt_ripmsg:
12966 .asciz "RIP %s\n"
12967-#endif /* CONFIG_EARLY_PRINTK */
12968 .previous
12969+#endif /* CONFIG_EARLY_PRINTK */
12970
12971+ .section .rodata,"a",@progbits
12972 #define NEXT_PAGE(name) \
12973 .balign PAGE_SIZE; \
12974 ENTRY(name)
12975@@ -338,7 +340,6 @@ ENTRY(name)
12976 i = i + 1 ; \
12977 .endr
12978
12979- .data
12980 /*
12981 * This default setting generates an ident mapping at address 0x100000
12982 * and a mapping for the kernel that precisely maps virtual address
12983@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12984 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12985 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12986 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12987+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
12988+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12989+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12990+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12991 .org init_level4_pgt + L4_START_KERNEL*8, 0
12992 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12993 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12994
12995+#ifdef CONFIG_PAX_PER_CPU_PGD
12996+NEXT_PAGE(cpu_pgd)
12997+ .rept NR_CPUS
12998+ .fill 512,8,0
12999+ .endr
13000+#endif
13001+
13002 NEXT_PAGE(level3_ident_pgt)
13003 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13004+#ifdef CONFIG_XEN
13005 .fill 511,8,0
13006+#else
13007+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13008+ .fill 510,8,0
13009+#endif
13010+
13011+NEXT_PAGE(level3_vmalloc_pgt)
13012+ .fill 512,8,0
13013+
13014+NEXT_PAGE(level3_vmemmap_pgt)
13015+ .fill L3_VMEMMAP_START,8,0
13016+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13017
13018 NEXT_PAGE(level3_kernel_pgt)
13019 .fill L3_START_KERNEL,8,0
13020@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13021 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13022 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13023
13024+NEXT_PAGE(level2_vmemmap_pgt)
13025+ .fill 512,8,0
13026+
13027 NEXT_PAGE(level2_fixmap_pgt)
13028- .fill 506,8,0
13029- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13030- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13031- .fill 5,8,0
13032+ .fill 507,8,0
13033+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13034+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13035+ .fill 4,8,0
13036
13037-NEXT_PAGE(level1_fixmap_pgt)
13038+NEXT_PAGE(level1_vsyscall_pgt)
13039 .fill 512,8,0
13040
13041-NEXT_PAGE(level2_ident_pgt)
13042- /* Since I easily can, map the first 1G.
13043+ /* Since I easily can, map the first 2G.
13044 * Don't set NX because code runs from these pages.
13045 */
13046- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13047+NEXT_PAGE(level2_ident_pgt)
13048+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13049
13050 NEXT_PAGE(level2_kernel_pgt)
13051 /*
13052@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13053 * If you want to increase this then increase MODULES_VADDR
13054 * too.)
13055 */
13056- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13057- KERNEL_IMAGE_SIZE/PMD_SIZE)
13058-
13059-NEXT_PAGE(level2_spare_pgt)
13060- .fill 512, 8, 0
13061+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13062
13063 #undef PMDS
13064 #undef NEXT_PAGE
13065
13066- .data
13067+ .align PAGE_SIZE
13068+ENTRY(cpu_gdt_table)
13069+ .rept NR_CPUS
13070+ .quad 0x0000000000000000 /* NULL descriptor */
13071+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13072+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13073+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13074+ .quad 0x00cffb000000ffff /* __USER32_CS */
13075+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13076+ .quad 0x00affb000000ffff /* __USER_CS */
13077+
13078+#ifdef CONFIG_PAX_KERNEXEC
13079+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13080+#else
13081+ .quad 0x0 /* unused */
13082+#endif
13083+
13084+ .quad 0,0 /* TSS */
13085+ .quad 0,0 /* LDT */
13086+ .quad 0,0,0 /* three TLS descriptors */
13087+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13088+ /* asm/segment.h:GDT_ENTRIES must match this */
13089+
13090+ /* zero the remaining page */
13091+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13092+ .endr
13093+
13094 .align 16
13095 .globl early_gdt_descr
13096 early_gdt_descr:
13097 .word GDT_ENTRIES*8-1
13098 early_gdt_descr_base:
13099- .quad INIT_PER_CPU_VAR(gdt_page)
13100+ .quad cpu_gdt_table
13101
13102 ENTRY(phys_base)
13103 /* This must match the first entry in level2_kernel_pgt */
13104 .quad 0x0000000000000000
13105
13106 #include "../../x86/xen/xen-head.S"
13107-
13108- .section .bss, "aw", @nobits
13109+
13110+ .section .rodata,"a",@progbits
13111 .align L1_CACHE_BYTES
13112 ENTRY(idt_table)
13113- .skip IDT_ENTRIES * 16
13114+ .fill 512,8,0
13115
13116 __PAGE_ALIGNED_BSS
13117 .align PAGE_SIZE
13118diff -urNp linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c
13119--- linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13120+++ linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13121@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13122 EXPORT_SYMBOL(cmpxchg8b_emu);
13123 #endif
13124
13125+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13126+
13127 /* Networking helper routines. */
13128 EXPORT_SYMBOL(csum_partial_copy_generic);
13129+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13130+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13131
13132 EXPORT_SYMBOL(__get_user_1);
13133 EXPORT_SYMBOL(__get_user_2);
13134@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13135
13136 EXPORT_SYMBOL(csum_partial);
13137 EXPORT_SYMBOL(empty_zero_page);
13138+
13139+#ifdef CONFIG_PAX_KERNEXEC
13140+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13141+#endif
13142diff -urNp linux-3.0.3/arch/x86/kernel/i8259.c linux-3.0.3/arch/x86/kernel/i8259.c
13143--- linux-3.0.3/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13144+++ linux-3.0.3/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13145@@ -210,7 +210,7 @@ spurious_8259A_irq:
13146 "spurious 8259A interrupt: IRQ%d.\n", irq);
13147 spurious_irq_mask |= irqmask;
13148 }
13149- atomic_inc(&irq_err_count);
13150+ atomic_inc_unchecked(&irq_err_count);
13151 /*
13152 * Theoretically we do not have to handle this IRQ,
13153 * but in Linux this does not cause problems and is
13154diff -urNp linux-3.0.3/arch/x86/kernel/init_task.c linux-3.0.3/arch/x86/kernel/init_task.c
13155--- linux-3.0.3/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13156+++ linux-3.0.3/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13157@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13158 * way process stacks are handled. This is done by having a special
13159 * "init_task" linker map entry..
13160 */
13161-union thread_union init_thread_union __init_task_data =
13162- { INIT_THREAD_INFO(init_task) };
13163+union thread_union init_thread_union __init_task_data;
13164
13165 /*
13166 * Initial task structure.
13167@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13168 * section. Since TSS's are completely CPU-local, we want them
13169 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13170 */
13171-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13172-
13173+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13174+EXPORT_SYMBOL(init_tss);
13175diff -urNp linux-3.0.3/arch/x86/kernel/ioport.c linux-3.0.3/arch/x86/kernel/ioport.c
13176--- linux-3.0.3/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13177+++ linux-3.0.3/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13178@@ -6,6 +6,7 @@
13179 #include <linux/sched.h>
13180 #include <linux/kernel.h>
13181 #include <linux/capability.h>
13182+#include <linux/security.h>
13183 #include <linux/errno.h>
13184 #include <linux/types.h>
13185 #include <linux/ioport.h>
13186@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13187
13188 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13189 return -EINVAL;
13190+#ifdef CONFIG_GRKERNSEC_IO
13191+ if (turn_on && grsec_disable_privio) {
13192+ gr_handle_ioperm();
13193+ return -EPERM;
13194+ }
13195+#endif
13196 if (turn_on && !capable(CAP_SYS_RAWIO))
13197 return -EPERM;
13198
13199@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13200 * because the ->io_bitmap_max value must match the bitmap
13201 * contents:
13202 */
13203- tss = &per_cpu(init_tss, get_cpu());
13204+ tss = init_tss + get_cpu();
13205
13206 if (turn_on)
13207 bitmap_clear(t->io_bitmap_ptr, from, num);
13208@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13209 return -EINVAL;
13210 /* Trying to gain more privileges? */
13211 if (level > old) {
13212+#ifdef CONFIG_GRKERNSEC_IO
13213+ if (grsec_disable_privio) {
13214+ gr_handle_iopl();
13215+ return -EPERM;
13216+ }
13217+#endif
13218 if (!capable(CAP_SYS_RAWIO))
13219 return -EPERM;
13220 }
13221diff -urNp linux-3.0.3/arch/x86/kernel/irq_32.c linux-3.0.3/arch/x86/kernel/irq_32.c
13222--- linux-3.0.3/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13223+++ linux-3.0.3/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13224@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13225 __asm__ __volatile__("andl %%esp,%0" :
13226 "=r" (sp) : "0" (THREAD_SIZE - 1));
13227
13228- return sp < (sizeof(struct thread_info) + STACK_WARN);
13229+ return sp < STACK_WARN;
13230 }
13231
13232 static void print_stack_overflow(void)
13233@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13234 * per-CPU IRQ handling contexts (thread information and stack)
13235 */
13236 union irq_ctx {
13237- struct thread_info tinfo;
13238- u32 stack[THREAD_SIZE/sizeof(u32)];
13239+ unsigned long previous_esp;
13240+ u32 stack[THREAD_SIZE/sizeof(u32)];
13241 } __attribute__((aligned(THREAD_SIZE)));
13242
13243 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13244@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13245 static inline int
13246 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13247 {
13248- union irq_ctx *curctx, *irqctx;
13249+ union irq_ctx *irqctx;
13250 u32 *isp, arg1, arg2;
13251
13252- curctx = (union irq_ctx *) current_thread_info();
13253 irqctx = __this_cpu_read(hardirq_ctx);
13254
13255 /*
13256@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13257 * handler) we can't do that and just have to keep using the
13258 * current stack (which is the irq stack already after all)
13259 */
13260- if (unlikely(curctx == irqctx))
13261+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13262 return 0;
13263
13264 /* build the stack frame on the IRQ stack */
13265- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13266- irqctx->tinfo.task = curctx->tinfo.task;
13267- irqctx->tinfo.previous_esp = current_stack_pointer;
13268+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13269+ irqctx->previous_esp = current_stack_pointer;
13270
13271- /*
13272- * Copy the softirq bits in preempt_count so that the
13273- * softirq checks work in the hardirq context.
13274- */
13275- irqctx->tinfo.preempt_count =
13276- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13277- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13278+#ifdef CONFIG_PAX_MEMORY_UDEREF
13279+ __set_fs(MAKE_MM_SEG(0));
13280+#endif
13281
13282 if (unlikely(overflow))
13283 call_on_stack(print_stack_overflow, isp);
13284@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13285 : "0" (irq), "1" (desc), "2" (isp),
13286 "D" (desc->handle_irq)
13287 : "memory", "cc", "ecx");
13288+
13289+#ifdef CONFIG_PAX_MEMORY_UDEREF
13290+ __set_fs(current_thread_info()->addr_limit);
13291+#endif
13292+
13293 return 1;
13294 }
13295
13296@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13297 */
13298 void __cpuinit irq_ctx_init(int cpu)
13299 {
13300- union irq_ctx *irqctx;
13301-
13302 if (per_cpu(hardirq_ctx, cpu))
13303 return;
13304
13305- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13306- THREAD_FLAGS,
13307- THREAD_ORDER));
13308- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13309- irqctx->tinfo.cpu = cpu;
13310- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13311- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13312-
13313- per_cpu(hardirq_ctx, cpu) = irqctx;
13314-
13315- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13316- THREAD_FLAGS,
13317- THREAD_ORDER));
13318- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13319- irqctx->tinfo.cpu = cpu;
13320- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13321-
13322- per_cpu(softirq_ctx, cpu) = irqctx;
13323+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13324+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13325
13326 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13327 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13328@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13329 asmlinkage void do_softirq(void)
13330 {
13331 unsigned long flags;
13332- struct thread_info *curctx;
13333 union irq_ctx *irqctx;
13334 u32 *isp;
13335
13336@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13337 local_irq_save(flags);
13338
13339 if (local_softirq_pending()) {
13340- curctx = current_thread_info();
13341 irqctx = __this_cpu_read(softirq_ctx);
13342- irqctx->tinfo.task = curctx->task;
13343- irqctx->tinfo.previous_esp = current_stack_pointer;
13344+ irqctx->previous_esp = current_stack_pointer;
13345
13346 /* build the stack frame on the softirq stack */
13347- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13348+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13349+
13350+#ifdef CONFIG_PAX_MEMORY_UDEREF
13351+ __set_fs(MAKE_MM_SEG(0));
13352+#endif
13353
13354 call_on_stack(__do_softirq, isp);
13355+
13356+#ifdef CONFIG_PAX_MEMORY_UDEREF
13357+ __set_fs(current_thread_info()->addr_limit);
13358+#endif
13359+
13360 /*
13361 * Shouldn't happen, we returned above if in_interrupt():
13362 */
13363diff -urNp linux-3.0.3/arch/x86/kernel/irq.c linux-3.0.3/arch/x86/kernel/irq.c
13364--- linux-3.0.3/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13365+++ linux-3.0.3/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13366@@ -17,7 +17,7 @@
13367 #include <asm/mce.h>
13368 #include <asm/hw_irq.h>
13369
13370-atomic_t irq_err_count;
13371+atomic_unchecked_t irq_err_count;
13372
13373 /* Function pointer for generic interrupt vector handling */
13374 void (*x86_platform_ipi_callback)(void) = NULL;
13375@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13376 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13377 seq_printf(p, " Machine check polls\n");
13378 #endif
13379- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13380+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13381 #if defined(CONFIG_X86_IO_APIC)
13382- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13383+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13384 #endif
13385 return 0;
13386 }
13387@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13388
13389 u64 arch_irq_stat(void)
13390 {
13391- u64 sum = atomic_read(&irq_err_count);
13392+ u64 sum = atomic_read_unchecked(&irq_err_count);
13393
13394 #ifdef CONFIG_X86_IO_APIC
13395- sum += atomic_read(&irq_mis_count);
13396+ sum += atomic_read_unchecked(&irq_mis_count);
13397 #endif
13398 return sum;
13399 }
13400diff -urNp linux-3.0.3/arch/x86/kernel/kgdb.c linux-3.0.3/arch/x86/kernel/kgdb.c
13401--- linux-3.0.3/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13402+++ linux-3.0.3/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13403@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13404 #ifdef CONFIG_X86_32
13405 switch (regno) {
13406 case GDB_SS:
13407- if (!user_mode_vm(regs))
13408+ if (!user_mode(regs))
13409 *(unsigned long *)mem = __KERNEL_DS;
13410 break;
13411 case GDB_SP:
13412- if (!user_mode_vm(regs))
13413+ if (!user_mode(regs))
13414 *(unsigned long *)mem = kernel_stack_pointer(regs);
13415 break;
13416 case GDB_GS:
13417@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13418 case 'k':
13419 /* clear the trace bit */
13420 linux_regs->flags &= ~X86_EFLAGS_TF;
13421- atomic_set(&kgdb_cpu_doing_single_step, -1);
13422+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13423
13424 /* set the trace bit if we're stepping */
13425 if (remcomInBuffer[0] == 's') {
13426 linux_regs->flags |= X86_EFLAGS_TF;
13427- atomic_set(&kgdb_cpu_doing_single_step,
13428+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13429 raw_smp_processor_id());
13430 }
13431
13432@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13433 return NOTIFY_DONE;
13434
13435 case DIE_DEBUG:
13436- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13437+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13438 if (user_mode(regs))
13439 return single_step_cont(regs, args);
13440 break;
13441diff -urNp linux-3.0.3/arch/x86/kernel/kprobes.c linux-3.0.3/arch/x86/kernel/kprobes.c
13442--- linux-3.0.3/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13443+++ linux-3.0.3/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13444@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13445 } __attribute__((packed)) *insn;
13446
13447 insn = (struct __arch_relative_insn *)from;
13448+
13449+ pax_open_kernel();
13450 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13451 insn->op = op;
13452+ pax_close_kernel();
13453 }
13454
13455 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13456@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13457 kprobe_opcode_t opcode;
13458 kprobe_opcode_t *orig_opcodes = opcodes;
13459
13460- if (search_exception_tables((unsigned long)opcodes))
13461+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13462 return 0; /* Page fault may occur on this address. */
13463
13464 retry:
13465@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13466 }
13467 }
13468 insn_get_length(&insn);
13469+ pax_open_kernel();
13470 memcpy(dest, insn.kaddr, insn.length);
13471+ pax_close_kernel();
13472
13473 #ifdef CONFIG_X86_64
13474 if (insn_rip_relative(&insn)) {
13475@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13476 (u8 *) dest;
13477 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13478 disp = (u8 *) dest + insn_offset_displacement(&insn);
13479+ pax_open_kernel();
13480 *(s32 *) disp = (s32) newdisp;
13481+ pax_close_kernel();
13482 }
13483 #endif
13484 return insn.length;
13485@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13486 */
13487 __copy_instruction(p->ainsn.insn, p->addr, 0);
13488
13489- if (can_boost(p->addr))
13490+ if (can_boost(ktla_ktva(p->addr)))
13491 p->ainsn.boostable = 0;
13492 else
13493 p->ainsn.boostable = -1;
13494
13495- p->opcode = *p->addr;
13496+ p->opcode = *(ktla_ktva(p->addr));
13497 }
13498
13499 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13500@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13501 * nor set current_kprobe, because it doesn't use single
13502 * stepping.
13503 */
13504- regs->ip = (unsigned long)p->ainsn.insn;
13505+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506 preempt_enable_no_resched();
13507 return;
13508 }
13509@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13510 if (p->opcode == BREAKPOINT_INSTRUCTION)
13511 regs->ip = (unsigned long)p->addr;
13512 else
13513- regs->ip = (unsigned long)p->ainsn.insn;
13514+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13515 }
13516
13517 /*
13518@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13519 setup_singlestep(p, regs, kcb, 0);
13520 return 1;
13521 }
13522- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13523+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13524 /*
13525 * The breakpoint instruction was removed right
13526 * after we hit it. Another cpu has removed
13527@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13528 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13529 {
13530 unsigned long *tos = stack_addr(regs);
13531- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13532+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13533 unsigned long orig_ip = (unsigned long)p->addr;
13534 kprobe_opcode_t *insn = p->ainsn.insn;
13535
13536@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13537 struct die_args *args = data;
13538 int ret = NOTIFY_DONE;
13539
13540- if (args->regs && user_mode_vm(args->regs))
13541+ if (args->regs && user_mode(args->regs))
13542 return ret;
13543
13544 switch (val) {
13545@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13546 * Verify if the address gap is in 2GB range, because this uses
13547 * a relative jump.
13548 */
13549- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13550+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13551 if (abs(rel) > 0x7fffffff)
13552 return -ERANGE;
13553
13554@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13555 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13556
13557 /* Set probe function call */
13558- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13559+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13560
13561 /* Set returning jmp instruction at the tail of out-of-line buffer */
13562 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13563- (u8 *)op->kp.addr + op->optinsn.size);
13564+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13565
13566 flush_icache_range((unsigned long) buf,
13567 (unsigned long) buf + TMPL_END_IDX +
13568@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13569 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13570
13571 /* Backup instructions which will be replaced by jump address */
13572- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13573+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13574 RELATIVE_ADDR_SIZE);
13575
13576 insn_buf[0] = RELATIVEJUMP_OPCODE;
13577diff -urNp linux-3.0.3/arch/x86/kernel/kvm.c linux-3.0.3/arch/x86/kernel/kvm.c
13578--- linux-3.0.3/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13579+++ linux-3.0.3/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13580@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13581 pv_mmu_ops.set_pud = kvm_set_pud;
13582 #if PAGETABLE_LEVELS == 4
13583 pv_mmu_ops.set_pgd = kvm_set_pgd;
13584+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13585 #endif
13586 #endif
13587 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13588diff -urNp linux-3.0.3/arch/x86/kernel/ldt.c linux-3.0.3/arch/x86/kernel/ldt.c
13589--- linux-3.0.3/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13590+++ linux-3.0.3/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13591@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13592 if (reload) {
13593 #ifdef CONFIG_SMP
13594 preempt_disable();
13595- load_LDT(pc);
13596+ load_LDT_nolock(pc);
13597 if (!cpumask_equal(mm_cpumask(current->mm),
13598 cpumask_of(smp_processor_id())))
13599 smp_call_function(flush_ldt, current->mm, 1);
13600 preempt_enable();
13601 #else
13602- load_LDT(pc);
13603+ load_LDT_nolock(pc);
13604 #endif
13605 }
13606 if (oldsize) {
13607@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13608 return err;
13609
13610 for (i = 0; i < old->size; i++)
13611- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13612+ write_ldt_entry(new->ldt, i, old->ldt + i);
13613 return 0;
13614 }
13615
13616@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13617 retval = copy_ldt(&mm->context, &old_mm->context);
13618 mutex_unlock(&old_mm->context.lock);
13619 }
13620+
13621+ if (tsk == current) {
13622+ mm->context.vdso = 0;
13623+
13624+#ifdef CONFIG_X86_32
13625+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13626+ mm->context.user_cs_base = 0UL;
13627+ mm->context.user_cs_limit = ~0UL;
13628+
13629+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13630+ cpus_clear(mm->context.cpu_user_cs_mask);
13631+#endif
13632+
13633+#endif
13634+#endif
13635+
13636+ }
13637+
13638 return retval;
13639 }
13640
13641@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13642 }
13643 }
13644
13645+#ifdef CONFIG_PAX_SEGMEXEC
13646+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13647+ error = -EINVAL;
13648+ goto out_unlock;
13649+ }
13650+#endif
13651+
13652 fill_ldt(&ldt, &ldt_info);
13653 if (oldmode)
13654 ldt.avl = 0;
13655diff -urNp linux-3.0.3/arch/x86/kernel/machine_kexec_32.c linux-3.0.3/arch/x86/kernel/machine_kexec_32.c
13656--- linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13657+++ linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13658@@ -27,7 +27,7 @@
13659 #include <asm/cacheflush.h>
13660 #include <asm/debugreg.h>
13661
13662-static void set_idt(void *newidt, __u16 limit)
13663+static void set_idt(struct desc_struct *newidt, __u16 limit)
13664 {
13665 struct desc_ptr curidt;
13666
13667@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13668 }
13669
13670
13671-static void set_gdt(void *newgdt, __u16 limit)
13672+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13673 {
13674 struct desc_ptr curgdt;
13675
13676@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13677 }
13678
13679 control_page = page_address(image->control_code_page);
13680- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13681+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13682
13683 relocate_kernel_ptr = control_page;
13684 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13685diff -urNp linux-3.0.3/arch/x86/kernel/microcode_intel.c linux-3.0.3/arch/x86/kernel/microcode_intel.c
13686--- linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13687+++ linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13688@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13689
13690 static int get_ucode_user(void *to, const void *from, size_t n)
13691 {
13692- return copy_from_user(to, from, n);
13693+ return copy_from_user(to, (__force const void __user *)from, n);
13694 }
13695
13696 static enum ucode_state
13697 request_microcode_user(int cpu, const void __user *buf, size_t size)
13698 {
13699- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13700+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13701 }
13702
13703 static void microcode_fini_cpu(int cpu)
13704diff -urNp linux-3.0.3/arch/x86/kernel/module.c linux-3.0.3/arch/x86/kernel/module.c
13705--- linux-3.0.3/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13706+++ linux-3.0.3/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13707@@ -36,21 +36,66 @@
13708 #define DEBUGP(fmt...)
13709 #endif
13710
13711-void *module_alloc(unsigned long size)
13712+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13713 {
13714 if (PAGE_ALIGN(size) > MODULES_LEN)
13715 return NULL;
13716 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13717- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13718+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13719 -1, __builtin_return_address(0));
13720 }
13721
13722+void *module_alloc(unsigned long size)
13723+{
13724+
13725+#ifdef CONFIG_PAX_KERNEXEC
13726+ return __module_alloc(size, PAGE_KERNEL);
13727+#else
13728+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13729+#endif
13730+
13731+}
13732+
13733 /* Free memory returned from module_alloc */
13734 void module_free(struct module *mod, void *module_region)
13735 {
13736 vfree(module_region);
13737 }
13738
13739+#ifdef CONFIG_PAX_KERNEXEC
13740+#ifdef CONFIG_X86_32
13741+void *module_alloc_exec(unsigned long size)
13742+{
13743+ struct vm_struct *area;
13744+
13745+ if (size == 0)
13746+ return NULL;
13747+
13748+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13749+ return area ? area->addr : NULL;
13750+}
13751+EXPORT_SYMBOL(module_alloc_exec);
13752+
13753+void module_free_exec(struct module *mod, void *module_region)
13754+{
13755+ vunmap(module_region);
13756+}
13757+EXPORT_SYMBOL(module_free_exec);
13758+#else
13759+void module_free_exec(struct module *mod, void *module_region)
13760+{
13761+ module_free(mod, module_region);
13762+}
13763+EXPORT_SYMBOL(module_free_exec);
13764+
13765+void *module_alloc_exec(unsigned long size)
13766+{
13767+ return __module_alloc(size, PAGE_KERNEL_RX);
13768+}
13769+EXPORT_SYMBOL(module_alloc_exec);
13770+#endif
13771+#endif
13772+
13773 /* We don't need anything special. */
13774 int module_frob_arch_sections(Elf_Ehdr *hdr,
13775 Elf_Shdr *sechdrs,
13776@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13777 unsigned int i;
13778 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13779 Elf32_Sym *sym;
13780- uint32_t *location;
13781+ uint32_t *plocation, location;
13782
13783 DEBUGP("Applying relocate section %u to %u\n", relsec,
13784 sechdrs[relsec].sh_info);
13785 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13786 /* This is where to make the change */
13787- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13788- + rel[i].r_offset;
13789+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13790+ location = (uint32_t)plocation;
13791+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13792+ plocation = ktla_ktva((void *)plocation);
13793 /* This is the symbol it is referring to. Note that all
13794 undefined symbols have been resolved. */
13795 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13796@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13797 switch (ELF32_R_TYPE(rel[i].r_info)) {
13798 case R_386_32:
13799 /* We add the value into the location given */
13800- *location += sym->st_value;
13801+ pax_open_kernel();
13802+ *plocation += sym->st_value;
13803+ pax_close_kernel();
13804 break;
13805 case R_386_PC32:
13806 /* Add the value, subtract its postition */
13807- *location += sym->st_value - (uint32_t)location;
13808+ pax_open_kernel();
13809+ *plocation += sym->st_value - location;
13810+ pax_close_kernel();
13811 break;
13812 default:
13813 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13814@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13815 case R_X86_64_NONE:
13816 break;
13817 case R_X86_64_64:
13818+ pax_open_kernel();
13819 *(u64 *)loc = val;
13820+ pax_close_kernel();
13821 break;
13822 case R_X86_64_32:
13823+ pax_open_kernel();
13824 *(u32 *)loc = val;
13825+ pax_close_kernel();
13826 if (val != *(u32 *)loc)
13827 goto overflow;
13828 break;
13829 case R_X86_64_32S:
13830+ pax_open_kernel();
13831 *(s32 *)loc = val;
13832+ pax_close_kernel();
13833 if ((s64)val != *(s32 *)loc)
13834 goto overflow;
13835 break;
13836 case R_X86_64_PC32:
13837 val -= (u64)loc;
13838+ pax_open_kernel();
13839 *(u32 *)loc = val;
13840+ pax_close_kernel();
13841+
13842 #if 0
13843 if ((s64)val != *(s32 *)loc)
13844 goto overflow;
13845diff -urNp linux-3.0.3/arch/x86/kernel/paravirt.c linux-3.0.3/arch/x86/kernel/paravirt.c
13846--- linux-3.0.3/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13847+++ linux-3.0.3/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13848@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13849 {
13850 return x;
13851 }
13852+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13853+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13854+#endif
13855
13856 void __init default_banner(void)
13857 {
13858@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13859 * corresponding structure. */
13860 static void *get_call_destination(u8 type)
13861 {
13862- struct paravirt_patch_template tmpl = {
13863+ const struct paravirt_patch_template tmpl = {
13864 .pv_init_ops = pv_init_ops,
13865 .pv_time_ops = pv_time_ops,
13866 .pv_cpu_ops = pv_cpu_ops,
13867@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13868 .pv_lock_ops = pv_lock_ops,
13869 #endif
13870 };
13871+
13872+ pax_track_stack();
13873+
13874 return *((void **)&tmpl + type);
13875 }
13876
13877@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13878 if (opfunc == NULL)
13879 /* If there's no function, patch it with a ud2a (BUG) */
13880 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13881- else if (opfunc == _paravirt_nop)
13882+ else if (opfunc == (void *)_paravirt_nop)
13883 /* If the operation is a nop, then nop the callsite */
13884 ret = paravirt_patch_nop();
13885
13886 /* identity functions just return their single argument */
13887- else if (opfunc == _paravirt_ident_32)
13888+ else if (opfunc == (void *)_paravirt_ident_32)
13889 ret = paravirt_patch_ident_32(insnbuf, len);
13890- else if (opfunc == _paravirt_ident_64)
13891+ else if (opfunc == (void *)_paravirt_ident_64)
13892 ret = paravirt_patch_ident_64(insnbuf, len);
13893+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13894+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13895+ ret = paravirt_patch_ident_64(insnbuf, len);
13896+#endif
13897
13898 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13899 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13900@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13901 if (insn_len > len || start == NULL)
13902 insn_len = len;
13903 else
13904- memcpy(insnbuf, start, insn_len);
13905+ memcpy(insnbuf, ktla_ktva(start), insn_len);
13906
13907 return insn_len;
13908 }
13909@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13910 preempt_enable();
13911 }
13912
13913-struct pv_info pv_info = {
13914+struct pv_info pv_info __read_only = {
13915 .name = "bare hardware",
13916 .paravirt_enabled = 0,
13917 .kernel_rpl = 0,
13918 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13919 };
13920
13921-struct pv_init_ops pv_init_ops = {
13922+struct pv_init_ops pv_init_ops __read_only = {
13923 .patch = native_patch,
13924 };
13925
13926-struct pv_time_ops pv_time_ops = {
13927+struct pv_time_ops pv_time_ops __read_only = {
13928 .sched_clock = native_sched_clock,
13929 };
13930
13931-struct pv_irq_ops pv_irq_ops = {
13932+struct pv_irq_ops pv_irq_ops __read_only = {
13933 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13934 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13935 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13936@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13937 #endif
13938 };
13939
13940-struct pv_cpu_ops pv_cpu_ops = {
13941+struct pv_cpu_ops pv_cpu_ops __read_only = {
13942 .cpuid = native_cpuid,
13943 .get_debugreg = native_get_debugreg,
13944 .set_debugreg = native_set_debugreg,
13945@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13946 .end_context_switch = paravirt_nop,
13947 };
13948
13949-struct pv_apic_ops pv_apic_ops = {
13950+struct pv_apic_ops pv_apic_ops __read_only = {
13951 #ifdef CONFIG_X86_LOCAL_APIC
13952 .startup_ipi_hook = paravirt_nop,
13953 #endif
13954 };
13955
13956-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13957+#ifdef CONFIG_X86_32
13958+#ifdef CONFIG_X86_PAE
13959+/* 64-bit pagetable entries */
13960+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13961+#else
13962 /* 32-bit pagetable entries */
13963 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13964+#endif
13965 #else
13966 /* 64-bit pagetable entries */
13967 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13968 #endif
13969
13970-struct pv_mmu_ops pv_mmu_ops = {
13971+struct pv_mmu_ops pv_mmu_ops __read_only = {
13972
13973 .read_cr2 = native_read_cr2,
13974 .write_cr2 = native_write_cr2,
13975@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13976 .make_pud = PTE_IDENT,
13977
13978 .set_pgd = native_set_pgd,
13979+ .set_pgd_batched = native_set_pgd_batched,
13980 #endif
13981 #endif /* PAGETABLE_LEVELS >= 3 */
13982
13983@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13984 },
13985
13986 .set_fixmap = native_set_fixmap,
13987+
13988+#ifdef CONFIG_PAX_KERNEXEC
13989+ .pax_open_kernel = native_pax_open_kernel,
13990+ .pax_close_kernel = native_pax_close_kernel,
13991+#endif
13992+
13993 };
13994
13995 EXPORT_SYMBOL_GPL(pv_time_ops);
13996diff -urNp linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c
13997--- linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13998+++ linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13999@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14000 arch_spin_lock(lock);
14001 }
14002
14003-struct pv_lock_ops pv_lock_ops = {
14004+struct pv_lock_ops pv_lock_ops __read_only = {
14005 #ifdef CONFIG_SMP
14006 .spin_is_locked = __ticket_spin_is_locked,
14007 .spin_is_contended = __ticket_spin_is_contended,
14008diff -urNp linux-3.0.3/arch/x86/kernel/pci-iommu_table.c linux-3.0.3/arch/x86/kernel/pci-iommu_table.c
14009--- linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14010+++ linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14011@@ -2,7 +2,7 @@
14012 #include <asm/iommu_table.h>
14013 #include <linux/string.h>
14014 #include <linux/kallsyms.h>
14015-
14016+#include <linux/sched.h>
14017
14018 #define DEBUG 1
14019
14020@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14021 {
14022 struct iommu_table_entry *p, *q, *x;
14023
14024+ pax_track_stack();
14025+
14026 /* Simple cyclic dependency checker. */
14027 for (p = start; p < finish; p++) {
14028 q = find_dependents_of(start, finish, p);
14029diff -urNp linux-3.0.3/arch/x86/kernel/process_32.c linux-3.0.3/arch/x86/kernel/process_32.c
14030--- linux-3.0.3/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14031+++ linux-3.0.3/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14032@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14033 unsigned long thread_saved_pc(struct task_struct *tsk)
14034 {
14035 return ((unsigned long *)tsk->thread.sp)[3];
14036+//XXX return tsk->thread.eip;
14037 }
14038
14039 #ifndef CONFIG_SMP
14040@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14041 unsigned long sp;
14042 unsigned short ss, gs;
14043
14044- if (user_mode_vm(regs)) {
14045+ if (user_mode(regs)) {
14046 sp = regs->sp;
14047 ss = regs->ss & 0xffff;
14048- gs = get_user_gs(regs);
14049 } else {
14050 sp = kernel_stack_pointer(regs);
14051 savesegment(ss, ss);
14052- savesegment(gs, gs);
14053 }
14054+ gs = get_user_gs(regs);
14055
14056 show_regs_common();
14057
14058@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14059 struct task_struct *tsk;
14060 int err;
14061
14062- childregs = task_pt_regs(p);
14063+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14064 *childregs = *regs;
14065 childregs->ax = 0;
14066 childregs->sp = sp;
14067
14068 p->thread.sp = (unsigned long) childregs;
14069 p->thread.sp0 = (unsigned long) (childregs+1);
14070+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14071
14072 p->thread.ip = (unsigned long) ret_from_fork;
14073
14074@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14075 struct thread_struct *prev = &prev_p->thread,
14076 *next = &next_p->thread;
14077 int cpu = smp_processor_id();
14078- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14079+ struct tss_struct *tss = init_tss + cpu;
14080 bool preload_fpu;
14081
14082 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14083@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14084 */
14085 lazy_save_gs(prev->gs);
14086
14087+#ifdef CONFIG_PAX_MEMORY_UDEREF
14088+ __set_fs(task_thread_info(next_p)->addr_limit);
14089+#endif
14090+
14091 /*
14092 * Load the per-thread Thread-Local Storage descriptor.
14093 */
14094@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14095 */
14096 arch_end_context_switch(next_p);
14097
14098+ percpu_write(current_task, next_p);
14099+ percpu_write(current_tinfo, &next_p->tinfo);
14100+
14101 if (preload_fpu)
14102 __math_state_restore();
14103
14104@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14105 if (prev->gs | next->gs)
14106 lazy_load_gs(next->gs);
14107
14108- percpu_write(current_task, next_p);
14109-
14110 return prev_p;
14111 }
14112
14113@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14114 } while (count++ < 16);
14115 return 0;
14116 }
14117-
14118diff -urNp linux-3.0.3/arch/x86/kernel/process_64.c linux-3.0.3/arch/x86/kernel/process_64.c
14119--- linux-3.0.3/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14120+++ linux-3.0.3/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14121@@ -87,7 +87,7 @@ static void __exit_idle(void)
14122 void exit_idle(void)
14123 {
14124 /* idle loop has pid 0 */
14125- if (current->pid)
14126+ if (task_pid_nr(current))
14127 return;
14128 __exit_idle();
14129 }
14130@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14131 struct pt_regs *childregs;
14132 struct task_struct *me = current;
14133
14134- childregs = ((struct pt_regs *)
14135- (THREAD_SIZE + task_stack_page(p))) - 1;
14136+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14137 *childregs = *regs;
14138
14139 childregs->ax = 0;
14140@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14141 p->thread.sp = (unsigned long) childregs;
14142 p->thread.sp0 = (unsigned long) (childregs+1);
14143 p->thread.usersp = me->thread.usersp;
14144+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14145
14146 set_tsk_thread_flag(p, TIF_FORK);
14147
14148@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14149 struct thread_struct *prev = &prev_p->thread;
14150 struct thread_struct *next = &next_p->thread;
14151 int cpu = smp_processor_id();
14152- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14153+ struct tss_struct *tss = init_tss + cpu;
14154 unsigned fsindex, gsindex;
14155 bool preload_fpu;
14156
14157@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14158 prev->usersp = percpu_read(old_rsp);
14159 percpu_write(old_rsp, next->usersp);
14160 percpu_write(current_task, next_p);
14161+ percpu_write(current_tinfo, &next_p->tinfo);
14162
14163- percpu_write(kernel_stack,
14164- (unsigned long)task_stack_page(next_p) +
14165- THREAD_SIZE - KERNEL_STACK_OFFSET);
14166+ percpu_write(kernel_stack, next->sp0);
14167
14168 /*
14169 * Now maybe reload the debug registers and handle I/O bitmaps
14170@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14171 if (!p || p == current || p->state == TASK_RUNNING)
14172 return 0;
14173 stack = (unsigned long)task_stack_page(p);
14174- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14175+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14176 return 0;
14177 fp = *(u64 *)(p->thread.sp);
14178 do {
14179- if (fp < (unsigned long)stack ||
14180- fp >= (unsigned long)stack+THREAD_SIZE)
14181+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14182 return 0;
14183 ip = *(u64 *)(fp+8);
14184 if (!in_sched_functions(ip))
14185diff -urNp linux-3.0.3/arch/x86/kernel/process.c linux-3.0.3/arch/x86/kernel/process.c
14186--- linux-3.0.3/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14187+++ linux-3.0.3/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14188@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14189
14190 void free_thread_info(struct thread_info *ti)
14191 {
14192- free_thread_xstate(ti->task);
14193 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14194 }
14195
14196+static struct kmem_cache *task_struct_cachep;
14197+
14198 void arch_task_cache_init(void)
14199 {
14200- task_xstate_cachep =
14201- kmem_cache_create("task_xstate", xstate_size,
14202+ /* create a slab on which task_structs can be allocated */
14203+ task_struct_cachep =
14204+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14205+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14206+
14207+ task_xstate_cachep =
14208+ kmem_cache_create("task_xstate", xstate_size,
14209 __alignof__(union thread_xstate),
14210- SLAB_PANIC | SLAB_NOTRACK, NULL);
14211+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14212+}
14213+
14214+struct task_struct *alloc_task_struct_node(int node)
14215+{
14216+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14217+}
14218+
14219+void free_task_struct(struct task_struct *task)
14220+{
14221+ free_thread_xstate(task);
14222+ kmem_cache_free(task_struct_cachep, task);
14223 }
14224
14225 /*
14226@@ -70,7 +87,7 @@ void exit_thread(void)
14227 unsigned long *bp = t->io_bitmap_ptr;
14228
14229 if (bp) {
14230- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14231+ struct tss_struct *tss = init_tss + get_cpu();
14232
14233 t->io_bitmap_ptr = NULL;
14234 clear_thread_flag(TIF_IO_BITMAP);
14235@@ -106,7 +123,7 @@ void show_regs_common(void)
14236
14237 printk(KERN_CONT "\n");
14238 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14239- current->pid, current->comm, print_tainted(),
14240+ task_pid_nr(current), current->comm, print_tainted(),
14241 init_utsname()->release,
14242 (int)strcspn(init_utsname()->version, " "),
14243 init_utsname()->version);
14244@@ -120,6 +137,9 @@ void flush_thread(void)
14245 {
14246 struct task_struct *tsk = current;
14247
14248+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14249+ loadsegment(gs, 0);
14250+#endif
14251 flush_ptrace_hw_breakpoint(tsk);
14252 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14253 /*
14254@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14255 regs.di = (unsigned long) arg;
14256
14257 #ifdef CONFIG_X86_32
14258- regs.ds = __USER_DS;
14259- regs.es = __USER_DS;
14260+ regs.ds = __KERNEL_DS;
14261+ regs.es = __KERNEL_DS;
14262 regs.fs = __KERNEL_PERCPU;
14263- regs.gs = __KERNEL_STACK_CANARY;
14264+ savesegment(gs, regs.gs);
14265 #else
14266 regs.ss = __KERNEL_DS;
14267 #endif
14268@@ -403,7 +423,7 @@ void default_idle(void)
14269 EXPORT_SYMBOL(default_idle);
14270 #endif
14271
14272-void stop_this_cpu(void *dummy)
14273+__noreturn void stop_this_cpu(void *dummy)
14274 {
14275 local_irq_disable();
14276 /*
14277@@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14278 }
14279 early_param("idle", idle_setup);
14280
14281-unsigned long arch_align_stack(unsigned long sp)
14282+#ifdef CONFIG_PAX_RANDKSTACK
14283+asmlinkage void pax_randomize_kstack(void)
14284 {
14285- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14286- sp -= get_random_int() % 8192;
14287- return sp & ~0xf;
14288-}
14289+ struct thread_struct *thread = &current->thread;
14290+ unsigned long time;
14291
14292-unsigned long arch_randomize_brk(struct mm_struct *mm)
14293-{
14294- unsigned long range_end = mm->brk + 0x02000000;
14295- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14296-}
14297+ if (!randomize_va_space)
14298+ return;
14299+
14300+ rdtscl(time);
14301+
14302+ /* P4 seems to return a 0 LSB, ignore it */
14303+#ifdef CONFIG_MPENTIUM4
14304+ time &= 0x3EUL;
14305+ time <<= 2;
14306+#elif defined(CONFIG_X86_64)
14307+ time &= 0xFUL;
14308+ time <<= 4;
14309+#else
14310+ time &= 0x1FUL;
14311+ time <<= 3;
14312+#endif
14313+
14314+ thread->sp0 ^= time;
14315+ load_sp0(init_tss + smp_processor_id(), thread);
14316
14317+#ifdef CONFIG_X86_64
14318+ percpu_write(kernel_stack, thread->sp0);
14319+#endif
14320+}
14321+#endif
14322diff -urNp linux-3.0.3/arch/x86/kernel/ptrace.c linux-3.0.3/arch/x86/kernel/ptrace.c
14323--- linux-3.0.3/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14324+++ linux-3.0.3/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14325@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14326 unsigned long addr, unsigned long data)
14327 {
14328 int ret;
14329- unsigned long __user *datap = (unsigned long __user *)data;
14330+ unsigned long __user *datap = (__force unsigned long __user *)data;
14331
14332 switch (request) {
14333 /* read the word at location addr in the USER area. */
14334@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14335 if ((int) addr < 0)
14336 return -EIO;
14337 ret = do_get_thread_area(child, addr,
14338- (struct user_desc __user *)data);
14339+ (__force struct user_desc __user *) data);
14340 break;
14341
14342 case PTRACE_SET_THREAD_AREA:
14343 if ((int) addr < 0)
14344 return -EIO;
14345 ret = do_set_thread_area(child, addr,
14346- (struct user_desc __user *)data, 0);
14347+ (__force struct user_desc __user *) data, 0);
14348 break;
14349 #endif
14350
14351@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14352 memset(info, 0, sizeof(*info));
14353 info->si_signo = SIGTRAP;
14354 info->si_code = si_code;
14355- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14356+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14357 }
14358
14359 void user_single_step_siginfo(struct task_struct *tsk,
14360diff -urNp linux-3.0.3/arch/x86/kernel/pvclock.c linux-3.0.3/arch/x86/kernel/pvclock.c
14361--- linux-3.0.3/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14362+++ linux-3.0.3/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14363@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14364 return pv_tsc_khz;
14365 }
14366
14367-static atomic64_t last_value = ATOMIC64_INIT(0);
14368+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14369
14370 void pvclock_resume(void)
14371 {
14372- atomic64_set(&last_value, 0);
14373+ atomic64_set_unchecked(&last_value, 0);
14374 }
14375
14376 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14377@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14378 * updating at the same time, and one of them could be slightly behind,
14379 * making the assumption that last_value always go forward fail to hold.
14380 */
14381- last = atomic64_read(&last_value);
14382+ last = atomic64_read_unchecked(&last_value);
14383 do {
14384 if (ret < last)
14385 return last;
14386- last = atomic64_cmpxchg(&last_value, last, ret);
14387+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14388 } while (unlikely(last != ret));
14389
14390 return ret;
14391diff -urNp linux-3.0.3/arch/x86/kernel/reboot.c linux-3.0.3/arch/x86/kernel/reboot.c
14392--- linux-3.0.3/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14393+++ linux-3.0.3/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14394@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14395 EXPORT_SYMBOL(pm_power_off);
14396
14397 static const struct desc_ptr no_idt = {};
14398-static int reboot_mode;
14399+static unsigned short reboot_mode;
14400 enum reboot_type reboot_type = BOOT_ACPI;
14401 int reboot_force;
14402
14403@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14404 extern const unsigned char machine_real_restart_asm[];
14405 extern const u64 machine_real_restart_gdt[3];
14406
14407-void machine_real_restart(unsigned int type)
14408+__noreturn void machine_real_restart(unsigned int type)
14409 {
14410 void *restart_va;
14411 unsigned long restart_pa;
14412- void (*restart_lowmem)(unsigned int);
14413+ void (* __noreturn restart_lowmem)(unsigned int);
14414 u64 *lowmem_gdt;
14415
14416+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14417+ struct desc_struct *gdt;
14418+#endif
14419+
14420 local_irq_disable();
14421
14422 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14423@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14424 boot)". This seems like a fairly standard thing that gets set by
14425 REBOOT.COM programs, and the previous reset routine did this
14426 too. */
14427- *((unsigned short *)0x472) = reboot_mode;
14428+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14429
14430 /* Patch the GDT in the low memory trampoline */
14431 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14432
14433 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14434 restart_pa = virt_to_phys(restart_va);
14435- restart_lowmem = (void (*)(unsigned int))restart_pa;
14436+ restart_lowmem = (void *)restart_pa;
14437
14438 /* GDT[0]: GDT self-pointer */
14439 lowmem_gdt[0] =
14440@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14441 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14442
14443 /* Jump to the identity-mapped low memory code */
14444+
14445+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14446+ gdt = get_cpu_gdt_table(smp_processor_id());
14447+ pax_open_kernel();
14448+#ifdef CONFIG_PAX_MEMORY_UDEREF
14449+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14450+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14451+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14452+#endif
14453+#ifdef CONFIG_PAX_KERNEXEC
14454+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14455+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14456+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14457+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14458+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14459+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14460+#endif
14461+ pax_close_kernel();
14462+#endif
14463+
14464+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14465+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14466+ unreachable();
14467+#else
14468 restart_lowmem(type);
14469+#endif
14470+
14471 }
14472 #ifdef CONFIG_APM_MODULE
14473 EXPORT_SYMBOL(machine_real_restart);
14474@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14475 * try to force a triple fault and then cycle between hitting the keyboard
14476 * controller and doing that
14477 */
14478-static void native_machine_emergency_restart(void)
14479+__noreturn static void native_machine_emergency_restart(void)
14480 {
14481 int i;
14482 int attempt = 0;
14483@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14484 #endif
14485 }
14486
14487-static void __machine_emergency_restart(int emergency)
14488+static __noreturn void __machine_emergency_restart(int emergency)
14489 {
14490 reboot_emergency = emergency;
14491 machine_ops.emergency_restart();
14492 }
14493
14494-static void native_machine_restart(char *__unused)
14495+static __noreturn void native_machine_restart(char *__unused)
14496 {
14497 printk("machine restart\n");
14498
14499@@ -662,7 +692,7 @@ static void native_machine_restart(char
14500 __machine_emergency_restart(0);
14501 }
14502
14503-static void native_machine_halt(void)
14504+static __noreturn void native_machine_halt(void)
14505 {
14506 /* stop other cpus and apics */
14507 machine_shutdown();
14508@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14509 stop_this_cpu(NULL);
14510 }
14511
14512-static void native_machine_power_off(void)
14513+__noreturn static void native_machine_power_off(void)
14514 {
14515 if (pm_power_off) {
14516 if (!reboot_force)
14517@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14518 }
14519 /* a fallback in case there is no PM info available */
14520 tboot_shutdown(TB_SHUTDOWN_HALT);
14521+ unreachable();
14522 }
14523
14524 struct machine_ops machine_ops = {
14525diff -urNp linux-3.0.3/arch/x86/kernel/setup.c linux-3.0.3/arch/x86/kernel/setup.c
14526--- linux-3.0.3/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14527+++ linux-3.0.3/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14528@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14529 * area (640->1Mb) as ram even though it is not.
14530 * take them out.
14531 */
14532- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14533+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14534 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14535 }
14536
14537@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14538
14539 if (!boot_params.hdr.root_flags)
14540 root_mountflags &= ~MS_RDONLY;
14541- init_mm.start_code = (unsigned long) _text;
14542- init_mm.end_code = (unsigned long) _etext;
14543+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14544+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14545 init_mm.end_data = (unsigned long) _edata;
14546 init_mm.brk = _brk_end;
14547
14548- code_resource.start = virt_to_phys(_text);
14549- code_resource.end = virt_to_phys(_etext)-1;
14550- data_resource.start = virt_to_phys(_etext);
14551+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14552+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14553+ data_resource.start = virt_to_phys(_sdata);
14554 data_resource.end = virt_to_phys(_edata)-1;
14555 bss_resource.start = virt_to_phys(&__bss_start);
14556 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14557diff -urNp linux-3.0.3/arch/x86/kernel/setup_percpu.c linux-3.0.3/arch/x86/kernel/setup_percpu.c
14558--- linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14559+++ linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14560@@ -21,19 +21,17 @@
14561 #include <asm/cpu.h>
14562 #include <asm/stackprotector.h>
14563
14564-DEFINE_PER_CPU(int, cpu_number);
14565+#ifdef CONFIG_SMP
14566+DEFINE_PER_CPU(unsigned int, cpu_number);
14567 EXPORT_PER_CPU_SYMBOL(cpu_number);
14568+#endif
14569
14570-#ifdef CONFIG_X86_64
14571 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14572-#else
14573-#define BOOT_PERCPU_OFFSET 0
14574-#endif
14575
14576 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14577 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14578
14579-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14580+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14581 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14582 };
14583 EXPORT_SYMBOL(__per_cpu_offset);
14584@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14585 {
14586 #ifdef CONFIG_X86_32
14587 struct desc_struct gdt;
14588+ unsigned long base = per_cpu_offset(cpu);
14589
14590- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14591- 0x2 | DESCTYPE_S, 0x8);
14592- gdt.s = 1;
14593+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14594+ 0x83 | DESCTYPE_S, 0xC);
14595 write_gdt_entry(get_cpu_gdt_table(cpu),
14596 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14597 #endif
14598@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14599 /* alrighty, percpu areas up and running */
14600 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14601 for_each_possible_cpu(cpu) {
14602+#ifdef CONFIG_CC_STACKPROTECTOR
14603+#ifdef CONFIG_X86_32
14604+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14605+#endif
14606+#endif
14607 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14608 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14609 per_cpu(cpu_number, cpu) = cpu;
14610@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14611 */
14612 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14613 #endif
14614+#ifdef CONFIG_CC_STACKPROTECTOR
14615+#ifdef CONFIG_X86_32
14616+ if (!cpu)
14617+ per_cpu(stack_canary.canary, cpu) = canary;
14618+#endif
14619+#endif
14620 /*
14621 * Up to this point, the boot CPU has been using .init.data
14622 * area. Reload any changed state for the boot CPU.
14623diff -urNp linux-3.0.3/arch/x86/kernel/signal.c linux-3.0.3/arch/x86/kernel/signal.c
14624--- linux-3.0.3/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14625+++ linux-3.0.3/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14626@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14627 * Align the stack pointer according to the i386 ABI,
14628 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14629 */
14630- sp = ((sp + 4) & -16ul) - 4;
14631+ sp = ((sp - 12) & -16ul) - 4;
14632 #else /* !CONFIG_X86_32 */
14633 sp = round_down(sp, 16) - 8;
14634 #endif
14635@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14636 * Return an always-bogus address instead so we will die with SIGSEGV.
14637 */
14638 if (onsigstack && !likely(on_sig_stack(sp)))
14639- return (void __user *)-1L;
14640+ return (__force void __user *)-1L;
14641
14642 /* save i387 state */
14643 if (used_math() && save_i387_xstate(*fpstate) < 0)
14644- return (void __user *)-1L;
14645+ return (__force void __user *)-1L;
14646
14647 return (void __user *)sp;
14648 }
14649@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14650 }
14651
14652 if (current->mm->context.vdso)
14653- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14654+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14655 else
14656- restorer = &frame->retcode;
14657+ restorer = (void __user *)&frame->retcode;
14658 if (ka->sa.sa_flags & SA_RESTORER)
14659 restorer = ka->sa.sa_restorer;
14660
14661@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14662 * reasons and because gdb uses it as a signature to notice
14663 * signal handler stack frames.
14664 */
14665- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14666+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14667
14668 if (err)
14669 return -EFAULT;
14670@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14671 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14672
14673 /* Set up to return from userspace. */
14674- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14675+ if (current->mm->context.vdso)
14676+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14677+ else
14678+ restorer = (void __user *)&frame->retcode;
14679 if (ka->sa.sa_flags & SA_RESTORER)
14680 restorer = ka->sa.sa_restorer;
14681 put_user_ex(restorer, &frame->pretcode);
14682@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14683 * reasons and because gdb uses it as a signature to notice
14684 * signal handler stack frames.
14685 */
14686- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14687+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14688 } put_user_catch(err);
14689
14690 if (err)
14691@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14692 int signr;
14693 sigset_t *oldset;
14694
14695+ pax_track_stack();
14696+
14697 /*
14698 * We want the common case to go fast, which is why we may in certain
14699 * cases get here from kernel mode. Just return without doing anything
14700@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14701 * X86_32: vm86 regs switched out by assembly code before reaching
14702 * here, so testing against kernel CS suffices.
14703 */
14704- if (!user_mode(regs))
14705+ if (!user_mode_novm(regs))
14706 return;
14707
14708 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14709diff -urNp linux-3.0.3/arch/x86/kernel/smpboot.c linux-3.0.3/arch/x86/kernel/smpboot.c
14710--- linux-3.0.3/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14711+++ linux-3.0.3/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14712@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14713 set_idle_for_cpu(cpu, c_idle.idle);
14714 do_rest:
14715 per_cpu(current_task, cpu) = c_idle.idle;
14716+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14717 #ifdef CONFIG_X86_32
14718 /* Stack for startup_32 can be just as for start_secondary onwards */
14719 irq_ctx_init(cpu);
14720 #else
14721 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14722 initial_gs = per_cpu_offset(cpu);
14723- per_cpu(kernel_stack, cpu) =
14724- (unsigned long)task_stack_page(c_idle.idle) -
14725- KERNEL_STACK_OFFSET + THREAD_SIZE;
14726+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14727 #endif
14728+
14729+ pax_open_kernel();
14730 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14731+ pax_close_kernel();
14732+
14733 initial_code = (unsigned long)start_secondary;
14734 stack_start = c_idle.idle->thread.sp;
14735
14736@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14737
14738 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14739
14740+#ifdef CONFIG_PAX_PER_CPU_PGD
14741+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14742+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14743+ KERNEL_PGD_PTRS);
14744+#endif
14745+
14746 err = do_boot_cpu(apicid, cpu);
14747 if (err) {
14748 pr_debug("do_boot_cpu failed %d\n", err);
14749diff -urNp linux-3.0.3/arch/x86/kernel/step.c linux-3.0.3/arch/x86/kernel/step.c
14750--- linux-3.0.3/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14751+++ linux-3.0.3/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14752@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14753 struct desc_struct *desc;
14754 unsigned long base;
14755
14756- seg &= ~7UL;
14757+ seg >>= 3;
14758
14759 mutex_lock(&child->mm->context.lock);
14760- if (unlikely((seg >> 3) >= child->mm->context.size))
14761+ if (unlikely(seg >= child->mm->context.size))
14762 addr = -1L; /* bogus selector, access would fault */
14763 else {
14764 desc = child->mm->context.ldt + seg;
14765@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14766 addr += base;
14767 }
14768 mutex_unlock(&child->mm->context.lock);
14769- }
14770+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14771+ addr = ktla_ktva(addr);
14772
14773 return addr;
14774 }
14775@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14776 unsigned char opcode[15];
14777 unsigned long addr = convert_ip_to_linear(child, regs);
14778
14779+ if (addr == -EINVAL)
14780+ return 0;
14781+
14782 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14783 for (i = 0; i < copied; i++) {
14784 switch (opcode[i]) {
14785@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14786
14787 #ifdef CONFIG_X86_64
14788 case 0x40 ... 0x4f:
14789- if (regs->cs != __USER_CS)
14790+ if ((regs->cs & 0xffff) != __USER_CS)
14791 /* 32-bit mode: register increment */
14792 return 0;
14793 /* 64-bit mode: REX prefix */
14794diff -urNp linux-3.0.3/arch/x86/kernel/syscall_table_32.S linux-3.0.3/arch/x86/kernel/syscall_table_32.S
14795--- linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14796+++ linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14797@@ -1,3 +1,4 @@
14798+.section .rodata,"a",@progbits
14799 ENTRY(sys_call_table)
14800 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14801 .long sys_exit
14802diff -urNp linux-3.0.3/arch/x86/kernel/sys_i386_32.c linux-3.0.3/arch/x86/kernel/sys_i386_32.c
14803--- linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14804+++ linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14805@@ -24,17 +24,224 @@
14806
14807 #include <asm/syscalls.h>
14808
14809-/*
14810- * Do a system call from kernel instead of calling sys_execve so we
14811- * end up with proper pt_regs.
14812- */
14813-int kernel_execve(const char *filename,
14814- const char *const argv[],
14815- const char *const envp[])
14816+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14817 {
14818- long __res;
14819- asm volatile ("int $0x80"
14820- : "=a" (__res)
14821- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14822- return __res;
14823+ unsigned long pax_task_size = TASK_SIZE;
14824+
14825+#ifdef CONFIG_PAX_SEGMEXEC
14826+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14827+ pax_task_size = SEGMEXEC_TASK_SIZE;
14828+#endif
14829+
14830+ if (len > pax_task_size || addr > pax_task_size - len)
14831+ return -EINVAL;
14832+
14833+ return 0;
14834+}
14835+
14836+unsigned long
14837+arch_get_unmapped_area(struct file *filp, unsigned long addr,
14838+ unsigned long len, unsigned long pgoff, unsigned long flags)
14839+{
14840+ struct mm_struct *mm = current->mm;
14841+ struct vm_area_struct *vma;
14842+ unsigned long start_addr, pax_task_size = TASK_SIZE;
14843+
14844+#ifdef CONFIG_PAX_SEGMEXEC
14845+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14846+ pax_task_size = SEGMEXEC_TASK_SIZE;
14847+#endif
14848+
14849+ pax_task_size -= PAGE_SIZE;
14850+
14851+ if (len > pax_task_size)
14852+ return -ENOMEM;
14853+
14854+ if (flags & MAP_FIXED)
14855+ return addr;
14856+
14857+#ifdef CONFIG_PAX_RANDMMAP
14858+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14859+#endif
14860+
14861+ if (addr) {
14862+ addr = PAGE_ALIGN(addr);
14863+ if (pax_task_size - len >= addr) {
14864+ vma = find_vma(mm, addr);
14865+ if (check_heap_stack_gap(vma, addr, len))
14866+ return addr;
14867+ }
14868+ }
14869+ if (len > mm->cached_hole_size) {
14870+ start_addr = addr = mm->free_area_cache;
14871+ } else {
14872+ start_addr = addr = mm->mmap_base;
14873+ mm->cached_hole_size = 0;
14874+ }
14875+
14876+#ifdef CONFIG_PAX_PAGEEXEC
14877+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14878+ start_addr = 0x00110000UL;
14879+
14880+#ifdef CONFIG_PAX_RANDMMAP
14881+ if (mm->pax_flags & MF_PAX_RANDMMAP)
14882+ start_addr += mm->delta_mmap & 0x03FFF000UL;
14883+#endif
14884+
14885+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14886+ start_addr = addr = mm->mmap_base;
14887+ else
14888+ addr = start_addr;
14889+ }
14890+#endif
14891+
14892+full_search:
14893+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14894+ /* At this point: (!vma || addr < vma->vm_end). */
14895+ if (pax_task_size - len < addr) {
14896+ /*
14897+ * Start a new search - just in case we missed
14898+ * some holes.
14899+ */
14900+ if (start_addr != mm->mmap_base) {
14901+ start_addr = addr = mm->mmap_base;
14902+ mm->cached_hole_size = 0;
14903+ goto full_search;
14904+ }
14905+ return -ENOMEM;
14906+ }
14907+ if (check_heap_stack_gap(vma, addr, len))
14908+ break;
14909+ if (addr + mm->cached_hole_size < vma->vm_start)
14910+ mm->cached_hole_size = vma->vm_start - addr;
14911+ addr = vma->vm_end;
14912+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
14913+ start_addr = addr = mm->mmap_base;
14914+ mm->cached_hole_size = 0;
14915+ goto full_search;
14916+ }
14917+ }
14918+
14919+ /*
14920+ * Remember the place where we stopped the search:
14921+ */
14922+ mm->free_area_cache = addr + len;
14923+ return addr;
14924+}
14925+
14926+unsigned long
14927+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14928+ const unsigned long len, const unsigned long pgoff,
14929+ const unsigned long flags)
14930+{
14931+ struct vm_area_struct *vma;
14932+ struct mm_struct *mm = current->mm;
14933+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14934+
14935+#ifdef CONFIG_PAX_SEGMEXEC
14936+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
14937+ pax_task_size = SEGMEXEC_TASK_SIZE;
14938+#endif
14939+
14940+ pax_task_size -= PAGE_SIZE;
14941+
14942+ /* requested length too big for entire address space */
14943+ if (len > pax_task_size)
14944+ return -ENOMEM;
14945+
14946+ if (flags & MAP_FIXED)
14947+ return addr;
14948+
14949+#ifdef CONFIG_PAX_PAGEEXEC
14950+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14951+ goto bottomup;
14952+#endif
14953+
14954+#ifdef CONFIG_PAX_RANDMMAP
14955+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14956+#endif
14957+
14958+ /* requesting a specific address */
14959+ if (addr) {
14960+ addr = PAGE_ALIGN(addr);
14961+ if (pax_task_size - len >= addr) {
14962+ vma = find_vma(mm, addr);
14963+ if (check_heap_stack_gap(vma, addr, len))
14964+ return addr;
14965+ }
14966+ }
14967+
14968+ /* check if free_area_cache is useful for us */
14969+ if (len <= mm->cached_hole_size) {
14970+ mm->cached_hole_size = 0;
14971+ mm->free_area_cache = mm->mmap_base;
14972+ }
14973+
14974+ /* either no address requested or can't fit in requested address hole */
14975+ addr = mm->free_area_cache;
14976+
14977+ /* make sure it can fit in the remaining address space */
14978+ if (addr > len) {
14979+ vma = find_vma(mm, addr-len);
14980+ if (check_heap_stack_gap(vma, addr - len, len))
14981+ /* remember the address as a hint for next time */
14982+ return (mm->free_area_cache = addr-len);
14983+ }
14984+
14985+ if (mm->mmap_base < len)
14986+ goto bottomup;
14987+
14988+ addr = mm->mmap_base-len;
14989+
14990+ do {
14991+ /*
14992+ * Lookup failure means no vma is above this address,
14993+ * else if new region fits below vma->vm_start,
14994+ * return with success:
14995+ */
14996+ vma = find_vma(mm, addr);
14997+ if (check_heap_stack_gap(vma, addr, len))
14998+ /* remember the address as a hint for next time */
14999+ return (mm->free_area_cache = addr);
15000+
15001+ /* remember the largest hole we saw so far */
15002+ if (addr + mm->cached_hole_size < vma->vm_start)
15003+ mm->cached_hole_size = vma->vm_start - addr;
15004+
15005+ /* try just below the current vma->vm_start */
15006+ addr = skip_heap_stack_gap(vma, len);
15007+ } while (!IS_ERR_VALUE(addr));
15008+
15009+bottomup:
15010+ /*
15011+ * A failed mmap() very likely causes application failure,
15012+ * so fall back to the bottom-up function here. This scenario
15013+ * can happen with large stack limits and large mmap()
15014+ * allocations.
15015+ */
15016+
15017+#ifdef CONFIG_PAX_SEGMEXEC
15018+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15019+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15020+ else
15021+#endif
15022+
15023+ mm->mmap_base = TASK_UNMAPPED_BASE;
15024+
15025+#ifdef CONFIG_PAX_RANDMMAP
15026+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15027+ mm->mmap_base += mm->delta_mmap;
15028+#endif
15029+
15030+ mm->free_area_cache = mm->mmap_base;
15031+ mm->cached_hole_size = ~0UL;
15032+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15033+ /*
15034+ * Restore the topdown base:
15035+ */
15036+ mm->mmap_base = base;
15037+ mm->free_area_cache = base;
15038+ mm->cached_hole_size = ~0UL;
15039+
15040+ return addr;
15041 }
15042diff -urNp linux-3.0.3/arch/x86/kernel/sys_x86_64.c linux-3.0.3/arch/x86/kernel/sys_x86_64.c
15043--- linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15044+++ linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15045@@ -32,8 +32,8 @@ out:
15046 return error;
15047 }
15048
15049-static void find_start_end(unsigned long flags, unsigned long *begin,
15050- unsigned long *end)
15051+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15052+ unsigned long *begin, unsigned long *end)
15053 {
15054 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15055 unsigned long new_begin;
15056@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15057 *begin = new_begin;
15058 }
15059 } else {
15060- *begin = TASK_UNMAPPED_BASE;
15061+ *begin = mm->mmap_base;
15062 *end = TASK_SIZE;
15063 }
15064 }
15065@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15066 if (flags & MAP_FIXED)
15067 return addr;
15068
15069- find_start_end(flags, &begin, &end);
15070+ find_start_end(mm, flags, &begin, &end);
15071
15072 if (len > end)
15073 return -ENOMEM;
15074
15075+#ifdef CONFIG_PAX_RANDMMAP
15076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15077+#endif
15078+
15079 if (addr) {
15080 addr = PAGE_ALIGN(addr);
15081 vma = find_vma(mm, addr);
15082- if (end - len >= addr &&
15083- (!vma || addr + len <= vma->vm_start))
15084+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15085 return addr;
15086 }
15087 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15088@@ -106,7 +109,7 @@ full_search:
15089 }
15090 return -ENOMEM;
15091 }
15092- if (!vma || addr + len <= vma->vm_start) {
15093+ if (check_heap_stack_gap(vma, addr, len)) {
15094 /*
15095 * Remember the place where we stopped the search:
15096 */
15097@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15098 {
15099 struct vm_area_struct *vma;
15100 struct mm_struct *mm = current->mm;
15101- unsigned long addr = addr0;
15102+ unsigned long base = mm->mmap_base, addr = addr0;
15103
15104 /* requested length too big for entire address space */
15105 if (len > TASK_SIZE)
15106@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15107 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15108 goto bottomup;
15109
15110+#ifdef CONFIG_PAX_RANDMMAP
15111+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15112+#endif
15113+
15114 /* requesting a specific address */
15115 if (addr) {
15116 addr = PAGE_ALIGN(addr);
15117- vma = find_vma(mm, addr);
15118- if (TASK_SIZE - len >= addr &&
15119- (!vma || addr + len <= vma->vm_start))
15120- return addr;
15121+ if (TASK_SIZE - len >= addr) {
15122+ vma = find_vma(mm, addr);
15123+ if (check_heap_stack_gap(vma, addr, len))
15124+ return addr;
15125+ }
15126 }
15127
15128 /* check if free_area_cache is useful for us */
15129@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15130 /* make sure it can fit in the remaining address space */
15131 if (addr > len) {
15132 vma = find_vma(mm, addr-len);
15133- if (!vma || addr <= vma->vm_start)
15134+ if (check_heap_stack_gap(vma, addr - len, len))
15135 /* remember the address as a hint for next time */
15136 return mm->free_area_cache = addr-len;
15137 }
15138@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15139 * return with success:
15140 */
15141 vma = find_vma(mm, addr);
15142- if (!vma || addr+len <= vma->vm_start)
15143+ if (check_heap_stack_gap(vma, addr, len))
15144 /* remember the address as a hint for next time */
15145 return mm->free_area_cache = addr;
15146
15147@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15148 mm->cached_hole_size = vma->vm_start - addr;
15149
15150 /* try just below the current vma->vm_start */
15151- addr = vma->vm_start-len;
15152- } while (len < vma->vm_start);
15153+ addr = skip_heap_stack_gap(vma, len);
15154+ } while (!IS_ERR_VALUE(addr));
15155
15156 bottomup:
15157 /*
15158@@ -198,13 +206,21 @@ bottomup:
15159 * can happen with large stack limits and large mmap()
15160 * allocations.
15161 */
15162+ mm->mmap_base = TASK_UNMAPPED_BASE;
15163+
15164+#ifdef CONFIG_PAX_RANDMMAP
15165+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15166+ mm->mmap_base += mm->delta_mmap;
15167+#endif
15168+
15169+ mm->free_area_cache = mm->mmap_base;
15170 mm->cached_hole_size = ~0UL;
15171- mm->free_area_cache = TASK_UNMAPPED_BASE;
15172 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15173 /*
15174 * Restore the topdown base:
15175 */
15176- mm->free_area_cache = mm->mmap_base;
15177+ mm->mmap_base = base;
15178+ mm->free_area_cache = base;
15179 mm->cached_hole_size = ~0UL;
15180
15181 return addr;
15182diff -urNp linux-3.0.3/arch/x86/kernel/tboot.c linux-3.0.3/arch/x86/kernel/tboot.c
15183--- linux-3.0.3/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15184+++ linux-3.0.3/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15185@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15186
15187 void tboot_shutdown(u32 shutdown_type)
15188 {
15189- void (*shutdown)(void);
15190+ void (* __noreturn shutdown)(void);
15191
15192 if (!tboot_enabled())
15193 return;
15194@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15195
15196 switch_to_tboot_pt();
15197
15198- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15199+ shutdown = (void *)tboot->shutdown_entry;
15200 shutdown();
15201
15202 /* should not reach here */
15203@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15204 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15205 }
15206
15207-static atomic_t ap_wfs_count;
15208+static atomic_unchecked_t ap_wfs_count;
15209
15210 static int tboot_wait_for_aps(int num_aps)
15211 {
15212@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15213 {
15214 switch (action) {
15215 case CPU_DYING:
15216- atomic_inc(&ap_wfs_count);
15217+ atomic_inc_unchecked(&ap_wfs_count);
15218 if (num_online_cpus() == 1)
15219- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15220+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15221 return NOTIFY_BAD;
15222 break;
15223 }
15224@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15225
15226 tboot_create_trampoline();
15227
15228- atomic_set(&ap_wfs_count, 0);
15229+ atomic_set_unchecked(&ap_wfs_count, 0);
15230 register_hotcpu_notifier(&tboot_cpu_notifier);
15231 return 0;
15232 }
15233diff -urNp linux-3.0.3/arch/x86/kernel/time.c linux-3.0.3/arch/x86/kernel/time.c
15234--- linux-3.0.3/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15235+++ linux-3.0.3/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15236@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15237 {
15238 unsigned long pc = instruction_pointer(regs);
15239
15240- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15241+ if (!user_mode(regs) && in_lock_functions(pc)) {
15242 #ifdef CONFIG_FRAME_POINTER
15243- return *(unsigned long *)(regs->bp + sizeof(long));
15244+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15245 #else
15246 unsigned long *sp =
15247 (unsigned long *)kernel_stack_pointer(regs);
15248@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15249 * or above a saved flags. Eflags has bits 22-31 zero,
15250 * kernel addresses don't.
15251 */
15252+
15253+#ifdef CONFIG_PAX_KERNEXEC
15254+ return ktla_ktva(sp[0]);
15255+#else
15256 if (sp[0] >> 22)
15257 return sp[0];
15258 if (sp[1] >> 22)
15259 return sp[1];
15260 #endif
15261+
15262+#endif
15263 }
15264 return pc;
15265 }
15266diff -urNp linux-3.0.3/arch/x86/kernel/tls.c linux-3.0.3/arch/x86/kernel/tls.c
15267--- linux-3.0.3/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15268+++ linux-3.0.3/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15269@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15270 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15271 return -EINVAL;
15272
15273+#ifdef CONFIG_PAX_SEGMEXEC
15274+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15275+ return -EINVAL;
15276+#endif
15277+
15278 set_tls_desc(p, idx, &info, 1);
15279
15280 return 0;
15281diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_32.S linux-3.0.3/arch/x86/kernel/trampoline_32.S
15282--- linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15283+++ linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15284@@ -32,6 +32,12 @@
15285 #include <asm/segment.h>
15286 #include <asm/page_types.h>
15287
15288+#ifdef CONFIG_PAX_KERNEXEC
15289+#define ta(X) (X)
15290+#else
15291+#define ta(X) ((X) - __PAGE_OFFSET)
15292+#endif
15293+
15294 #ifdef CONFIG_SMP
15295
15296 .section ".x86_trampoline","a"
15297@@ -62,7 +68,7 @@ r_base = .
15298 inc %ax # protected mode (PE) bit
15299 lmsw %ax # into protected mode
15300 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15301- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15302+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15303
15304 # These need to be in the same 64K segment as the above;
15305 # hence we don't use the boot_gdt_descr defined in head.S
15306diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_64.S linux-3.0.3/arch/x86/kernel/trampoline_64.S
15307--- linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15308+++ linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15309@@ -90,7 +90,7 @@ startup_32:
15310 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15311 movl %eax, %ds
15312
15313- movl $X86_CR4_PAE, %eax
15314+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15315 movl %eax, %cr4 # Enable PAE mode
15316
15317 # Setup trampoline 4 level pagetables
15318@@ -138,7 +138,7 @@ tidt:
15319 # so the kernel can live anywhere
15320 .balign 4
15321 tgdt:
15322- .short tgdt_end - tgdt # gdt limit
15323+ .short tgdt_end - tgdt - 1 # gdt limit
15324 .long tgdt - r_base
15325 .short 0
15326 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15327diff -urNp linux-3.0.3/arch/x86/kernel/traps.c linux-3.0.3/arch/x86/kernel/traps.c
15328--- linux-3.0.3/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15329+++ linux-3.0.3/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15330@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15331
15332 /* Do we ignore FPU interrupts ? */
15333 char ignore_fpu_irq;
15334-
15335-/*
15336- * The IDT has to be page-aligned to simplify the Pentium
15337- * F0 0F bug workaround.
15338- */
15339-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15340 #endif
15341
15342 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15343@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15344 }
15345
15346 static void __kprobes
15347-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15348+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15349 long error_code, siginfo_t *info)
15350 {
15351 struct task_struct *tsk = current;
15352
15353 #ifdef CONFIG_X86_32
15354- if (regs->flags & X86_VM_MASK) {
15355+ if (v8086_mode(regs)) {
15356 /*
15357 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15358 * On nmi (interrupt 2), do_trap should not be called.
15359@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15360 }
15361 #endif
15362
15363- if (!user_mode(regs))
15364+ if (!user_mode_novm(regs))
15365 goto kernel_trap;
15366
15367 #ifdef CONFIG_X86_32
15368@@ -157,7 +151,7 @@ trap_signal:
15369 printk_ratelimit()) {
15370 printk(KERN_INFO
15371 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15372- tsk->comm, tsk->pid, str,
15373+ tsk->comm, task_pid_nr(tsk), str,
15374 regs->ip, regs->sp, error_code);
15375 print_vma_addr(" in ", regs->ip);
15376 printk("\n");
15377@@ -174,8 +168,20 @@ kernel_trap:
15378 if (!fixup_exception(regs)) {
15379 tsk->thread.error_code = error_code;
15380 tsk->thread.trap_no = trapnr;
15381+
15382+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15383+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15384+ str = "PAX: suspicious stack segment fault";
15385+#endif
15386+
15387 die(str, regs, error_code);
15388 }
15389+
15390+#ifdef CONFIG_PAX_REFCOUNT
15391+ if (trapnr == 4)
15392+ pax_report_refcount_overflow(regs);
15393+#endif
15394+
15395 return;
15396
15397 #ifdef CONFIG_X86_32
15398@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15399 conditional_sti(regs);
15400
15401 #ifdef CONFIG_X86_32
15402- if (regs->flags & X86_VM_MASK)
15403+ if (v8086_mode(regs))
15404 goto gp_in_vm86;
15405 #endif
15406
15407 tsk = current;
15408- if (!user_mode(regs))
15409+ if (!user_mode_novm(regs))
15410 goto gp_in_kernel;
15411
15412+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15413+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15414+ struct mm_struct *mm = tsk->mm;
15415+ unsigned long limit;
15416+
15417+ down_write(&mm->mmap_sem);
15418+ limit = mm->context.user_cs_limit;
15419+ if (limit < TASK_SIZE) {
15420+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15421+ up_write(&mm->mmap_sem);
15422+ return;
15423+ }
15424+ up_write(&mm->mmap_sem);
15425+ }
15426+#endif
15427+
15428 tsk->thread.error_code = error_code;
15429 tsk->thread.trap_no = 13;
15430
15431@@ -304,6 +326,13 @@ gp_in_kernel:
15432 if (notify_die(DIE_GPF, "general protection fault", regs,
15433 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15434 return;
15435+
15436+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15437+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15438+ die("PAX: suspicious general protection fault", regs, error_code);
15439+ else
15440+#endif
15441+
15442 die("general protection fault", regs, error_code);
15443 }
15444
15445@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15446 dotraplinkage notrace __kprobes void
15447 do_nmi(struct pt_regs *regs, long error_code)
15448 {
15449+
15450+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15451+ if (!user_mode(regs)) {
15452+ unsigned long cs = regs->cs & 0xFFFF;
15453+ unsigned long ip = ktva_ktla(regs->ip);
15454+
15455+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15456+ regs->ip = ip;
15457+ }
15458+#endif
15459+
15460 nmi_enter();
15461
15462 inc_irq_stat(__nmi_count);
15463@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15464 /* It's safe to allow irq's after DR6 has been saved */
15465 preempt_conditional_sti(regs);
15466
15467- if (regs->flags & X86_VM_MASK) {
15468+ if (v8086_mode(regs)) {
15469 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15470 error_code, 1);
15471 preempt_conditional_cli(regs);
15472@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15473 * We already checked v86 mode above, so we can check for kernel mode
15474 * by just checking the CPL of CS.
15475 */
15476- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15477+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15478 tsk->thread.debugreg6 &= ~DR_STEP;
15479 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15480 regs->flags &= ~X86_EFLAGS_TF;
15481@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15482 return;
15483 conditional_sti(regs);
15484
15485- if (!user_mode_vm(regs))
15486+ if (!user_mode(regs))
15487 {
15488 if (!fixup_exception(regs)) {
15489 task->thread.error_code = error_code;
15490@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15491 void __math_state_restore(void)
15492 {
15493 struct thread_info *thread = current_thread_info();
15494- struct task_struct *tsk = thread->task;
15495+ struct task_struct *tsk = current;
15496
15497 /*
15498 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15499@@ -750,8 +790,7 @@ void __math_state_restore(void)
15500 */
15501 asmlinkage void math_state_restore(void)
15502 {
15503- struct thread_info *thread = current_thread_info();
15504- struct task_struct *tsk = thread->task;
15505+ struct task_struct *tsk = current;
15506
15507 if (!tsk_used_math(tsk)) {
15508 local_irq_enable();
15509diff -urNp linux-3.0.3/arch/x86/kernel/verify_cpu.S linux-3.0.3/arch/x86/kernel/verify_cpu.S
15510--- linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15511+++ linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15512@@ -20,6 +20,7 @@
15513 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15514 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15515 * arch/x86/kernel/head_32.S: processor startup
15516+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15517 *
15518 * verify_cpu, returns the status of longmode and SSE in register %eax.
15519 * 0: Success 1: Failure
15520diff -urNp linux-3.0.3/arch/x86/kernel/vm86_32.c linux-3.0.3/arch/x86/kernel/vm86_32.c
15521--- linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15522+++ linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15523@@ -41,6 +41,7 @@
15524 #include <linux/ptrace.h>
15525 #include <linux/audit.h>
15526 #include <linux/stddef.h>
15527+#include <linux/grsecurity.h>
15528
15529 #include <asm/uaccess.h>
15530 #include <asm/io.h>
15531@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15532 do_exit(SIGSEGV);
15533 }
15534
15535- tss = &per_cpu(init_tss, get_cpu());
15536+ tss = init_tss + get_cpu();
15537 current->thread.sp0 = current->thread.saved_sp0;
15538 current->thread.sysenter_cs = __KERNEL_CS;
15539 load_sp0(tss, &current->thread);
15540@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15541 struct task_struct *tsk;
15542 int tmp, ret = -EPERM;
15543
15544+#ifdef CONFIG_GRKERNSEC_VM86
15545+ if (!capable(CAP_SYS_RAWIO)) {
15546+ gr_handle_vm86();
15547+ goto out;
15548+ }
15549+#endif
15550+
15551 tsk = current;
15552 if (tsk->thread.saved_sp0)
15553 goto out;
15554@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15555 int tmp, ret;
15556 struct vm86plus_struct __user *v86;
15557
15558+#ifdef CONFIG_GRKERNSEC_VM86
15559+ if (!capable(CAP_SYS_RAWIO)) {
15560+ gr_handle_vm86();
15561+ ret = -EPERM;
15562+ goto out;
15563+ }
15564+#endif
15565+
15566 tsk = current;
15567 switch (cmd) {
15568 case VM86_REQUEST_IRQ:
15569@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15570 tsk->thread.saved_fs = info->regs32->fs;
15571 tsk->thread.saved_gs = get_user_gs(info->regs32);
15572
15573- tss = &per_cpu(init_tss, get_cpu());
15574+ tss = init_tss + get_cpu();
15575 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15576 if (cpu_has_sep)
15577 tsk->thread.sysenter_cs = 0;
15578@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15579 goto cannot_handle;
15580 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15581 goto cannot_handle;
15582- intr_ptr = (unsigned long __user *) (i << 2);
15583+ intr_ptr = (__force unsigned long __user *) (i << 2);
15584 if (get_user(segoffs, intr_ptr))
15585 goto cannot_handle;
15586 if ((segoffs >> 16) == BIOSSEG)
15587diff -urNp linux-3.0.3/arch/x86/kernel/vmlinux.lds.S linux-3.0.3/arch/x86/kernel/vmlinux.lds.S
15588--- linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15589+++ linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15590@@ -26,6 +26,13 @@
15591 #include <asm/page_types.h>
15592 #include <asm/cache.h>
15593 #include <asm/boot.h>
15594+#include <asm/segment.h>
15595+
15596+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15597+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15598+#else
15599+#define __KERNEL_TEXT_OFFSET 0
15600+#endif
15601
15602 #undef i386 /* in case the preprocessor is a 32bit one */
15603
15604@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15605
15606 PHDRS {
15607 text PT_LOAD FLAGS(5); /* R_E */
15608+#ifdef CONFIG_X86_32
15609+ module PT_LOAD FLAGS(5); /* R_E */
15610+#endif
15611+#ifdef CONFIG_XEN
15612+ rodata PT_LOAD FLAGS(5); /* R_E */
15613+#else
15614+ rodata PT_LOAD FLAGS(4); /* R__ */
15615+#endif
15616 data PT_LOAD FLAGS(6); /* RW_ */
15617 #ifdef CONFIG_X86_64
15618 user PT_LOAD FLAGS(5); /* R_E */
15619+#endif
15620+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15621 #ifdef CONFIG_SMP
15622 percpu PT_LOAD FLAGS(6); /* RW_ */
15623 #endif
15624+ text.init PT_LOAD FLAGS(5); /* R_E */
15625+ text.exit PT_LOAD FLAGS(5); /* R_E */
15626 init PT_LOAD FLAGS(7); /* RWE */
15627-#endif
15628 note PT_NOTE FLAGS(0); /* ___ */
15629 }
15630
15631 SECTIONS
15632 {
15633 #ifdef CONFIG_X86_32
15634- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15635- phys_startup_32 = startup_32 - LOAD_OFFSET;
15636+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15637 #else
15638- . = __START_KERNEL;
15639- phys_startup_64 = startup_64 - LOAD_OFFSET;
15640+ . = __START_KERNEL;
15641 #endif
15642
15643 /* Text and read-only data */
15644- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15645- _text = .;
15646+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15647 /* bootstrapping code */
15648+#ifdef CONFIG_X86_32
15649+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15650+#else
15651+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15652+#endif
15653+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15654+ _text = .;
15655 HEAD_TEXT
15656 #ifdef CONFIG_X86_32
15657 . = ALIGN(PAGE_SIZE);
15658@@ -109,13 +131,47 @@ SECTIONS
15659 IRQENTRY_TEXT
15660 *(.fixup)
15661 *(.gnu.warning)
15662- /* End of text section */
15663- _etext = .;
15664 } :text = 0x9090
15665
15666- NOTES :text :note
15667+ . += __KERNEL_TEXT_OFFSET;
15668+
15669+#ifdef CONFIG_X86_32
15670+ . = ALIGN(PAGE_SIZE);
15671+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15672+
15673+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15674+ MODULES_EXEC_VADDR = .;
15675+ BYTE(0)
15676+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15677+ . = ALIGN(HPAGE_SIZE);
15678+ MODULES_EXEC_END = . - 1;
15679+#endif
15680+
15681+ } :module
15682+#endif
15683+
15684+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15685+ /* End of text section */
15686+ _etext = . - __KERNEL_TEXT_OFFSET;
15687+ }
15688+
15689+#ifdef CONFIG_X86_32
15690+ . = ALIGN(PAGE_SIZE);
15691+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15692+ *(.idt)
15693+ . = ALIGN(PAGE_SIZE);
15694+ *(.empty_zero_page)
15695+ *(.initial_pg_fixmap)
15696+ *(.initial_pg_pmd)
15697+ *(.initial_page_table)
15698+ *(.swapper_pg_dir)
15699+ } :rodata
15700+#endif
15701+
15702+ . = ALIGN(PAGE_SIZE);
15703+ NOTES :rodata :note
15704
15705- EXCEPTION_TABLE(16) :text = 0x9090
15706+ EXCEPTION_TABLE(16) :rodata
15707
15708 #if defined(CONFIG_DEBUG_RODATA)
15709 /* .text should occupy whole number of pages */
15710@@ -127,16 +183,20 @@ SECTIONS
15711
15712 /* Data */
15713 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15714+
15715+#ifdef CONFIG_PAX_KERNEXEC
15716+ . = ALIGN(HPAGE_SIZE);
15717+#else
15718+ . = ALIGN(PAGE_SIZE);
15719+#endif
15720+
15721 /* Start of data section */
15722 _sdata = .;
15723
15724 /* init_task */
15725 INIT_TASK_DATA(THREAD_SIZE)
15726
15727-#ifdef CONFIG_X86_32
15728- /* 32 bit has nosave before _edata */
15729 NOSAVE_DATA
15730-#endif
15731
15732 PAGE_ALIGNED_DATA(PAGE_SIZE)
15733
15734@@ -208,12 +268,19 @@ SECTIONS
15735 #endif /* CONFIG_X86_64 */
15736
15737 /* Init code and data - will be freed after init */
15738- . = ALIGN(PAGE_SIZE);
15739 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15740+ BYTE(0)
15741+
15742+#ifdef CONFIG_PAX_KERNEXEC
15743+ . = ALIGN(HPAGE_SIZE);
15744+#else
15745+ . = ALIGN(PAGE_SIZE);
15746+#endif
15747+
15748 __init_begin = .; /* paired with __init_end */
15749- }
15750+ } :init.begin
15751
15752-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15753+#ifdef CONFIG_SMP
15754 /*
15755 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15756 * output PHDR, so the next output section - .init.text - should
15757@@ -222,12 +289,27 @@ SECTIONS
15758 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15759 #endif
15760
15761- INIT_TEXT_SECTION(PAGE_SIZE)
15762-#ifdef CONFIG_X86_64
15763- :init
15764-#endif
15765+ . = ALIGN(PAGE_SIZE);
15766+ init_begin = .;
15767+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15768+ VMLINUX_SYMBOL(_sinittext) = .;
15769+ INIT_TEXT
15770+ VMLINUX_SYMBOL(_einittext) = .;
15771+ . = ALIGN(PAGE_SIZE);
15772+ } :text.init
15773
15774- INIT_DATA_SECTION(16)
15775+ /*
15776+ * .exit.text is discard at runtime, not link time, to deal with
15777+ * references from .altinstructions and .eh_frame
15778+ */
15779+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15780+ EXIT_TEXT
15781+ . = ALIGN(16);
15782+ } :text.exit
15783+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15784+
15785+ . = ALIGN(PAGE_SIZE);
15786+ INIT_DATA_SECTION(16) :init
15787
15788 /*
15789 * Code and data for a variety of lowlevel trampolines, to be
15790@@ -301,19 +383,12 @@ SECTIONS
15791 }
15792
15793 . = ALIGN(8);
15794- /*
15795- * .exit.text is discard at runtime, not link time, to deal with
15796- * references from .altinstructions and .eh_frame
15797- */
15798- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15799- EXIT_TEXT
15800- }
15801
15802 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15803 EXIT_DATA
15804 }
15805
15806-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15807+#ifndef CONFIG_SMP
15808 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15809 #endif
15810
15811@@ -332,16 +407,10 @@ SECTIONS
15812 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15813 __smp_locks = .;
15814 *(.smp_locks)
15815- . = ALIGN(PAGE_SIZE);
15816 __smp_locks_end = .;
15817+ . = ALIGN(PAGE_SIZE);
15818 }
15819
15820-#ifdef CONFIG_X86_64
15821- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15822- NOSAVE_DATA
15823- }
15824-#endif
15825-
15826 /* BSS */
15827 . = ALIGN(PAGE_SIZE);
15828 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15829@@ -357,6 +426,7 @@ SECTIONS
15830 __brk_base = .;
15831 . += 64 * 1024; /* 64k alignment slop space */
15832 *(.brk_reservation) /* areas brk users have reserved */
15833+ . = ALIGN(HPAGE_SIZE);
15834 __brk_limit = .;
15835 }
15836
15837@@ -383,13 +453,12 @@ SECTIONS
15838 * for the boot processor.
15839 */
15840 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15841-INIT_PER_CPU(gdt_page);
15842 INIT_PER_CPU(irq_stack_union);
15843
15844 /*
15845 * Build-time check on the image size:
15846 */
15847-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15848+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15849 "kernel image bigger than KERNEL_IMAGE_SIZE");
15850
15851 #ifdef CONFIG_SMP
15852diff -urNp linux-3.0.3/arch/x86/kernel/vsyscall_64.c linux-3.0.3/arch/x86/kernel/vsyscall_64.c
15853--- linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15854+++ linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15855@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15856 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15857 {
15858 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15859- .sysctl_enabled = 1,
15860+ .sysctl_enabled = 0,
15861 };
15862
15863 void update_vsyscall_tz(void)
15864@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15865 static ctl_table kernel_table2[] = {
15866 { .procname = "vsyscall64",
15867 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15868- .mode = 0644,
15869+ .mode = 0444,
15870 .proc_handler = proc_dointvec },
15871 {}
15872 };
15873diff -urNp linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c
15874--- linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15875+++ linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15876@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15877 EXPORT_SYMBOL(copy_user_generic_string);
15878 EXPORT_SYMBOL(copy_user_generic_unrolled);
15879 EXPORT_SYMBOL(__copy_user_nocache);
15880-EXPORT_SYMBOL(_copy_from_user);
15881-EXPORT_SYMBOL(_copy_to_user);
15882
15883 EXPORT_SYMBOL(copy_page);
15884 EXPORT_SYMBOL(clear_page);
15885diff -urNp linux-3.0.3/arch/x86/kernel/xsave.c linux-3.0.3/arch/x86/kernel/xsave.c
15886--- linux-3.0.3/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15887+++ linux-3.0.3/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15888@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15889 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15890 return -EINVAL;
15891
15892- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15893+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15894 fx_sw_user->extended_size -
15895 FP_XSTATE_MAGIC2_SIZE));
15896 if (err)
15897@@ -267,7 +267,7 @@ fx_only:
15898 * the other extended state.
15899 */
15900 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15901- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15902+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15903 }
15904
15905 /*
15906@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15907 if (use_xsave())
15908 err = restore_user_xstate(buf);
15909 else
15910- err = fxrstor_checking((__force struct i387_fxsave_struct *)
15911+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
15912 buf);
15913 if (unlikely(err)) {
15914 /*
15915diff -urNp linux-3.0.3/arch/x86/kvm/emulate.c linux-3.0.3/arch/x86/kvm/emulate.c
15916--- linux-3.0.3/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15917+++ linux-3.0.3/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15918@@ -96,7 +96,7 @@
15919 #define Src2ImmByte (2<<29)
15920 #define Src2One (3<<29)
15921 #define Src2Imm (4<<29)
15922-#define Src2Mask (7<<29)
15923+#define Src2Mask (7U<<29)
15924
15925 #define X2(x...) x, x
15926 #define X3(x...) X2(x), x
15927@@ -207,6 +207,7 @@ struct gprefix {
15928
15929 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15930 do { \
15931+ unsigned long _tmp; \
15932 __asm__ __volatile__ ( \
15933 _PRE_EFLAGS("0", "4", "2") \
15934 _op _suffix " %"_x"3,%1; " \
15935@@ -220,8 +221,6 @@ struct gprefix {
15936 /* Raw emulation: instruction has two explicit operands. */
15937 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15938 do { \
15939- unsigned long _tmp; \
15940- \
15941 switch ((_dst).bytes) { \
15942 case 2: \
15943 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15944@@ -237,7 +236,6 @@ struct gprefix {
15945
15946 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15947 do { \
15948- unsigned long _tmp; \
15949 switch ((_dst).bytes) { \
15950 case 1: \
15951 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15952diff -urNp linux-3.0.3/arch/x86/kvm/lapic.c linux-3.0.3/arch/x86/kvm/lapic.c
15953--- linux-3.0.3/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15954+++ linux-3.0.3/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15955@@ -53,7 +53,7 @@
15956 #define APIC_BUS_CYCLE_NS 1
15957
15958 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15959-#define apic_debug(fmt, arg...)
15960+#define apic_debug(fmt, arg...) do {} while (0)
15961
15962 #define APIC_LVT_NUM 6
15963 /* 14 is the version for Xeon and Pentium 8.4.8*/
15964diff -urNp linux-3.0.3/arch/x86/kvm/mmu.c linux-3.0.3/arch/x86/kvm/mmu.c
15965--- linux-3.0.3/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15966+++ linux-3.0.3/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15967@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968
15969 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15970
15971- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15972+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15973
15974 /*
15975 * Assume that the pte write on a page table of the same type
15976@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15977 }
15978
15979 spin_lock(&vcpu->kvm->mmu_lock);
15980- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15981+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15982 gentry = 0;
15983 kvm_mmu_free_some_pages(vcpu);
15984 ++vcpu->kvm->stat.mmu_pte_write;
15985diff -urNp linux-3.0.3/arch/x86/kvm/paging_tmpl.h linux-3.0.3/arch/x86/kvm/paging_tmpl.h
15986--- linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15987+++ linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15988@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15989 unsigned long mmu_seq;
15990 bool map_writable;
15991
15992+ pax_track_stack();
15993+
15994 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15995
15996 r = mmu_topup_memory_caches(vcpu);
15997@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15998 if (need_flush)
15999 kvm_flush_remote_tlbs(vcpu->kvm);
16000
16001- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16002+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16003
16004 spin_unlock(&vcpu->kvm->mmu_lock);
16005
16006diff -urNp linux-3.0.3/arch/x86/kvm/svm.c linux-3.0.3/arch/x86/kvm/svm.c
16007--- linux-3.0.3/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16008+++ linux-3.0.3/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16009@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16010 int cpu = raw_smp_processor_id();
16011
16012 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16013+
16014+ pax_open_kernel();
16015 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16016+ pax_close_kernel();
16017+
16018 load_TR_desc();
16019 }
16020
16021@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16022 #endif
16023 #endif
16024
16025+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16026+ __set_fs(current_thread_info()->addr_limit);
16027+#endif
16028+
16029 reload_tss(vcpu);
16030
16031 local_irq_disable();
16032diff -urNp linux-3.0.3/arch/x86/kvm/vmx.c linux-3.0.3/arch/x86/kvm/vmx.c
16033--- linux-3.0.3/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16034+++ linux-3.0.3/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16035@@ -797,7 +797,11 @@ static void reload_tss(void)
16036 struct desc_struct *descs;
16037
16038 descs = (void *)gdt->address;
16039+
16040+ pax_open_kernel();
16041 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16042+ pax_close_kernel();
16043+
16044 load_TR_desc();
16045 }
16046
16047@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16048 if (!cpu_has_vmx_flexpriority())
16049 flexpriority_enabled = 0;
16050
16051- if (!cpu_has_vmx_tpr_shadow())
16052- kvm_x86_ops->update_cr8_intercept = NULL;
16053+ if (!cpu_has_vmx_tpr_shadow()) {
16054+ pax_open_kernel();
16055+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16056+ pax_close_kernel();
16057+ }
16058
16059 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16060 kvm_disable_largepages();
16061@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16062 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16063
16064 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16065- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16066+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16067 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16068 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16069 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16070@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16071 "jmp .Lkvm_vmx_return \n\t"
16072 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16073 ".Lkvm_vmx_return: "
16074+
16075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16076+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16077+ ".Lkvm_vmx_return2: "
16078+#endif
16079+
16080 /* Save guest registers, load host registers, keep flags */
16081 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16082 "pop %0 \n\t"
16083@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16084 #endif
16085 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16086 [wordsize]"i"(sizeof(ulong))
16087+
16088+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16089+ ,[cs]"i"(__KERNEL_CS)
16090+#endif
16091+
16092 : "cc", "memory"
16093 , R"ax", R"bx", R"di", R"si"
16094 #ifdef CONFIG_X86_64
16095@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16096
16097 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16098
16099- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16100+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16101+
16102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16103+ loadsegment(fs, __KERNEL_PERCPU);
16104+#endif
16105+
16106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16107+ __set_fs(current_thread_info()->addr_limit);
16108+#endif
16109+
16110 vmx->launched = 1;
16111
16112 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16113diff -urNp linux-3.0.3/arch/x86/kvm/x86.c linux-3.0.3/arch/x86/kvm/x86.c
16114--- linux-3.0.3/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16115+++ linux-3.0.3/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16116@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16117 if (n < msr_list.nmsrs)
16118 goto out;
16119 r = -EFAULT;
16120+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16121+ goto out;
16122 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16123 num_msrs_to_save * sizeof(u32)))
16124 goto out;
16125@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16126 struct kvm_cpuid2 *cpuid,
16127 struct kvm_cpuid_entry2 __user *entries)
16128 {
16129- int r;
16130+ int r, i;
16131
16132 r = -E2BIG;
16133 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16134 goto out;
16135 r = -EFAULT;
16136- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16137- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16138+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16139 goto out;
16140+ for (i = 0; i < cpuid->nent; ++i) {
16141+ struct kvm_cpuid_entry2 cpuid_entry;
16142+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16143+ goto out;
16144+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16145+ }
16146 vcpu->arch.cpuid_nent = cpuid->nent;
16147 kvm_apic_set_version(vcpu);
16148 kvm_x86_ops->cpuid_update(vcpu);
16149@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16150 struct kvm_cpuid2 *cpuid,
16151 struct kvm_cpuid_entry2 __user *entries)
16152 {
16153- int r;
16154+ int r, i;
16155
16156 r = -E2BIG;
16157 if (cpuid->nent < vcpu->arch.cpuid_nent)
16158 goto out;
16159 r = -EFAULT;
16160- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16161- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16162+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16163 goto out;
16164+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16165+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16166+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16167+ goto out;
16168+ }
16169 return 0;
16170
16171 out:
16172@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16173 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16174 struct kvm_interrupt *irq)
16175 {
16176- if (irq->irq < 0 || irq->irq >= 256)
16177+ if (irq->irq >= 256)
16178 return -EINVAL;
16179 if (irqchip_in_kernel(vcpu->kvm))
16180 return -ENXIO;
16181@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16182 }
16183 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16184
16185-int kvm_arch_init(void *opaque)
16186+int kvm_arch_init(const void *opaque)
16187 {
16188 int r;
16189 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16190diff -urNp linux-3.0.3/arch/x86/lguest/boot.c linux-3.0.3/arch/x86/lguest/boot.c
16191--- linux-3.0.3/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16192+++ linux-3.0.3/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16193@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16194 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16195 * Launcher to reboot us.
16196 */
16197-static void lguest_restart(char *reason)
16198+static __noreturn void lguest_restart(char *reason)
16199 {
16200 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16201+ BUG();
16202 }
16203
16204 /*G:050
16205diff -urNp linux-3.0.3/arch/x86/lib/atomic64_32.c linux-3.0.3/arch/x86/lib/atomic64_32.c
16206--- linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16207+++ linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16208@@ -8,18 +8,30 @@
16209
16210 long long atomic64_read_cx8(long long, const atomic64_t *v);
16211 EXPORT_SYMBOL(atomic64_read_cx8);
16212+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16213+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16214 long long atomic64_set_cx8(long long, const atomic64_t *v);
16215 EXPORT_SYMBOL(atomic64_set_cx8);
16216+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16217+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16218 long long atomic64_xchg_cx8(long long, unsigned high);
16219 EXPORT_SYMBOL(atomic64_xchg_cx8);
16220 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16221 EXPORT_SYMBOL(atomic64_add_return_cx8);
16222+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16223+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16224 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16225 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16226+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16227+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16228 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16229 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16230+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16231+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16232 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16234+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16235+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16236 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16237 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16238 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16239@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16240 #ifndef CONFIG_X86_CMPXCHG64
16241 long long atomic64_read_386(long long, const atomic64_t *v);
16242 EXPORT_SYMBOL(atomic64_read_386);
16243+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16244+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16245 long long atomic64_set_386(long long, const atomic64_t *v);
16246 EXPORT_SYMBOL(atomic64_set_386);
16247+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16248+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16249 long long atomic64_xchg_386(long long, unsigned high);
16250 EXPORT_SYMBOL(atomic64_xchg_386);
16251 long long atomic64_add_return_386(long long a, atomic64_t *v);
16252 EXPORT_SYMBOL(atomic64_add_return_386);
16253+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16254+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16255 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16256 EXPORT_SYMBOL(atomic64_sub_return_386);
16257+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16258+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16259 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16260 EXPORT_SYMBOL(atomic64_inc_return_386);
16261+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16262+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16263 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16264 EXPORT_SYMBOL(atomic64_dec_return_386);
16265+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16266+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16267 long long atomic64_add_386(long long a, atomic64_t *v);
16268 EXPORT_SYMBOL(atomic64_add_386);
16269+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16270+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16271 long long atomic64_sub_386(long long a, atomic64_t *v);
16272 EXPORT_SYMBOL(atomic64_sub_386);
16273+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16274+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16275 long long atomic64_inc_386(long long a, atomic64_t *v);
16276 EXPORT_SYMBOL(atomic64_inc_386);
16277+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16278+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16279 long long atomic64_dec_386(long long a, atomic64_t *v);
16280 EXPORT_SYMBOL(atomic64_dec_386);
16281+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16282+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16283 long long atomic64_dec_if_positive_386(atomic64_t *v);
16284 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16285 int atomic64_inc_not_zero_386(atomic64_t *v);
16286diff -urNp linux-3.0.3/arch/x86/lib/atomic64_386_32.S linux-3.0.3/arch/x86/lib/atomic64_386_32.S
16287--- linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16288+++ linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16289@@ -48,6 +48,10 @@ BEGIN(read)
16290 movl (v), %eax
16291 movl 4(v), %edx
16292 RET_ENDP
16293+BEGIN(read_unchecked)
16294+ movl (v), %eax
16295+ movl 4(v), %edx
16296+RET_ENDP
16297 #undef v
16298
16299 #define v %esi
16300@@ -55,6 +59,10 @@ BEGIN(set)
16301 movl %ebx, (v)
16302 movl %ecx, 4(v)
16303 RET_ENDP
16304+BEGIN(set_unchecked)
16305+ movl %ebx, (v)
16306+ movl %ecx, 4(v)
16307+RET_ENDP
16308 #undef v
16309
16310 #define v %esi
16311@@ -70,6 +78,20 @@ RET_ENDP
16312 BEGIN(add)
16313 addl %eax, (v)
16314 adcl %edx, 4(v)
16315+
16316+#ifdef CONFIG_PAX_REFCOUNT
16317+ jno 0f
16318+ subl %eax, (v)
16319+ sbbl %edx, 4(v)
16320+ int $4
16321+0:
16322+ _ASM_EXTABLE(0b, 0b)
16323+#endif
16324+
16325+RET_ENDP
16326+BEGIN(add_unchecked)
16327+ addl %eax, (v)
16328+ adcl %edx, 4(v)
16329 RET_ENDP
16330 #undef v
16331
16332@@ -77,6 +99,24 @@ RET_ENDP
16333 BEGIN(add_return)
16334 addl (v), %eax
16335 adcl 4(v), %edx
16336+
16337+#ifdef CONFIG_PAX_REFCOUNT
16338+ into
16339+1234:
16340+ _ASM_EXTABLE(1234b, 2f)
16341+#endif
16342+
16343+ movl %eax, (v)
16344+ movl %edx, 4(v)
16345+
16346+#ifdef CONFIG_PAX_REFCOUNT
16347+2:
16348+#endif
16349+
16350+RET_ENDP
16351+BEGIN(add_return_unchecked)
16352+ addl (v), %eax
16353+ adcl 4(v), %edx
16354 movl %eax, (v)
16355 movl %edx, 4(v)
16356 RET_ENDP
16357@@ -86,6 +126,20 @@ RET_ENDP
16358 BEGIN(sub)
16359 subl %eax, (v)
16360 sbbl %edx, 4(v)
16361+
16362+#ifdef CONFIG_PAX_REFCOUNT
16363+ jno 0f
16364+ addl %eax, (v)
16365+ adcl %edx, 4(v)
16366+ int $4
16367+0:
16368+ _ASM_EXTABLE(0b, 0b)
16369+#endif
16370+
16371+RET_ENDP
16372+BEGIN(sub_unchecked)
16373+ subl %eax, (v)
16374+ sbbl %edx, 4(v)
16375 RET_ENDP
16376 #undef v
16377
16378@@ -96,6 +150,27 @@ BEGIN(sub_return)
16379 sbbl $0, %edx
16380 addl (v), %eax
16381 adcl 4(v), %edx
16382+
16383+#ifdef CONFIG_PAX_REFCOUNT
16384+ into
16385+1234:
16386+ _ASM_EXTABLE(1234b, 2f)
16387+#endif
16388+
16389+ movl %eax, (v)
16390+ movl %edx, 4(v)
16391+
16392+#ifdef CONFIG_PAX_REFCOUNT
16393+2:
16394+#endif
16395+
16396+RET_ENDP
16397+BEGIN(sub_return_unchecked)
16398+ negl %edx
16399+ negl %eax
16400+ sbbl $0, %edx
16401+ addl (v), %eax
16402+ adcl 4(v), %edx
16403 movl %eax, (v)
16404 movl %edx, 4(v)
16405 RET_ENDP
16406@@ -105,6 +180,20 @@ RET_ENDP
16407 BEGIN(inc)
16408 addl $1, (v)
16409 adcl $0, 4(v)
16410+
16411+#ifdef CONFIG_PAX_REFCOUNT
16412+ jno 0f
16413+ subl $1, (v)
16414+ sbbl $0, 4(v)
16415+ int $4
16416+0:
16417+ _ASM_EXTABLE(0b, 0b)
16418+#endif
16419+
16420+RET_ENDP
16421+BEGIN(inc_unchecked)
16422+ addl $1, (v)
16423+ adcl $0, 4(v)
16424 RET_ENDP
16425 #undef v
16426
16427@@ -114,6 +203,26 @@ BEGIN(inc_return)
16428 movl 4(v), %edx
16429 addl $1, %eax
16430 adcl $0, %edx
16431+
16432+#ifdef CONFIG_PAX_REFCOUNT
16433+ into
16434+1234:
16435+ _ASM_EXTABLE(1234b, 2f)
16436+#endif
16437+
16438+ movl %eax, (v)
16439+ movl %edx, 4(v)
16440+
16441+#ifdef CONFIG_PAX_REFCOUNT
16442+2:
16443+#endif
16444+
16445+RET_ENDP
16446+BEGIN(inc_return_unchecked)
16447+ movl (v), %eax
16448+ movl 4(v), %edx
16449+ addl $1, %eax
16450+ adcl $0, %edx
16451 movl %eax, (v)
16452 movl %edx, 4(v)
16453 RET_ENDP
16454@@ -123,6 +232,20 @@ RET_ENDP
16455 BEGIN(dec)
16456 subl $1, (v)
16457 sbbl $0, 4(v)
16458+
16459+#ifdef CONFIG_PAX_REFCOUNT
16460+ jno 0f
16461+ addl $1, (v)
16462+ adcl $0, 4(v)
16463+ int $4
16464+0:
16465+ _ASM_EXTABLE(0b, 0b)
16466+#endif
16467+
16468+RET_ENDP
16469+BEGIN(dec_unchecked)
16470+ subl $1, (v)
16471+ sbbl $0, 4(v)
16472 RET_ENDP
16473 #undef v
16474
16475@@ -132,6 +255,26 @@ BEGIN(dec_return)
16476 movl 4(v), %edx
16477 subl $1, %eax
16478 sbbl $0, %edx
16479+
16480+#ifdef CONFIG_PAX_REFCOUNT
16481+ into
16482+1234:
16483+ _ASM_EXTABLE(1234b, 2f)
16484+#endif
16485+
16486+ movl %eax, (v)
16487+ movl %edx, 4(v)
16488+
16489+#ifdef CONFIG_PAX_REFCOUNT
16490+2:
16491+#endif
16492+
16493+RET_ENDP
16494+BEGIN(dec_return_unchecked)
16495+ movl (v), %eax
16496+ movl 4(v), %edx
16497+ subl $1, %eax
16498+ sbbl $0, %edx
16499 movl %eax, (v)
16500 movl %edx, 4(v)
16501 RET_ENDP
16502@@ -143,6 +286,13 @@ BEGIN(add_unless)
16503 adcl %edx, %edi
16504 addl (v), %eax
16505 adcl 4(v), %edx
16506+
16507+#ifdef CONFIG_PAX_REFCOUNT
16508+ into
16509+1234:
16510+ _ASM_EXTABLE(1234b, 2f)
16511+#endif
16512+
16513 cmpl %eax, %esi
16514 je 3f
16515 1:
16516@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16517 1:
16518 addl $1, %eax
16519 adcl $0, %edx
16520+
16521+#ifdef CONFIG_PAX_REFCOUNT
16522+ into
16523+1234:
16524+ _ASM_EXTABLE(1234b, 2f)
16525+#endif
16526+
16527 movl %eax, (v)
16528 movl %edx, 4(v)
16529 movl $1, %eax
16530@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16531 movl 4(v), %edx
16532 subl $1, %eax
16533 sbbl $0, %edx
16534+
16535+#ifdef CONFIG_PAX_REFCOUNT
16536+ into
16537+1234:
16538+ _ASM_EXTABLE(1234b, 1f)
16539+#endif
16540+
16541 js 1f
16542 movl %eax, (v)
16543 movl %edx, 4(v)
16544diff -urNp linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S
16545--- linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16546+++ linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16547@@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16548 CFI_ENDPROC
16549 ENDPROC(atomic64_read_cx8)
16550
16551+ENTRY(atomic64_read_unchecked_cx8)
16552+ CFI_STARTPROC
16553+
16554+ read64 %ecx
16555+ ret
16556+ CFI_ENDPROC
16557+ENDPROC(atomic64_read_unchecked_cx8)
16558+
16559 ENTRY(atomic64_set_cx8)
16560 CFI_STARTPROC
16561
16562@@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16563 CFI_ENDPROC
16564 ENDPROC(atomic64_set_cx8)
16565
16566+ENTRY(atomic64_set_unchecked_cx8)
16567+ CFI_STARTPROC
16568+
16569+1:
16570+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16571+ * are atomic on 586 and newer */
16572+ cmpxchg8b (%esi)
16573+ jne 1b
16574+
16575+ ret
16576+ CFI_ENDPROC
16577+ENDPROC(atomic64_set_unchecked_cx8)
16578+
16579 ENTRY(atomic64_xchg_cx8)
16580 CFI_STARTPROC
16581
16582@@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16583 CFI_ENDPROC
16584 ENDPROC(atomic64_xchg_cx8)
16585
16586-.macro addsub_return func ins insc
16587-ENTRY(atomic64_\func\()_return_cx8)
16588+.macro addsub_return func ins insc unchecked=""
16589+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16590 CFI_STARTPROC
16591 SAVE ebp
16592 SAVE ebx
16593@@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16594 movl %edx, %ecx
16595 \ins\()l %esi, %ebx
16596 \insc\()l %edi, %ecx
16597+
16598+.ifb \unchecked
16599+#ifdef CONFIG_PAX_REFCOUNT
16600+ into
16601+2:
16602+ _ASM_EXTABLE(2b, 3f)
16603+#endif
16604+.endif
16605+
16606 LOCK_PREFIX
16607 cmpxchg8b (%ebp)
16608 jne 1b
16609-
16610-10:
16611 movl %ebx, %eax
16612 movl %ecx, %edx
16613+
16614+.ifb \unchecked
16615+#ifdef CONFIG_PAX_REFCOUNT
16616+3:
16617+#endif
16618+.endif
16619+
16620 RESTORE edi
16621 RESTORE esi
16622 RESTORE ebx
16623 RESTORE ebp
16624 ret
16625 CFI_ENDPROC
16626-ENDPROC(atomic64_\func\()_return_cx8)
16627+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16628 .endm
16629
16630 addsub_return add add adc
16631 addsub_return sub sub sbb
16632+addsub_return add add adc _unchecked
16633+addsub_return sub sub sbb _unchecked
16634
16635-.macro incdec_return func ins insc
16636-ENTRY(atomic64_\func\()_return_cx8)
16637+.macro incdec_return func ins insc unchecked
16638+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16639 CFI_STARTPROC
16640 SAVE ebx
16641
16642@@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16643 movl %edx, %ecx
16644 \ins\()l $1, %ebx
16645 \insc\()l $0, %ecx
16646+
16647+.ifb \unchecked
16648+#ifdef CONFIG_PAX_REFCOUNT
16649+ into
16650+2:
16651+ _ASM_EXTABLE(2b, 3f)
16652+#endif
16653+.endif
16654+
16655 LOCK_PREFIX
16656 cmpxchg8b (%esi)
16657 jne 1b
16658
16659-10:
16660 movl %ebx, %eax
16661 movl %ecx, %edx
16662+
16663+.ifb \unchecked
16664+#ifdef CONFIG_PAX_REFCOUNT
16665+3:
16666+#endif
16667+.endif
16668+
16669 RESTORE ebx
16670 ret
16671 CFI_ENDPROC
16672-ENDPROC(atomic64_\func\()_return_cx8)
16673+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16674 .endm
16675
16676 incdec_return inc add adc
16677 incdec_return dec sub sbb
16678+incdec_return inc add adc _unchecked
16679+incdec_return dec sub sbb _unchecked
16680
16681 ENTRY(atomic64_dec_if_positive_cx8)
16682 CFI_STARTPROC
16683@@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16684 movl %edx, %ecx
16685 subl $1, %ebx
16686 sbb $0, %ecx
16687+
16688+#ifdef CONFIG_PAX_REFCOUNT
16689+ into
16690+1234:
16691+ _ASM_EXTABLE(1234b, 2f)
16692+#endif
16693+
16694 js 2f
16695 LOCK_PREFIX
16696 cmpxchg8b (%esi)
16697@@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16698 movl %edx, %ecx
16699 addl %esi, %ebx
16700 adcl %edi, %ecx
16701+
16702+#ifdef CONFIG_PAX_REFCOUNT
16703+ into
16704+1234:
16705+ _ASM_EXTABLE(1234b, 3f)
16706+#endif
16707+
16708 LOCK_PREFIX
16709 cmpxchg8b (%ebp)
16710 jne 1b
16711@@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16712 movl %edx, %ecx
16713 addl $1, %ebx
16714 adcl $0, %ecx
16715+
16716+#ifdef CONFIG_PAX_REFCOUNT
16717+ into
16718+1234:
16719+ _ASM_EXTABLE(1234b, 3f)
16720+#endif
16721+
16722 LOCK_PREFIX
16723 cmpxchg8b (%esi)
16724 jne 1b
16725diff -urNp linux-3.0.3/arch/x86/lib/checksum_32.S linux-3.0.3/arch/x86/lib/checksum_32.S
16726--- linux-3.0.3/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16727+++ linux-3.0.3/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16728@@ -28,7 +28,8 @@
16729 #include <linux/linkage.h>
16730 #include <asm/dwarf2.h>
16731 #include <asm/errno.h>
16732-
16733+#include <asm/segment.h>
16734+
16735 /*
16736 * computes a partial checksum, e.g. for TCP/UDP fragments
16737 */
16738@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16739
16740 #define ARGBASE 16
16741 #define FP 12
16742-
16743-ENTRY(csum_partial_copy_generic)
16744+
16745+ENTRY(csum_partial_copy_generic_to_user)
16746 CFI_STARTPROC
16747+
16748+#ifdef CONFIG_PAX_MEMORY_UDEREF
16749+ pushl_cfi %gs
16750+ popl_cfi %es
16751+ jmp csum_partial_copy_generic
16752+#endif
16753+
16754+ENTRY(csum_partial_copy_generic_from_user)
16755+
16756+#ifdef CONFIG_PAX_MEMORY_UDEREF
16757+ pushl_cfi %gs
16758+ popl_cfi %ds
16759+#endif
16760+
16761+ENTRY(csum_partial_copy_generic)
16762 subl $4,%esp
16763 CFI_ADJUST_CFA_OFFSET 4
16764 pushl_cfi %edi
16765@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16766 jmp 4f
16767 SRC(1: movw (%esi), %bx )
16768 addl $2, %esi
16769-DST( movw %bx, (%edi) )
16770+DST( movw %bx, %es:(%edi) )
16771 addl $2, %edi
16772 addw %bx, %ax
16773 adcl $0, %eax
16774@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16775 SRC(1: movl (%esi), %ebx )
16776 SRC( movl 4(%esi), %edx )
16777 adcl %ebx, %eax
16778-DST( movl %ebx, (%edi) )
16779+DST( movl %ebx, %es:(%edi) )
16780 adcl %edx, %eax
16781-DST( movl %edx, 4(%edi) )
16782+DST( movl %edx, %es:4(%edi) )
16783
16784 SRC( movl 8(%esi), %ebx )
16785 SRC( movl 12(%esi), %edx )
16786 adcl %ebx, %eax
16787-DST( movl %ebx, 8(%edi) )
16788+DST( movl %ebx, %es:8(%edi) )
16789 adcl %edx, %eax
16790-DST( movl %edx, 12(%edi) )
16791+DST( movl %edx, %es:12(%edi) )
16792
16793 SRC( movl 16(%esi), %ebx )
16794 SRC( movl 20(%esi), %edx )
16795 adcl %ebx, %eax
16796-DST( movl %ebx, 16(%edi) )
16797+DST( movl %ebx, %es:16(%edi) )
16798 adcl %edx, %eax
16799-DST( movl %edx, 20(%edi) )
16800+DST( movl %edx, %es:20(%edi) )
16801
16802 SRC( movl 24(%esi), %ebx )
16803 SRC( movl 28(%esi), %edx )
16804 adcl %ebx, %eax
16805-DST( movl %ebx, 24(%edi) )
16806+DST( movl %ebx, %es:24(%edi) )
16807 adcl %edx, %eax
16808-DST( movl %edx, 28(%edi) )
16809+DST( movl %edx, %es:28(%edi) )
16810
16811 lea 32(%esi), %esi
16812 lea 32(%edi), %edi
16813@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16814 shrl $2, %edx # This clears CF
16815 SRC(3: movl (%esi), %ebx )
16816 adcl %ebx, %eax
16817-DST( movl %ebx, (%edi) )
16818+DST( movl %ebx, %es:(%edi) )
16819 lea 4(%esi), %esi
16820 lea 4(%edi), %edi
16821 dec %edx
16822@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16823 jb 5f
16824 SRC( movw (%esi), %cx )
16825 leal 2(%esi), %esi
16826-DST( movw %cx, (%edi) )
16827+DST( movw %cx, %es:(%edi) )
16828 leal 2(%edi), %edi
16829 je 6f
16830 shll $16,%ecx
16831 SRC(5: movb (%esi), %cl )
16832-DST( movb %cl, (%edi) )
16833+DST( movb %cl, %es:(%edi) )
16834 6: addl %ecx, %eax
16835 adcl $0, %eax
16836 7:
16837@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16838
16839 6001:
16840 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16841- movl $-EFAULT, (%ebx)
16842+ movl $-EFAULT, %ss:(%ebx)
16843
16844 # zero the complete destination - computing the rest
16845 # is too much work
16846@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16847
16848 6002:
16849 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16850- movl $-EFAULT,(%ebx)
16851+ movl $-EFAULT,%ss:(%ebx)
16852 jmp 5000b
16853
16854 .previous
16855
16856+ pushl_cfi %ss
16857+ popl_cfi %ds
16858+ pushl_cfi %ss
16859+ popl_cfi %es
16860 popl_cfi %ebx
16861 CFI_RESTORE ebx
16862 popl_cfi %esi
16863@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16864 popl_cfi %ecx # equivalent to addl $4,%esp
16865 ret
16866 CFI_ENDPROC
16867-ENDPROC(csum_partial_copy_generic)
16868+ENDPROC(csum_partial_copy_generic_to_user)
16869
16870 #else
16871
16872 /* Version for PentiumII/PPro */
16873
16874 #define ROUND1(x) \
16875+ nop; nop; nop; \
16876 SRC(movl x(%esi), %ebx ) ; \
16877 addl %ebx, %eax ; \
16878- DST(movl %ebx, x(%edi) ) ;
16879+ DST(movl %ebx, %es:x(%edi)) ;
16880
16881 #define ROUND(x) \
16882+ nop; nop; nop; \
16883 SRC(movl x(%esi), %ebx ) ; \
16884 adcl %ebx, %eax ; \
16885- DST(movl %ebx, x(%edi) ) ;
16886+ DST(movl %ebx, %es:x(%edi)) ;
16887
16888 #define ARGBASE 12
16889-
16890-ENTRY(csum_partial_copy_generic)
16891+
16892+ENTRY(csum_partial_copy_generic_to_user)
16893 CFI_STARTPROC
16894+
16895+#ifdef CONFIG_PAX_MEMORY_UDEREF
16896+ pushl_cfi %gs
16897+ popl_cfi %es
16898+ jmp csum_partial_copy_generic
16899+#endif
16900+
16901+ENTRY(csum_partial_copy_generic_from_user)
16902+
16903+#ifdef CONFIG_PAX_MEMORY_UDEREF
16904+ pushl_cfi %gs
16905+ popl_cfi %ds
16906+#endif
16907+
16908+ENTRY(csum_partial_copy_generic)
16909 pushl_cfi %ebx
16910 CFI_REL_OFFSET ebx, 0
16911 pushl_cfi %edi
16912@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16913 subl %ebx, %edi
16914 lea -1(%esi),%edx
16915 andl $-32,%edx
16916- lea 3f(%ebx,%ebx), %ebx
16917+ lea 3f(%ebx,%ebx,2), %ebx
16918 testl %esi, %esi
16919 jmp *%ebx
16920 1: addl $64,%esi
16921@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16922 jb 5f
16923 SRC( movw (%esi), %dx )
16924 leal 2(%esi), %esi
16925-DST( movw %dx, (%edi) )
16926+DST( movw %dx, %es:(%edi) )
16927 leal 2(%edi), %edi
16928 je 6f
16929 shll $16,%edx
16930 5:
16931 SRC( movb (%esi), %dl )
16932-DST( movb %dl, (%edi) )
16933+DST( movb %dl, %es:(%edi) )
16934 6: addl %edx, %eax
16935 adcl $0, %eax
16936 7:
16937 .section .fixup, "ax"
16938 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16939- movl $-EFAULT, (%ebx)
16940+ movl $-EFAULT, %ss:(%ebx)
16941 # zero the complete destination (computing the rest is too much work)
16942 movl ARGBASE+8(%esp),%edi # dst
16943 movl ARGBASE+12(%esp),%ecx # len
16944@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16945 rep; stosb
16946 jmp 7b
16947 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16948- movl $-EFAULT, (%ebx)
16949+ movl $-EFAULT, %ss:(%ebx)
16950 jmp 7b
16951 .previous
16952
16953+#ifdef CONFIG_PAX_MEMORY_UDEREF
16954+ pushl_cfi %ss
16955+ popl_cfi %ds
16956+ pushl_cfi %ss
16957+ popl_cfi %es
16958+#endif
16959+
16960 popl_cfi %esi
16961 CFI_RESTORE esi
16962 popl_cfi %edi
16963@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16964 CFI_RESTORE ebx
16965 ret
16966 CFI_ENDPROC
16967-ENDPROC(csum_partial_copy_generic)
16968+ENDPROC(csum_partial_copy_generic_to_user)
16969
16970 #undef ROUND
16971 #undef ROUND1
16972diff -urNp linux-3.0.3/arch/x86/lib/clear_page_64.S linux-3.0.3/arch/x86/lib/clear_page_64.S
16973--- linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16974+++ linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16975@@ -58,7 +58,7 @@ ENDPROC(clear_page)
16976
16977 #include <asm/cpufeature.h>
16978
16979- .section .altinstr_replacement,"ax"
16980+ .section .altinstr_replacement,"a"
16981 1: .byte 0xeb /* jmp <disp8> */
16982 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16983 2: .byte 0xeb /* jmp <disp8> */
16984diff -urNp linux-3.0.3/arch/x86/lib/copy_page_64.S linux-3.0.3/arch/x86/lib/copy_page_64.S
16985--- linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16986+++ linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16987@@ -104,7 +104,7 @@ ENDPROC(copy_page)
16988
16989 #include <asm/cpufeature.h>
16990
16991- .section .altinstr_replacement,"ax"
16992+ .section .altinstr_replacement,"a"
16993 1: .byte 0xeb /* jmp <disp8> */
16994 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16995 2:
16996diff -urNp linux-3.0.3/arch/x86/lib/copy_user_64.S linux-3.0.3/arch/x86/lib/copy_user_64.S
16997--- linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16998+++ linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16999@@ -16,6 +16,7 @@
17000 #include <asm/thread_info.h>
17001 #include <asm/cpufeature.h>
17002 #include <asm/alternative-asm.h>
17003+#include <asm/pgtable.h>
17004
17005 /*
17006 * By placing feature2 after feature1 in altinstructions section, we logically
17007@@ -29,7 +30,7 @@
17008 .byte 0xe9 /* 32bit jump */
17009 .long \orig-1f /* by default jump to orig */
17010 1:
17011- .section .altinstr_replacement,"ax"
17012+ .section .altinstr_replacement,"a"
17013 2: .byte 0xe9 /* near jump with 32bit immediate */
17014 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17015 3: .byte 0xe9 /* near jump with 32bit immediate */
17016@@ -71,41 +72,13 @@
17017 #endif
17018 .endm
17019
17020-/* Standard copy_to_user with segment limit checking */
17021-ENTRY(_copy_to_user)
17022- CFI_STARTPROC
17023- GET_THREAD_INFO(%rax)
17024- movq %rdi,%rcx
17025- addq %rdx,%rcx
17026- jc bad_to_user
17027- cmpq TI_addr_limit(%rax),%rcx
17028- ja bad_to_user
17029- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17030- copy_user_generic_unrolled,copy_user_generic_string, \
17031- copy_user_enhanced_fast_string
17032- CFI_ENDPROC
17033-ENDPROC(_copy_to_user)
17034-
17035-/* Standard copy_from_user with segment limit checking */
17036-ENTRY(_copy_from_user)
17037- CFI_STARTPROC
17038- GET_THREAD_INFO(%rax)
17039- movq %rsi,%rcx
17040- addq %rdx,%rcx
17041- jc bad_from_user
17042- cmpq TI_addr_limit(%rax),%rcx
17043- ja bad_from_user
17044- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17045- copy_user_generic_unrolled,copy_user_generic_string, \
17046- copy_user_enhanced_fast_string
17047- CFI_ENDPROC
17048-ENDPROC(_copy_from_user)
17049-
17050 .section .fixup,"ax"
17051 /* must zero dest */
17052 ENTRY(bad_from_user)
17053 bad_from_user:
17054 CFI_STARTPROC
17055+ testl %edx,%edx
17056+ js bad_to_user
17057 movl %edx,%ecx
17058 xorl %eax,%eax
17059 rep
17060diff -urNp linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S
17061--- linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17062+++ linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17063@@ -14,6 +14,7 @@
17064 #include <asm/current.h>
17065 #include <asm/asm-offsets.h>
17066 #include <asm/thread_info.h>
17067+#include <asm/pgtable.h>
17068
17069 .macro ALIGN_DESTINATION
17070 #ifdef FIX_ALIGNMENT
17071@@ -50,6 +51,15 @@
17072 */
17073 ENTRY(__copy_user_nocache)
17074 CFI_STARTPROC
17075+
17076+#ifdef CONFIG_PAX_MEMORY_UDEREF
17077+ mov $PAX_USER_SHADOW_BASE,%rcx
17078+ cmp %rcx,%rsi
17079+ jae 1f
17080+ add %rcx,%rsi
17081+1:
17082+#endif
17083+
17084 cmpl $8,%edx
17085 jb 20f /* less then 8 bytes, go to byte copy loop */
17086 ALIGN_DESTINATION
17087diff -urNp linux-3.0.3/arch/x86/lib/csum-wrappers_64.c linux-3.0.3/arch/x86/lib/csum-wrappers_64.c
17088--- linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17089+++ linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17090@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17091 len -= 2;
17092 }
17093 }
17094+
17095+#ifdef CONFIG_PAX_MEMORY_UDEREF
17096+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17097+ src += PAX_USER_SHADOW_BASE;
17098+#endif
17099+
17100 isum = csum_partial_copy_generic((__force const void *)src,
17101 dst, len, isum, errp, NULL);
17102 if (unlikely(*errp))
17103@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17104 }
17105
17106 *errp = 0;
17107+
17108+#ifdef CONFIG_PAX_MEMORY_UDEREF
17109+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17110+ dst += PAX_USER_SHADOW_BASE;
17111+#endif
17112+
17113 return csum_partial_copy_generic(src, (void __force *)dst,
17114 len, isum, NULL, errp);
17115 }
17116diff -urNp linux-3.0.3/arch/x86/lib/getuser.S linux-3.0.3/arch/x86/lib/getuser.S
17117--- linux-3.0.3/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17118+++ linux-3.0.3/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17119@@ -33,14 +33,35 @@
17120 #include <asm/asm-offsets.h>
17121 #include <asm/thread_info.h>
17122 #include <asm/asm.h>
17123+#include <asm/segment.h>
17124+#include <asm/pgtable.h>
17125+
17126+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17127+#define __copyuser_seg gs;
17128+#else
17129+#define __copyuser_seg
17130+#endif
17131
17132 .text
17133 ENTRY(__get_user_1)
17134 CFI_STARTPROC
17135+
17136+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17137 GET_THREAD_INFO(%_ASM_DX)
17138 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17139 jae bad_get_user
17140-1: movzb (%_ASM_AX),%edx
17141+
17142+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17143+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17144+ cmp %_ASM_DX,%_ASM_AX
17145+ jae 1234f
17146+ add %_ASM_DX,%_ASM_AX
17147+1234:
17148+#endif
17149+
17150+#endif
17151+
17152+1: __copyuser_seg movzb (%_ASM_AX),%edx
17153 xor %eax,%eax
17154 ret
17155 CFI_ENDPROC
17156@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17157 ENTRY(__get_user_2)
17158 CFI_STARTPROC
17159 add $1,%_ASM_AX
17160+
17161+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17162 jc bad_get_user
17163 GET_THREAD_INFO(%_ASM_DX)
17164 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17165 jae bad_get_user
17166-2: movzwl -1(%_ASM_AX),%edx
17167+
17168+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17169+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17170+ cmp %_ASM_DX,%_ASM_AX
17171+ jae 1234f
17172+ add %_ASM_DX,%_ASM_AX
17173+1234:
17174+#endif
17175+
17176+#endif
17177+
17178+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17179 xor %eax,%eax
17180 ret
17181 CFI_ENDPROC
17182@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17183 ENTRY(__get_user_4)
17184 CFI_STARTPROC
17185 add $3,%_ASM_AX
17186+
17187+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17188 jc bad_get_user
17189 GET_THREAD_INFO(%_ASM_DX)
17190 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17191 jae bad_get_user
17192-3: mov -3(%_ASM_AX),%edx
17193+
17194+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17195+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17196+ cmp %_ASM_DX,%_ASM_AX
17197+ jae 1234f
17198+ add %_ASM_DX,%_ASM_AX
17199+1234:
17200+#endif
17201+
17202+#endif
17203+
17204+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17205 xor %eax,%eax
17206 ret
17207 CFI_ENDPROC
17208@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17209 GET_THREAD_INFO(%_ASM_DX)
17210 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17211 jae bad_get_user
17212+
17213+#ifdef CONFIG_PAX_MEMORY_UDEREF
17214+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17215+ cmp %_ASM_DX,%_ASM_AX
17216+ jae 1234f
17217+ add %_ASM_DX,%_ASM_AX
17218+1234:
17219+#endif
17220+
17221 4: movq -7(%_ASM_AX),%_ASM_DX
17222 xor %eax,%eax
17223 ret
17224diff -urNp linux-3.0.3/arch/x86/lib/insn.c linux-3.0.3/arch/x86/lib/insn.c
17225--- linux-3.0.3/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17226+++ linux-3.0.3/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17227@@ -21,6 +21,11 @@
17228 #include <linux/string.h>
17229 #include <asm/inat.h>
17230 #include <asm/insn.h>
17231+#ifdef __KERNEL__
17232+#include <asm/pgtable_types.h>
17233+#else
17234+#define ktla_ktva(addr) addr
17235+#endif
17236
17237 #define get_next(t, insn) \
17238 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17239@@ -40,8 +45,8 @@
17240 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17241 {
17242 memset(insn, 0, sizeof(*insn));
17243- insn->kaddr = kaddr;
17244- insn->next_byte = kaddr;
17245+ insn->kaddr = ktla_ktva(kaddr);
17246+ insn->next_byte = ktla_ktva(kaddr);
17247 insn->x86_64 = x86_64 ? 1 : 0;
17248 insn->opnd_bytes = 4;
17249 if (x86_64)
17250diff -urNp linux-3.0.3/arch/x86/lib/mmx_32.c linux-3.0.3/arch/x86/lib/mmx_32.c
17251--- linux-3.0.3/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17252+++ linux-3.0.3/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17253@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17254 {
17255 void *p;
17256 int i;
17257+ unsigned long cr0;
17258
17259 if (unlikely(in_interrupt()))
17260 return __memcpy(to, from, len);
17261@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17262 kernel_fpu_begin();
17263
17264 __asm__ __volatile__ (
17265- "1: prefetch (%0)\n" /* This set is 28 bytes */
17266- " prefetch 64(%0)\n"
17267- " prefetch 128(%0)\n"
17268- " prefetch 192(%0)\n"
17269- " prefetch 256(%0)\n"
17270+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17271+ " prefetch 64(%1)\n"
17272+ " prefetch 128(%1)\n"
17273+ " prefetch 192(%1)\n"
17274+ " prefetch 256(%1)\n"
17275 "2: \n"
17276 ".section .fixup, \"ax\"\n"
17277- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17278+ "3: \n"
17279+
17280+#ifdef CONFIG_PAX_KERNEXEC
17281+ " movl %%cr0, %0\n"
17282+ " movl %0, %%eax\n"
17283+ " andl $0xFFFEFFFF, %%eax\n"
17284+ " movl %%eax, %%cr0\n"
17285+#endif
17286+
17287+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17288+
17289+#ifdef CONFIG_PAX_KERNEXEC
17290+ " movl %0, %%cr0\n"
17291+#endif
17292+
17293 " jmp 2b\n"
17294 ".previous\n"
17295 _ASM_EXTABLE(1b, 3b)
17296- : : "r" (from));
17297+ : "=&r" (cr0) : "r" (from) : "ax");
17298
17299 for ( ; i > 5; i--) {
17300 __asm__ __volatile__ (
17301- "1: prefetch 320(%0)\n"
17302- "2: movq (%0), %%mm0\n"
17303- " movq 8(%0), %%mm1\n"
17304- " movq 16(%0), %%mm2\n"
17305- " movq 24(%0), %%mm3\n"
17306- " movq %%mm0, (%1)\n"
17307- " movq %%mm1, 8(%1)\n"
17308- " movq %%mm2, 16(%1)\n"
17309- " movq %%mm3, 24(%1)\n"
17310- " movq 32(%0), %%mm0\n"
17311- " movq 40(%0), %%mm1\n"
17312- " movq 48(%0), %%mm2\n"
17313- " movq 56(%0), %%mm3\n"
17314- " movq %%mm0, 32(%1)\n"
17315- " movq %%mm1, 40(%1)\n"
17316- " movq %%mm2, 48(%1)\n"
17317- " movq %%mm3, 56(%1)\n"
17318+ "1: prefetch 320(%1)\n"
17319+ "2: movq (%1), %%mm0\n"
17320+ " movq 8(%1), %%mm1\n"
17321+ " movq 16(%1), %%mm2\n"
17322+ " movq 24(%1), %%mm3\n"
17323+ " movq %%mm0, (%2)\n"
17324+ " movq %%mm1, 8(%2)\n"
17325+ " movq %%mm2, 16(%2)\n"
17326+ " movq %%mm3, 24(%2)\n"
17327+ " movq 32(%1), %%mm0\n"
17328+ " movq 40(%1), %%mm1\n"
17329+ " movq 48(%1), %%mm2\n"
17330+ " movq 56(%1), %%mm3\n"
17331+ " movq %%mm0, 32(%2)\n"
17332+ " movq %%mm1, 40(%2)\n"
17333+ " movq %%mm2, 48(%2)\n"
17334+ " movq %%mm3, 56(%2)\n"
17335 ".section .fixup, \"ax\"\n"
17336- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17337+ "3:\n"
17338+
17339+#ifdef CONFIG_PAX_KERNEXEC
17340+ " movl %%cr0, %0\n"
17341+ " movl %0, %%eax\n"
17342+ " andl $0xFFFEFFFF, %%eax\n"
17343+ " movl %%eax, %%cr0\n"
17344+#endif
17345+
17346+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17347+
17348+#ifdef CONFIG_PAX_KERNEXEC
17349+ " movl %0, %%cr0\n"
17350+#endif
17351+
17352 " jmp 2b\n"
17353 ".previous\n"
17354 _ASM_EXTABLE(1b, 3b)
17355- : : "r" (from), "r" (to) : "memory");
17356+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17357
17358 from += 64;
17359 to += 64;
17360@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17361 static void fast_copy_page(void *to, void *from)
17362 {
17363 int i;
17364+ unsigned long cr0;
17365
17366 kernel_fpu_begin();
17367
17368@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17369 * but that is for later. -AV
17370 */
17371 __asm__ __volatile__(
17372- "1: prefetch (%0)\n"
17373- " prefetch 64(%0)\n"
17374- " prefetch 128(%0)\n"
17375- " prefetch 192(%0)\n"
17376- " prefetch 256(%0)\n"
17377+ "1: prefetch (%1)\n"
17378+ " prefetch 64(%1)\n"
17379+ " prefetch 128(%1)\n"
17380+ " prefetch 192(%1)\n"
17381+ " prefetch 256(%1)\n"
17382 "2: \n"
17383 ".section .fixup, \"ax\"\n"
17384- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17385+ "3: \n"
17386+
17387+#ifdef CONFIG_PAX_KERNEXEC
17388+ " movl %%cr0, %0\n"
17389+ " movl %0, %%eax\n"
17390+ " andl $0xFFFEFFFF, %%eax\n"
17391+ " movl %%eax, %%cr0\n"
17392+#endif
17393+
17394+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17395+
17396+#ifdef CONFIG_PAX_KERNEXEC
17397+ " movl %0, %%cr0\n"
17398+#endif
17399+
17400 " jmp 2b\n"
17401 ".previous\n"
17402- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17403+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17404
17405 for (i = 0; i < (4096-320)/64; i++) {
17406 __asm__ __volatile__ (
17407- "1: prefetch 320(%0)\n"
17408- "2: movq (%0), %%mm0\n"
17409- " movntq %%mm0, (%1)\n"
17410- " movq 8(%0), %%mm1\n"
17411- " movntq %%mm1, 8(%1)\n"
17412- " movq 16(%0), %%mm2\n"
17413- " movntq %%mm2, 16(%1)\n"
17414- " movq 24(%0), %%mm3\n"
17415- " movntq %%mm3, 24(%1)\n"
17416- " movq 32(%0), %%mm4\n"
17417- " movntq %%mm4, 32(%1)\n"
17418- " movq 40(%0), %%mm5\n"
17419- " movntq %%mm5, 40(%1)\n"
17420- " movq 48(%0), %%mm6\n"
17421- " movntq %%mm6, 48(%1)\n"
17422- " movq 56(%0), %%mm7\n"
17423- " movntq %%mm7, 56(%1)\n"
17424+ "1: prefetch 320(%1)\n"
17425+ "2: movq (%1), %%mm0\n"
17426+ " movntq %%mm0, (%2)\n"
17427+ " movq 8(%1), %%mm1\n"
17428+ " movntq %%mm1, 8(%2)\n"
17429+ " movq 16(%1), %%mm2\n"
17430+ " movntq %%mm2, 16(%2)\n"
17431+ " movq 24(%1), %%mm3\n"
17432+ " movntq %%mm3, 24(%2)\n"
17433+ " movq 32(%1), %%mm4\n"
17434+ " movntq %%mm4, 32(%2)\n"
17435+ " movq 40(%1), %%mm5\n"
17436+ " movntq %%mm5, 40(%2)\n"
17437+ " movq 48(%1), %%mm6\n"
17438+ " movntq %%mm6, 48(%2)\n"
17439+ " movq 56(%1), %%mm7\n"
17440+ " movntq %%mm7, 56(%2)\n"
17441 ".section .fixup, \"ax\"\n"
17442- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17443+ "3:\n"
17444+
17445+#ifdef CONFIG_PAX_KERNEXEC
17446+ " movl %%cr0, %0\n"
17447+ " movl %0, %%eax\n"
17448+ " andl $0xFFFEFFFF, %%eax\n"
17449+ " movl %%eax, %%cr0\n"
17450+#endif
17451+
17452+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17453+
17454+#ifdef CONFIG_PAX_KERNEXEC
17455+ " movl %0, %%cr0\n"
17456+#endif
17457+
17458 " jmp 2b\n"
17459 ".previous\n"
17460- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17461+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17462
17463 from += 64;
17464 to += 64;
17465@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17466 static void fast_copy_page(void *to, void *from)
17467 {
17468 int i;
17469+ unsigned long cr0;
17470
17471 kernel_fpu_begin();
17472
17473 __asm__ __volatile__ (
17474- "1: prefetch (%0)\n"
17475- " prefetch 64(%0)\n"
17476- " prefetch 128(%0)\n"
17477- " prefetch 192(%0)\n"
17478- " prefetch 256(%0)\n"
17479+ "1: prefetch (%1)\n"
17480+ " prefetch 64(%1)\n"
17481+ " prefetch 128(%1)\n"
17482+ " prefetch 192(%1)\n"
17483+ " prefetch 256(%1)\n"
17484 "2: \n"
17485 ".section .fixup, \"ax\"\n"
17486- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17487+ "3: \n"
17488+
17489+#ifdef CONFIG_PAX_KERNEXEC
17490+ " movl %%cr0, %0\n"
17491+ " movl %0, %%eax\n"
17492+ " andl $0xFFFEFFFF, %%eax\n"
17493+ " movl %%eax, %%cr0\n"
17494+#endif
17495+
17496+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17497+
17498+#ifdef CONFIG_PAX_KERNEXEC
17499+ " movl %0, %%cr0\n"
17500+#endif
17501+
17502 " jmp 2b\n"
17503 ".previous\n"
17504- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17505+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17506
17507 for (i = 0; i < 4096/64; i++) {
17508 __asm__ __volatile__ (
17509- "1: prefetch 320(%0)\n"
17510- "2: movq (%0), %%mm0\n"
17511- " movq 8(%0), %%mm1\n"
17512- " movq 16(%0), %%mm2\n"
17513- " movq 24(%0), %%mm3\n"
17514- " movq %%mm0, (%1)\n"
17515- " movq %%mm1, 8(%1)\n"
17516- " movq %%mm2, 16(%1)\n"
17517- " movq %%mm3, 24(%1)\n"
17518- " movq 32(%0), %%mm0\n"
17519- " movq 40(%0), %%mm1\n"
17520- " movq 48(%0), %%mm2\n"
17521- " movq 56(%0), %%mm3\n"
17522- " movq %%mm0, 32(%1)\n"
17523- " movq %%mm1, 40(%1)\n"
17524- " movq %%mm2, 48(%1)\n"
17525- " movq %%mm3, 56(%1)\n"
17526+ "1: prefetch 320(%1)\n"
17527+ "2: movq (%1), %%mm0\n"
17528+ " movq 8(%1), %%mm1\n"
17529+ " movq 16(%1), %%mm2\n"
17530+ " movq 24(%1), %%mm3\n"
17531+ " movq %%mm0, (%2)\n"
17532+ " movq %%mm1, 8(%2)\n"
17533+ " movq %%mm2, 16(%2)\n"
17534+ " movq %%mm3, 24(%2)\n"
17535+ " movq 32(%1), %%mm0\n"
17536+ " movq 40(%1), %%mm1\n"
17537+ " movq 48(%1), %%mm2\n"
17538+ " movq 56(%1), %%mm3\n"
17539+ " movq %%mm0, 32(%2)\n"
17540+ " movq %%mm1, 40(%2)\n"
17541+ " movq %%mm2, 48(%2)\n"
17542+ " movq %%mm3, 56(%2)\n"
17543 ".section .fixup, \"ax\"\n"
17544- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17545+ "3:\n"
17546+
17547+#ifdef CONFIG_PAX_KERNEXEC
17548+ " movl %%cr0, %0\n"
17549+ " movl %0, %%eax\n"
17550+ " andl $0xFFFEFFFF, %%eax\n"
17551+ " movl %%eax, %%cr0\n"
17552+#endif
17553+
17554+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17555+
17556+#ifdef CONFIG_PAX_KERNEXEC
17557+ " movl %0, %%cr0\n"
17558+#endif
17559+
17560 " jmp 2b\n"
17561 ".previous\n"
17562 _ASM_EXTABLE(1b, 3b)
17563- : : "r" (from), "r" (to) : "memory");
17564+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17565
17566 from += 64;
17567 to += 64;
17568diff -urNp linux-3.0.3/arch/x86/lib/putuser.S linux-3.0.3/arch/x86/lib/putuser.S
17569--- linux-3.0.3/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17570+++ linux-3.0.3/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17571@@ -15,7 +15,8 @@
17572 #include <asm/thread_info.h>
17573 #include <asm/errno.h>
17574 #include <asm/asm.h>
17575-
17576+#include <asm/segment.h>
17577+#include <asm/pgtable.h>
17578
17579 /*
17580 * __put_user_X
17581@@ -29,52 +30,119 @@
17582 * as they get called from within inline assembly.
17583 */
17584
17585-#define ENTER CFI_STARTPROC ; \
17586- GET_THREAD_INFO(%_ASM_BX)
17587+#define ENTER CFI_STARTPROC
17588 #define EXIT ret ; \
17589 CFI_ENDPROC
17590
17591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17592+#define _DEST %_ASM_CX,%_ASM_BX
17593+#else
17594+#define _DEST %_ASM_CX
17595+#endif
17596+
17597+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17598+#define __copyuser_seg gs;
17599+#else
17600+#define __copyuser_seg
17601+#endif
17602+
17603 .text
17604 ENTRY(__put_user_1)
17605 ENTER
17606+
17607+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17608+ GET_THREAD_INFO(%_ASM_BX)
17609 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17610 jae bad_put_user
17611-1: movb %al,(%_ASM_CX)
17612+
17613+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17614+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17615+ cmp %_ASM_BX,%_ASM_CX
17616+ jb 1234f
17617+ xor %ebx,%ebx
17618+1234:
17619+#endif
17620+
17621+#endif
17622+
17623+1: __copyuser_seg movb %al,(_DEST)
17624 xor %eax,%eax
17625 EXIT
17626 ENDPROC(__put_user_1)
17627
17628 ENTRY(__put_user_2)
17629 ENTER
17630+
17631+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17632+ GET_THREAD_INFO(%_ASM_BX)
17633 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17634 sub $1,%_ASM_BX
17635 cmp %_ASM_BX,%_ASM_CX
17636 jae bad_put_user
17637-2: movw %ax,(%_ASM_CX)
17638+
17639+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17640+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17641+ cmp %_ASM_BX,%_ASM_CX
17642+ jb 1234f
17643+ xor %ebx,%ebx
17644+1234:
17645+#endif
17646+
17647+#endif
17648+
17649+2: __copyuser_seg movw %ax,(_DEST)
17650 xor %eax,%eax
17651 EXIT
17652 ENDPROC(__put_user_2)
17653
17654 ENTRY(__put_user_4)
17655 ENTER
17656+
17657+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17658+ GET_THREAD_INFO(%_ASM_BX)
17659 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17660 sub $3,%_ASM_BX
17661 cmp %_ASM_BX,%_ASM_CX
17662 jae bad_put_user
17663-3: movl %eax,(%_ASM_CX)
17664+
17665+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17666+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17667+ cmp %_ASM_BX,%_ASM_CX
17668+ jb 1234f
17669+ xor %ebx,%ebx
17670+1234:
17671+#endif
17672+
17673+#endif
17674+
17675+3: __copyuser_seg movl %eax,(_DEST)
17676 xor %eax,%eax
17677 EXIT
17678 ENDPROC(__put_user_4)
17679
17680 ENTRY(__put_user_8)
17681 ENTER
17682+
17683+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17684+ GET_THREAD_INFO(%_ASM_BX)
17685 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17686 sub $7,%_ASM_BX
17687 cmp %_ASM_BX,%_ASM_CX
17688 jae bad_put_user
17689-4: mov %_ASM_AX,(%_ASM_CX)
17690+
17691+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17692+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17693+ cmp %_ASM_BX,%_ASM_CX
17694+ jb 1234f
17695+ xor %ebx,%ebx
17696+1234:
17697+#endif
17698+
17699+#endif
17700+
17701+4: __copyuser_seg mov %_ASM_AX,(_DEST)
17702 #ifdef CONFIG_X86_32
17703-5: movl %edx,4(%_ASM_CX)
17704+5: __copyuser_seg movl %edx,4(_DEST)
17705 #endif
17706 xor %eax,%eax
17707 EXIT
17708diff -urNp linux-3.0.3/arch/x86/lib/usercopy_32.c linux-3.0.3/arch/x86/lib/usercopy_32.c
17709--- linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17710+++ linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17711@@ -43,7 +43,7 @@ do { \
17712 __asm__ __volatile__( \
17713 " testl %1,%1\n" \
17714 " jz 2f\n" \
17715- "0: lodsb\n" \
17716+ "0: "__copyuser_seg"lodsb\n" \
17717 " stosb\n" \
17718 " testb %%al,%%al\n" \
17719 " jz 1f\n" \
17720@@ -128,10 +128,12 @@ do { \
17721 int __d0; \
17722 might_fault(); \
17723 __asm__ __volatile__( \
17724+ __COPYUSER_SET_ES \
17725 "0: rep; stosl\n" \
17726 " movl %2,%0\n" \
17727 "1: rep; stosb\n" \
17728 "2:\n" \
17729+ __COPYUSER_RESTORE_ES \
17730 ".section .fixup,\"ax\"\n" \
17731 "3: lea 0(%2,%0,4),%0\n" \
17732 " jmp 2b\n" \
17733@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17734 might_fault();
17735
17736 __asm__ __volatile__(
17737+ __COPYUSER_SET_ES
17738 " testl %0, %0\n"
17739 " jz 3f\n"
17740 " andl %0,%%ecx\n"
17741@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17742 " subl %%ecx,%0\n"
17743 " addl %0,%%eax\n"
17744 "1:\n"
17745+ __COPYUSER_RESTORE_ES
17746 ".section .fixup,\"ax\"\n"
17747 "2: xorl %%eax,%%eax\n"
17748 " jmp 1b\n"
17749@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17750
17751 #ifdef CONFIG_X86_INTEL_USERCOPY
17752 static unsigned long
17753-__copy_user_intel(void __user *to, const void *from, unsigned long size)
17754+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17755 {
17756 int d0, d1;
17757 __asm__ __volatile__(
17758@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17759 " .align 2,0x90\n"
17760 "3: movl 0(%4), %%eax\n"
17761 "4: movl 4(%4), %%edx\n"
17762- "5: movl %%eax, 0(%3)\n"
17763- "6: movl %%edx, 4(%3)\n"
17764+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17765+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17766 "7: movl 8(%4), %%eax\n"
17767 "8: movl 12(%4),%%edx\n"
17768- "9: movl %%eax, 8(%3)\n"
17769- "10: movl %%edx, 12(%3)\n"
17770+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17771+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17772 "11: movl 16(%4), %%eax\n"
17773 "12: movl 20(%4), %%edx\n"
17774- "13: movl %%eax, 16(%3)\n"
17775- "14: movl %%edx, 20(%3)\n"
17776+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17777+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17778 "15: movl 24(%4), %%eax\n"
17779 "16: movl 28(%4), %%edx\n"
17780- "17: movl %%eax, 24(%3)\n"
17781- "18: movl %%edx, 28(%3)\n"
17782+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17783+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17784 "19: movl 32(%4), %%eax\n"
17785 "20: movl 36(%4), %%edx\n"
17786- "21: movl %%eax, 32(%3)\n"
17787- "22: movl %%edx, 36(%3)\n"
17788+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17789+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17790 "23: movl 40(%4), %%eax\n"
17791 "24: movl 44(%4), %%edx\n"
17792- "25: movl %%eax, 40(%3)\n"
17793- "26: movl %%edx, 44(%3)\n"
17794+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17795+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17796 "27: movl 48(%4), %%eax\n"
17797 "28: movl 52(%4), %%edx\n"
17798- "29: movl %%eax, 48(%3)\n"
17799- "30: movl %%edx, 52(%3)\n"
17800+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17801+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17802 "31: movl 56(%4), %%eax\n"
17803 "32: movl 60(%4), %%edx\n"
17804- "33: movl %%eax, 56(%3)\n"
17805- "34: movl %%edx, 60(%3)\n"
17806+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17807+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17808 " addl $-64, %0\n"
17809 " addl $64, %4\n"
17810 " addl $64, %3\n"
17811@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17812 " shrl $2, %0\n"
17813 " andl $3, %%eax\n"
17814 " cld\n"
17815+ __COPYUSER_SET_ES
17816 "99: rep; movsl\n"
17817 "36: movl %%eax, %0\n"
17818 "37: rep; movsb\n"
17819 "100:\n"
17820+ __COPYUSER_RESTORE_ES
17821+ ".section .fixup,\"ax\"\n"
17822+ "101: lea 0(%%eax,%0,4),%0\n"
17823+ " jmp 100b\n"
17824+ ".previous\n"
17825+ ".section __ex_table,\"a\"\n"
17826+ " .align 4\n"
17827+ " .long 1b,100b\n"
17828+ " .long 2b,100b\n"
17829+ " .long 3b,100b\n"
17830+ " .long 4b,100b\n"
17831+ " .long 5b,100b\n"
17832+ " .long 6b,100b\n"
17833+ " .long 7b,100b\n"
17834+ " .long 8b,100b\n"
17835+ " .long 9b,100b\n"
17836+ " .long 10b,100b\n"
17837+ " .long 11b,100b\n"
17838+ " .long 12b,100b\n"
17839+ " .long 13b,100b\n"
17840+ " .long 14b,100b\n"
17841+ " .long 15b,100b\n"
17842+ " .long 16b,100b\n"
17843+ " .long 17b,100b\n"
17844+ " .long 18b,100b\n"
17845+ " .long 19b,100b\n"
17846+ " .long 20b,100b\n"
17847+ " .long 21b,100b\n"
17848+ " .long 22b,100b\n"
17849+ " .long 23b,100b\n"
17850+ " .long 24b,100b\n"
17851+ " .long 25b,100b\n"
17852+ " .long 26b,100b\n"
17853+ " .long 27b,100b\n"
17854+ " .long 28b,100b\n"
17855+ " .long 29b,100b\n"
17856+ " .long 30b,100b\n"
17857+ " .long 31b,100b\n"
17858+ " .long 32b,100b\n"
17859+ " .long 33b,100b\n"
17860+ " .long 34b,100b\n"
17861+ " .long 35b,100b\n"
17862+ " .long 36b,100b\n"
17863+ " .long 37b,100b\n"
17864+ " .long 99b,101b\n"
17865+ ".previous"
17866+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
17867+ : "1"(to), "2"(from), "0"(size)
17868+ : "eax", "edx", "memory");
17869+ return size;
17870+}
17871+
17872+static unsigned long
17873+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17874+{
17875+ int d0, d1;
17876+ __asm__ __volatile__(
17877+ " .align 2,0x90\n"
17878+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17879+ " cmpl $67, %0\n"
17880+ " jbe 3f\n"
17881+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17882+ " .align 2,0x90\n"
17883+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17884+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17885+ "5: movl %%eax, 0(%3)\n"
17886+ "6: movl %%edx, 4(%3)\n"
17887+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17888+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17889+ "9: movl %%eax, 8(%3)\n"
17890+ "10: movl %%edx, 12(%3)\n"
17891+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17892+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17893+ "13: movl %%eax, 16(%3)\n"
17894+ "14: movl %%edx, 20(%3)\n"
17895+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17896+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17897+ "17: movl %%eax, 24(%3)\n"
17898+ "18: movl %%edx, 28(%3)\n"
17899+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17900+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17901+ "21: movl %%eax, 32(%3)\n"
17902+ "22: movl %%edx, 36(%3)\n"
17903+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17904+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17905+ "25: movl %%eax, 40(%3)\n"
17906+ "26: movl %%edx, 44(%3)\n"
17907+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17908+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17909+ "29: movl %%eax, 48(%3)\n"
17910+ "30: movl %%edx, 52(%3)\n"
17911+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17912+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17913+ "33: movl %%eax, 56(%3)\n"
17914+ "34: movl %%edx, 60(%3)\n"
17915+ " addl $-64, %0\n"
17916+ " addl $64, %4\n"
17917+ " addl $64, %3\n"
17918+ " cmpl $63, %0\n"
17919+ " ja 1b\n"
17920+ "35: movl %0, %%eax\n"
17921+ " shrl $2, %0\n"
17922+ " andl $3, %%eax\n"
17923+ " cld\n"
17924+ "99: rep; "__copyuser_seg" movsl\n"
17925+ "36: movl %%eax, %0\n"
17926+ "37: rep; "__copyuser_seg" movsb\n"
17927+ "100:\n"
17928 ".section .fixup,\"ax\"\n"
17929 "101: lea 0(%%eax,%0,4),%0\n"
17930 " jmp 100b\n"
17931@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17932 int d0, d1;
17933 __asm__ __volatile__(
17934 " .align 2,0x90\n"
17935- "0: movl 32(%4), %%eax\n"
17936+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17937 " cmpl $67, %0\n"
17938 " jbe 2f\n"
17939- "1: movl 64(%4), %%eax\n"
17940+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17941 " .align 2,0x90\n"
17942- "2: movl 0(%4), %%eax\n"
17943- "21: movl 4(%4), %%edx\n"
17944+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17945+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17946 " movl %%eax, 0(%3)\n"
17947 " movl %%edx, 4(%3)\n"
17948- "3: movl 8(%4), %%eax\n"
17949- "31: movl 12(%4),%%edx\n"
17950+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17951+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17952 " movl %%eax, 8(%3)\n"
17953 " movl %%edx, 12(%3)\n"
17954- "4: movl 16(%4), %%eax\n"
17955- "41: movl 20(%4), %%edx\n"
17956+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17957+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17958 " movl %%eax, 16(%3)\n"
17959 " movl %%edx, 20(%3)\n"
17960- "10: movl 24(%4), %%eax\n"
17961- "51: movl 28(%4), %%edx\n"
17962+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17963+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17964 " movl %%eax, 24(%3)\n"
17965 " movl %%edx, 28(%3)\n"
17966- "11: movl 32(%4), %%eax\n"
17967- "61: movl 36(%4), %%edx\n"
17968+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17969+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17970 " movl %%eax, 32(%3)\n"
17971 " movl %%edx, 36(%3)\n"
17972- "12: movl 40(%4), %%eax\n"
17973- "71: movl 44(%4), %%edx\n"
17974+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17975+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17976 " movl %%eax, 40(%3)\n"
17977 " movl %%edx, 44(%3)\n"
17978- "13: movl 48(%4), %%eax\n"
17979- "81: movl 52(%4), %%edx\n"
17980+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17981+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17982 " movl %%eax, 48(%3)\n"
17983 " movl %%edx, 52(%3)\n"
17984- "14: movl 56(%4), %%eax\n"
17985- "91: movl 60(%4), %%edx\n"
17986+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17987+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17988 " movl %%eax, 56(%3)\n"
17989 " movl %%edx, 60(%3)\n"
17990 " addl $-64, %0\n"
17991@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17992 " shrl $2, %0\n"
17993 " andl $3, %%eax\n"
17994 " cld\n"
17995- "6: rep; movsl\n"
17996+ "6: rep; "__copyuser_seg" movsl\n"
17997 " movl %%eax,%0\n"
17998- "7: rep; movsb\n"
17999+ "7: rep; "__copyuser_seg" movsb\n"
18000 "8:\n"
18001 ".section .fixup,\"ax\"\n"
18002 "9: lea 0(%%eax,%0,4),%0\n"
18003@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18004
18005 __asm__ __volatile__(
18006 " .align 2,0x90\n"
18007- "0: movl 32(%4), %%eax\n"
18008+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18009 " cmpl $67, %0\n"
18010 " jbe 2f\n"
18011- "1: movl 64(%4), %%eax\n"
18012+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18013 " .align 2,0x90\n"
18014- "2: movl 0(%4), %%eax\n"
18015- "21: movl 4(%4), %%edx\n"
18016+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18017+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18018 " movnti %%eax, 0(%3)\n"
18019 " movnti %%edx, 4(%3)\n"
18020- "3: movl 8(%4), %%eax\n"
18021- "31: movl 12(%4),%%edx\n"
18022+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18023+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18024 " movnti %%eax, 8(%3)\n"
18025 " movnti %%edx, 12(%3)\n"
18026- "4: movl 16(%4), %%eax\n"
18027- "41: movl 20(%4), %%edx\n"
18028+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18029+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18030 " movnti %%eax, 16(%3)\n"
18031 " movnti %%edx, 20(%3)\n"
18032- "10: movl 24(%4), %%eax\n"
18033- "51: movl 28(%4), %%edx\n"
18034+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18035+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18036 " movnti %%eax, 24(%3)\n"
18037 " movnti %%edx, 28(%3)\n"
18038- "11: movl 32(%4), %%eax\n"
18039- "61: movl 36(%4), %%edx\n"
18040+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18041+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18042 " movnti %%eax, 32(%3)\n"
18043 " movnti %%edx, 36(%3)\n"
18044- "12: movl 40(%4), %%eax\n"
18045- "71: movl 44(%4), %%edx\n"
18046+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18047+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18048 " movnti %%eax, 40(%3)\n"
18049 " movnti %%edx, 44(%3)\n"
18050- "13: movl 48(%4), %%eax\n"
18051- "81: movl 52(%4), %%edx\n"
18052+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18053+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18054 " movnti %%eax, 48(%3)\n"
18055 " movnti %%edx, 52(%3)\n"
18056- "14: movl 56(%4), %%eax\n"
18057- "91: movl 60(%4), %%edx\n"
18058+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18059+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18060 " movnti %%eax, 56(%3)\n"
18061 " movnti %%edx, 60(%3)\n"
18062 " addl $-64, %0\n"
18063@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18064 " shrl $2, %0\n"
18065 " andl $3, %%eax\n"
18066 " cld\n"
18067- "6: rep; movsl\n"
18068+ "6: rep; "__copyuser_seg" movsl\n"
18069 " movl %%eax,%0\n"
18070- "7: rep; movsb\n"
18071+ "7: rep; "__copyuser_seg" movsb\n"
18072 "8:\n"
18073 ".section .fixup,\"ax\"\n"
18074 "9: lea 0(%%eax,%0,4),%0\n"
18075@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18076
18077 __asm__ __volatile__(
18078 " .align 2,0x90\n"
18079- "0: movl 32(%4), %%eax\n"
18080+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18081 " cmpl $67, %0\n"
18082 " jbe 2f\n"
18083- "1: movl 64(%4), %%eax\n"
18084+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18085 " .align 2,0x90\n"
18086- "2: movl 0(%4), %%eax\n"
18087- "21: movl 4(%4), %%edx\n"
18088+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18089+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18090 " movnti %%eax, 0(%3)\n"
18091 " movnti %%edx, 4(%3)\n"
18092- "3: movl 8(%4), %%eax\n"
18093- "31: movl 12(%4),%%edx\n"
18094+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18095+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18096 " movnti %%eax, 8(%3)\n"
18097 " movnti %%edx, 12(%3)\n"
18098- "4: movl 16(%4), %%eax\n"
18099- "41: movl 20(%4), %%edx\n"
18100+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18101+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18102 " movnti %%eax, 16(%3)\n"
18103 " movnti %%edx, 20(%3)\n"
18104- "10: movl 24(%4), %%eax\n"
18105- "51: movl 28(%4), %%edx\n"
18106+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18107+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18108 " movnti %%eax, 24(%3)\n"
18109 " movnti %%edx, 28(%3)\n"
18110- "11: movl 32(%4), %%eax\n"
18111- "61: movl 36(%4), %%edx\n"
18112+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18113+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18114 " movnti %%eax, 32(%3)\n"
18115 " movnti %%edx, 36(%3)\n"
18116- "12: movl 40(%4), %%eax\n"
18117- "71: movl 44(%4), %%edx\n"
18118+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18119+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18120 " movnti %%eax, 40(%3)\n"
18121 " movnti %%edx, 44(%3)\n"
18122- "13: movl 48(%4), %%eax\n"
18123- "81: movl 52(%4), %%edx\n"
18124+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18125+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18126 " movnti %%eax, 48(%3)\n"
18127 " movnti %%edx, 52(%3)\n"
18128- "14: movl 56(%4), %%eax\n"
18129- "91: movl 60(%4), %%edx\n"
18130+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18131+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18132 " movnti %%eax, 56(%3)\n"
18133 " movnti %%edx, 60(%3)\n"
18134 " addl $-64, %0\n"
18135@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18136 " shrl $2, %0\n"
18137 " andl $3, %%eax\n"
18138 " cld\n"
18139- "6: rep; movsl\n"
18140+ "6: rep; "__copyuser_seg" movsl\n"
18141 " movl %%eax,%0\n"
18142- "7: rep; movsb\n"
18143+ "7: rep; "__copyuser_seg" movsb\n"
18144 "8:\n"
18145 ".section .fixup,\"ax\"\n"
18146 "9: lea 0(%%eax,%0,4),%0\n"
18147@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18148 */
18149 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18150 unsigned long size);
18151-unsigned long __copy_user_intel(void __user *to, const void *from,
18152+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18153+ unsigned long size);
18154+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18155 unsigned long size);
18156 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18157 const void __user *from, unsigned long size);
18158 #endif /* CONFIG_X86_INTEL_USERCOPY */
18159
18160 /* Generic arbitrary sized copy. */
18161-#define __copy_user(to, from, size) \
18162+#define __copy_user(to, from, size, prefix, set, restore) \
18163 do { \
18164 int __d0, __d1, __d2; \
18165 __asm__ __volatile__( \
18166+ set \
18167 " cmp $7,%0\n" \
18168 " jbe 1f\n" \
18169 " movl %1,%0\n" \
18170 " negl %0\n" \
18171 " andl $7,%0\n" \
18172 " subl %0,%3\n" \
18173- "4: rep; movsb\n" \
18174+ "4: rep; "prefix"movsb\n" \
18175 " movl %3,%0\n" \
18176 " shrl $2,%0\n" \
18177 " andl $3,%3\n" \
18178 " .align 2,0x90\n" \
18179- "0: rep; movsl\n" \
18180+ "0: rep; "prefix"movsl\n" \
18181 " movl %3,%0\n" \
18182- "1: rep; movsb\n" \
18183+ "1: rep; "prefix"movsb\n" \
18184 "2:\n" \
18185+ restore \
18186 ".section .fixup,\"ax\"\n" \
18187 "5: addl %3,%0\n" \
18188 " jmp 2b\n" \
18189@@ -682,14 +799,14 @@ do { \
18190 " negl %0\n" \
18191 " andl $7,%0\n" \
18192 " subl %0,%3\n" \
18193- "4: rep; movsb\n" \
18194+ "4: rep; "__copyuser_seg"movsb\n" \
18195 " movl %3,%0\n" \
18196 " shrl $2,%0\n" \
18197 " andl $3,%3\n" \
18198 " .align 2,0x90\n" \
18199- "0: rep; movsl\n" \
18200+ "0: rep; "__copyuser_seg"movsl\n" \
18201 " movl %3,%0\n" \
18202- "1: rep; movsb\n" \
18203+ "1: rep; "__copyuser_seg"movsb\n" \
18204 "2:\n" \
18205 ".section .fixup,\"ax\"\n" \
18206 "5: addl %3,%0\n" \
18207@@ -775,9 +892,9 @@ survive:
18208 }
18209 #endif
18210 if (movsl_is_ok(to, from, n))
18211- __copy_user(to, from, n);
18212+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18213 else
18214- n = __copy_user_intel(to, from, n);
18215+ n = __generic_copy_to_user_intel(to, from, n);
18216 return n;
18217 }
18218 EXPORT_SYMBOL(__copy_to_user_ll);
18219@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18220 unsigned long n)
18221 {
18222 if (movsl_is_ok(to, from, n))
18223- __copy_user(to, from, n);
18224+ __copy_user(to, from, n, __copyuser_seg, "", "");
18225 else
18226- n = __copy_user_intel((void __user *)to,
18227- (const void *)from, n);
18228+ n = __generic_copy_from_user_intel(to, from, n);
18229 return n;
18230 }
18231 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18232@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18233 if (n > 64 && cpu_has_xmm2)
18234 n = __copy_user_intel_nocache(to, from, n);
18235 else
18236- __copy_user(to, from, n);
18237+ __copy_user(to, from, n, __copyuser_seg, "", "");
18238 #else
18239- __copy_user(to, from, n);
18240+ __copy_user(to, from, n, __copyuser_seg, "", "");
18241 #endif
18242 return n;
18243 }
18244 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18245
18246-/**
18247- * copy_to_user: - Copy a block of data into user space.
18248- * @to: Destination address, in user space.
18249- * @from: Source address, in kernel space.
18250- * @n: Number of bytes to copy.
18251- *
18252- * Context: User context only. This function may sleep.
18253- *
18254- * Copy data from kernel space to user space.
18255- *
18256- * Returns number of bytes that could not be copied.
18257- * On success, this will be zero.
18258- */
18259-unsigned long
18260-copy_to_user(void __user *to, const void *from, unsigned long n)
18261+void copy_from_user_overflow(void)
18262 {
18263- if (access_ok(VERIFY_WRITE, to, n))
18264- n = __copy_to_user(to, from, n);
18265- return n;
18266+ WARN(1, "Buffer overflow detected!\n");
18267 }
18268-EXPORT_SYMBOL(copy_to_user);
18269+EXPORT_SYMBOL(copy_from_user_overflow);
18270
18271-/**
18272- * copy_from_user: - Copy a block of data from user space.
18273- * @to: Destination address, in kernel space.
18274- * @from: Source address, in user space.
18275- * @n: Number of bytes to copy.
18276- *
18277- * Context: User context only. This function may sleep.
18278- *
18279- * Copy data from user space to kernel space.
18280- *
18281- * Returns number of bytes that could not be copied.
18282- * On success, this will be zero.
18283- *
18284- * If some data could not be copied, this function will pad the copied
18285- * data to the requested size using zero bytes.
18286- */
18287-unsigned long
18288-_copy_from_user(void *to, const void __user *from, unsigned long n)
18289+void copy_to_user_overflow(void)
18290 {
18291- if (access_ok(VERIFY_READ, from, n))
18292- n = __copy_from_user(to, from, n);
18293- else
18294- memset(to, 0, n);
18295- return n;
18296+ WARN(1, "Buffer overflow detected!\n");
18297 }
18298-EXPORT_SYMBOL(_copy_from_user);
18299+EXPORT_SYMBOL(copy_to_user_overflow);
18300
18301-void copy_from_user_overflow(void)
18302+#ifdef CONFIG_PAX_MEMORY_UDEREF
18303+void __set_fs(mm_segment_t x)
18304 {
18305- WARN(1, "Buffer overflow detected!\n");
18306+ switch (x.seg) {
18307+ case 0:
18308+ loadsegment(gs, 0);
18309+ break;
18310+ case TASK_SIZE_MAX:
18311+ loadsegment(gs, __USER_DS);
18312+ break;
18313+ case -1UL:
18314+ loadsegment(gs, __KERNEL_DS);
18315+ break;
18316+ default:
18317+ BUG();
18318+ }
18319+ return;
18320 }
18321-EXPORT_SYMBOL(copy_from_user_overflow);
18322+EXPORT_SYMBOL(__set_fs);
18323+
18324+void set_fs(mm_segment_t x)
18325+{
18326+ current_thread_info()->addr_limit = x;
18327+ __set_fs(x);
18328+}
18329+EXPORT_SYMBOL(set_fs);
18330+#endif
18331diff -urNp linux-3.0.3/arch/x86/lib/usercopy_64.c linux-3.0.3/arch/x86/lib/usercopy_64.c
18332--- linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18333+++ linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18334@@ -42,6 +42,12 @@ long
18335 __strncpy_from_user(char *dst, const char __user *src, long count)
18336 {
18337 long res;
18338+
18339+#ifdef CONFIG_PAX_MEMORY_UDEREF
18340+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18341+ src += PAX_USER_SHADOW_BASE;
18342+#endif
18343+
18344 __do_strncpy_from_user(dst, src, count, res);
18345 return res;
18346 }
18347@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18348 {
18349 long __d0;
18350 might_fault();
18351+
18352+#ifdef CONFIG_PAX_MEMORY_UDEREF
18353+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18354+ addr += PAX_USER_SHADOW_BASE;
18355+#endif
18356+
18357 /* no memory constraint because it doesn't change any memory gcc knows
18358 about */
18359 asm volatile(
18360@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18361
18362 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18363 {
18364- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18365+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18366+
18367+#ifdef CONFIG_PAX_MEMORY_UDEREF
18368+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18369+ to += PAX_USER_SHADOW_BASE;
18370+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18371+ from += PAX_USER_SHADOW_BASE;
18372+#endif
18373+
18374 return copy_user_generic((__force void *)to, (__force void *)from, len);
18375- }
18376- return len;
18377+ }
18378+ return len;
18379 }
18380 EXPORT_SYMBOL(copy_in_user);
18381
18382diff -urNp linux-3.0.3/arch/x86/Makefile linux-3.0.3/arch/x86/Makefile
18383--- linux-3.0.3/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18384+++ linux-3.0.3/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18385@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18386 else
18387 BITS := 64
18388 UTS_MACHINE := x86_64
18389+ biarch := $(call cc-option,-m64)
18390 CHECKFLAGS += -D__x86_64__ -m64
18391
18392 KBUILD_AFLAGS += -m64
18393@@ -195,3 +196,12 @@ define archhelp
18394 echo ' FDARGS="..." arguments for the booted kernel'
18395 echo ' FDINITRD=file initrd for the booted kernel'
18396 endef
18397+
18398+define OLD_LD
18399+
18400+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18401+*** Please upgrade your binutils to 2.18 or newer
18402+endef
18403+
18404+archprepare:
18405+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18406diff -urNp linux-3.0.3/arch/x86/mm/extable.c linux-3.0.3/arch/x86/mm/extable.c
18407--- linux-3.0.3/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18408+++ linux-3.0.3/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18409@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18410 const struct exception_table_entry *fixup;
18411
18412 #ifdef CONFIG_PNPBIOS
18413- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18414+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18415 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18416 extern u32 pnp_bios_is_utter_crap;
18417 pnp_bios_is_utter_crap = 1;
18418diff -urNp linux-3.0.3/arch/x86/mm/fault.c linux-3.0.3/arch/x86/mm/fault.c
18419--- linux-3.0.3/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18420+++ linux-3.0.3/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18421@@ -13,10 +13,18 @@
18422 #include <linux/perf_event.h> /* perf_sw_event */
18423 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18424 #include <linux/prefetch.h> /* prefetchw */
18425+#include <linux/unistd.h>
18426+#include <linux/compiler.h>
18427
18428 #include <asm/traps.h> /* dotraplinkage, ... */
18429 #include <asm/pgalloc.h> /* pgd_*(), ... */
18430 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18431+#include <asm/vsyscall.h>
18432+#include <asm/tlbflush.h>
18433+
18434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18435+#include <asm/stacktrace.h>
18436+#endif
18437
18438 /*
18439 * Page fault error code bits:
18440@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18441 int ret = 0;
18442
18443 /* kprobe_running() needs smp_processor_id() */
18444- if (kprobes_built_in() && !user_mode_vm(regs)) {
18445+ if (kprobes_built_in() && !user_mode(regs)) {
18446 preempt_disable();
18447 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18448 ret = 1;
18449@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18450 return !instr_lo || (instr_lo>>1) == 1;
18451 case 0x00:
18452 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18453- if (probe_kernel_address(instr, opcode))
18454+ if (user_mode(regs)) {
18455+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18456+ return 0;
18457+ } else if (probe_kernel_address(instr, opcode))
18458 return 0;
18459
18460 *prefetch = (instr_lo == 0xF) &&
18461@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18462 while (instr < max_instr) {
18463 unsigned char opcode;
18464
18465- if (probe_kernel_address(instr, opcode))
18466+ if (user_mode(regs)) {
18467+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18468+ break;
18469+ } else if (probe_kernel_address(instr, opcode))
18470 break;
18471
18472 instr++;
18473@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18474 force_sig_info(si_signo, &info, tsk);
18475 }
18476
18477+#ifdef CONFIG_PAX_EMUTRAMP
18478+static int pax_handle_fetch_fault(struct pt_regs *regs);
18479+#endif
18480+
18481+#ifdef CONFIG_PAX_PAGEEXEC
18482+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18483+{
18484+ pgd_t *pgd;
18485+ pud_t *pud;
18486+ pmd_t *pmd;
18487+
18488+ pgd = pgd_offset(mm, address);
18489+ if (!pgd_present(*pgd))
18490+ return NULL;
18491+ pud = pud_offset(pgd, address);
18492+ if (!pud_present(*pud))
18493+ return NULL;
18494+ pmd = pmd_offset(pud, address);
18495+ if (!pmd_present(*pmd))
18496+ return NULL;
18497+ return pmd;
18498+}
18499+#endif
18500+
18501 DEFINE_SPINLOCK(pgd_lock);
18502 LIST_HEAD(pgd_list);
18503
18504@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18505 for (address = VMALLOC_START & PMD_MASK;
18506 address >= TASK_SIZE && address < FIXADDR_TOP;
18507 address += PMD_SIZE) {
18508+
18509+#ifdef CONFIG_PAX_PER_CPU_PGD
18510+ unsigned long cpu;
18511+#else
18512 struct page *page;
18513+#endif
18514
18515 spin_lock(&pgd_lock);
18516+
18517+#ifdef CONFIG_PAX_PER_CPU_PGD
18518+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18519+ pgd_t *pgd = get_cpu_pgd(cpu);
18520+ pmd_t *ret;
18521+#else
18522 list_for_each_entry(page, &pgd_list, lru) {
18523+ pgd_t *pgd = page_address(page);
18524 spinlock_t *pgt_lock;
18525 pmd_t *ret;
18526
18527@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18528 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18529
18530 spin_lock(pgt_lock);
18531- ret = vmalloc_sync_one(page_address(page), address);
18532+#endif
18533+
18534+ ret = vmalloc_sync_one(pgd, address);
18535+
18536+#ifndef CONFIG_PAX_PER_CPU_PGD
18537 spin_unlock(pgt_lock);
18538+#endif
18539
18540 if (!ret)
18541 break;
18542@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18543 * an interrupt in the middle of a task switch..
18544 */
18545 pgd_paddr = read_cr3();
18546+
18547+#ifdef CONFIG_PAX_PER_CPU_PGD
18548+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18549+#endif
18550+
18551 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18552 if (!pmd_k)
18553 return -1;
18554@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18555 * happen within a race in page table update. In the later
18556 * case just flush:
18557 */
18558+
18559+#ifdef CONFIG_PAX_PER_CPU_PGD
18560+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18561+ pgd = pgd_offset_cpu(smp_processor_id(), address);
18562+#else
18563 pgd = pgd_offset(current->active_mm, address);
18564+#endif
18565+
18566 pgd_ref = pgd_offset_k(address);
18567 if (pgd_none(*pgd_ref))
18568 return -1;
18569@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18570 static int is_errata100(struct pt_regs *regs, unsigned long address)
18571 {
18572 #ifdef CONFIG_X86_64
18573- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18574+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18575 return 1;
18576 #endif
18577 return 0;
18578@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18579 }
18580
18581 static const char nx_warning[] = KERN_CRIT
18582-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18583+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18584
18585 static void
18586 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18587@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18588 if (!oops_may_print())
18589 return;
18590
18591- if (error_code & PF_INSTR) {
18592+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18593 unsigned int level;
18594
18595 pte_t *pte = lookup_address(address, &level);
18596
18597 if (pte && pte_present(*pte) && !pte_exec(*pte))
18598- printk(nx_warning, current_uid());
18599+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18600+ }
18601+
18602+#ifdef CONFIG_PAX_KERNEXEC
18603+ if (init_mm.start_code <= address && address < init_mm.end_code) {
18604+ if (current->signal->curr_ip)
18605+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18606+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18607+ else
18608+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18609+ current->comm, task_pid_nr(current), current_uid(), current_euid());
18610 }
18611+#endif
18612
18613 printk(KERN_ALERT "BUG: unable to handle kernel ");
18614 if (address < PAGE_SIZE)
18615@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18616 unsigned long address, int si_code)
18617 {
18618 struct task_struct *tsk = current;
18619+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18620+ struct mm_struct *mm = tsk->mm;
18621+#endif
18622+
18623+#ifdef CONFIG_X86_64
18624+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18625+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18626+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18627+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18628+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18629+ return;
18630+ }
18631+ }
18632+#endif
18633+
18634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18635+ if (mm && (error_code & PF_USER)) {
18636+ unsigned long ip = regs->ip;
18637+
18638+ if (v8086_mode(regs))
18639+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18640+
18641+ /*
18642+ * It's possible to have interrupts off here:
18643+ */
18644+ local_irq_enable();
18645+
18646+#ifdef CONFIG_PAX_PAGEEXEC
18647+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18648+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18649+
18650+#ifdef CONFIG_PAX_EMUTRAMP
18651+ switch (pax_handle_fetch_fault(regs)) {
18652+ case 2:
18653+ return;
18654+ }
18655+#endif
18656+
18657+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18658+ do_group_exit(SIGKILL);
18659+ }
18660+#endif
18661+
18662+#ifdef CONFIG_PAX_SEGMEXEC
18663+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18664+
18665+#ifdef CONFIG_PAX_EMUTRAMP
18666+ switch (pax_handle_fetch_fault(regs)) {
18667+ case 2:
18668+ return;
18669+ }
18670+#endif
18671+
18672+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18673+ do_group_exit(SIGKILL);
18674+ }
18675+#endif
18676+
18677+ }
18678+#endif
18679
18680 /* User mode accesses just cause a SIGSEGV */
18681 if (error_code & PF_USER) {
18682@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18683 return 1;
18684 }
18685
18686+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18687+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18688+{
18689+ pte_t *pte;
18690+ pmd_t *pmd;
18691+ spinlock_t *ptl;
18692+ unsigned char pte_mask;
18693+
18694+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18695+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
18696+ return 0;
18697+
18698+ /* PaX: it's our fault, let's handle it if we can */
18699+
18700+ /* PaX: take a look at read faults before acquiring any locks */
18701+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18702+ /* instruction fetch attempt from a protected page in user mode */
18703+ up_read(&mm->mmap_sem);
18704+
18705+#ifdef CONFIG_PAX_EMUTRAMP
18706+ switch (pax_handle_fetch_fault(regs)) {
18707+ case 2:
18708+ return 1;
18709+ }
18710+#endif
18711+
18712+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18713+ do_group_exit(SIGKILL);
18714+ }
18715+
18716+ pmd = pax_get_pmd(mm, address);
18717+ if (unlikely(!pmd))
18718+ return 0;
18719+
18720+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18721+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18722+ pte_unmap_unlock(pte, ptl);
18723+ return 0;
18724+ }
18725+
18726+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18727+ /* write attempt to a protected page in user mode */
18728+ pte_unmap_unlock(pte, ptl);
18729+ return 0;
18730+ }
18731+
18732+#ifdef CONFIG_SMP
18733+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18734+#else
18735+ if (likely(address > get_limit(regs->cs)))
18736+#endif
18737+ {
18738+ set_pte(pte, pte_mkread(*pte));
18739+ __flush_tlb_one(address);
18740+ pte_unmap_unlock(pte, ptl);
18741+ up_read(&mm->mmap_sem);
18742+ return 1;
18743+ }
18744+
18745+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18746+
18747+ /*
18748+ * PaX: fill DTLB with user rights and retry
18749+ */
18750+ __asm__ __volatile__ (
18751+ "orb %2,(%1)\n"
18752+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18753+/*
18754+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18755+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18756+ * page fault when examined during a TLB load attempt. this is true not only
18757+ * for PTEs holding a non-present entry but also present entries that will
18758+ * raise a page fault (such as those set up by PaX, or the copy-on-write
18759+ * mechanism). in effect it means that we do *not* need to flush the TLBs
18760+ * for our target pages since their PTEs are simply not in the TLBs at all.
18761+
18762+ * the best thing in omitting it is that we gain around 15-20% speed in the
18763+ * fast path of the page fault handler and can get rid of tracing since we
18764+ * can no longer flush unintended entries.
18765+ */
18766+ "invlpg (%0)\n"
18767+#endif
18768+ __copyuser_seg"testb $0,(%0)\n"
18769+ "xorb %3,(%1)\n"
18770+ :
18771+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18772+ : "memory", "cc");
18773+ pte_unmap_unlock(pte, ptl);
18774+ up_read(&mm->mmap_sem);
18775+ return 1;
18776+}
18777+#endif
18778+
18779 /*
18780 * Handle a spurious fault caused by a stale TLB entry.
18781 *
18782@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18783 static inline int
18784 access_error(unsigned long error_code, struct vm_area_struct *vma)
18785 {
18786+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18787+ return 1;
18788+
18789 if (error_code & PF_WRITE) {
18790 /* write, present and write, not present: */
18791 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18792@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18793 {
18794 struct vm_area_struct *vma;
18795 struct task_struct *tsk;
18796- unsigned long address;
18797 struct mm_struct *mm;
18798 int fault;
18799 int write = error_code & PF_WRITE;
18800 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18801 (write ? FAULT_FLAG_WRITE : 0);
18802
18803+ /* Get the faulting address: */
18804+ unsigned long address = read_cr2();
18805+
18806+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18807+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18808+ if (!search_exception_tables(regs->ip)) {
18809+ bad_area_nosemaphore(regs, error_code, address);
18810+ return;
18811+ }
18812+ if (address < PAX_USER_SHADOW_BASE) {
18813+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18814+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18815+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18816+ } else
18817+ address -= PAX_USER_SHADOW_BASE;
18818+ }
18819+#endif
18820+
18821 tsk = current;
18822 mm = tsk->mm;
18823
18824- /* Get the faulting address: */
18825- address = read_cr2();
18826-
18827 /*
18828 * Detect and handle instructions that would cause a page fault for
18829 * both a tracked kernel page and a userspace page.
18830@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18831 * User-mode registers count as a user access even for any
18832 * potential system fault or CPU buglet:
18833 */
18834- if (user_mode_vm(regs)) {
18835+ if (user_mode(regs)) {
18836 local_irq_enable();
18837 error_code |= PF_USER;
18838 } else {
18839@@ -1103,6 +1351,11 @@ retry:
18840 might_sleep();
18841 }
18842
18843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18844+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18845+ return;
18846+#endif
18847+
18848 vma = find_vma(mm, address);
18849 if (unlikely(!vma)) {
18850 bad_area(regs, error_code, address);
18851@@ -1114,18 +1367,24 @@ retry:
18852 bad_area(regs, error_code, address);
18853 return;
18854 }
18855- if (error_code & PF_USER) {
18856- /*
18857- * Accessing the stack below %sp is always a bug.
18858- * The large cushion allows instructions like enter
18859- * and pusha to work. ("enter $65535, $31" pushes
18860- * 32 pointers and then decrements %sp by 65535.)
18861- */
18862- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18863- bad_area(regs, error_code, address);
18864- return;
18865- }
18866+ /*
18867+ * Accessing the stack below %sp is always a bug.
18868+ * The large cushion allows instructions like enter
18869+ * and pusha to work. ("enter $65535, $31" pushes
18870+ * 32 pointers and then decrements %sp by 65535.)
18871+ */
18872+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18873+ bad_area(regs, error_code, address);
18874+ return;
18875 }
18876+
18877+#ifdef CONFIG_PAX_SEGMEXEC
18878+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18879+ bad_area(regs, error_code, address);
18880+ return;
18881+ }
18882+#endif
18883+
18884 if (unlikely(expand_stack(vma, address))) {
18885 bad_area(regs, error_code, address);
18886 return;
18887@@ -1180,3 +1439,199 @@ good_area:
18888
18889 up_read(&mm->mmap_sem);
18890 }
18891+
18892+#ifdef CONFIG_PAX_EMUTRAMP
18893+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18894+{
18895+ int err;
18896+
18897+ do { /* PaX: gcc trampoline emulation #1 */
18898+ unsigned char mov1, mov2;
18899+ unsigned short jmp;
18900+ unsigned int addr1, addr2;
18901+
18902+#ifdef CONFIG_X86_64
18903+ if ((regs->ip + 11) >> 32)
18904+ break;
18905+#endif
18906+
18907+ err = get_user(mov1, (unsigned char __user *)regs->ip);
18908+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18909+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18910+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18911+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18912+
18913+ if (err)
18914+ break;
18915+
18916+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18917+ regs->cx = addr1;
18918+ regs->ax = addr2;
18919+ regs->ip = addr2;
18920+ return 2;
18921+ }
18922+ } while (0);
18923+
18924+ do { /* PaX: gcc trampoline emulation #2 */
18925+ unsigned char mov, jmp;
18926+ unsigned int addr1, addr2;
18927+
18928+#ifdef CONFIG_X86_64
18929+ if ((regs->ip + 9) >> 32)
18930+ break;
18931+#endif
18932+
18933+ err = get_user(mov, (unsigned char __user *)regs->ip);
18934+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18935+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18936+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18937+
18938+ if (err)
18939+ break;
18940+
18941+ if (mov == 0xB9 && jmp == 0xE9) {
18942+ regs->cx = addr1;
18943+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18944+ return 2;
18945+ }
18946+ } while (0);
18947+
18948+ return 1; /* PaX in action */
18949+}
18950+
18951+#ifdef CONFIG_X86_64
18952+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18953+{
18954+ int err;
18955+
18956+ do { /* PaX: gcc trampoline emulation #1 */
18957+ unsigned short mov1, mov2, jmp1;
18958+ unsigned char jmp2;
18959+ unsigned int addr1;
18960+ unsigned long addr2;
18961+
18962+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18963+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18964+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18965+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18966+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18967+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18968+
18969+ if (err)
18970+ break;
18971+
18972+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18973+ regs->r11 = addr1;
18974+ regs->r10 = addr2;
18975+ regs->ip = addr1;
18976+ return 2;
18977+ }
18978+ } while (0);
18979+
18980+ do { /* PaX: gcc trampoline emulation #2 */
18981+ unsigned short mov1, mov2, jmp1;
18982+ unsigned char jmp2;
18983+ unsigned long addr1, addr2;
18984+
18985+ err = get_user(mov1, (unsigned short __user *)regs->ip);
18986+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18987+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18988+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18989+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18990+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18991+
18992+ if (err)
18993+ break;
18994+
18995+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18996+ regs->r11 = addr1;
18997+ regs->r10 = addr2;
18998+ regs->ip = addr1;
18999+ return 2;
19000+ }
19001+ } while (0);
19002+
19003+ return 1; /* PaX in action */
19004+}
19005+#endif
19006+
19007+/*
19008+ * PaX: decide what to do with offenders (regs->ip = fault address)
19009+ *
19010+ * returns 1 when task should be killed
19011+ * 2 when gcc trampoline was detected
19012+ */
19013+static int pax_handle_fetch_fault(struct pt_regs *regs)
19014+{
19015+ if (v8086_mode(regs))
19016+ return 1;
19017+
19018+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19019+ return 1;
19020+
19021+#ifdef CONFIG_X86_32
19022+ return pax_handle_fetch_fault_32(regs);
19023+#else
19024+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19025+ return pax_handle_fetch_fault_32(regs);
19026+ else
19027+ return pax_handle_fetch_fault_64(regs);
19028+#endif
19029+}
19030+#endif
19031+
19032+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19033+void pax_report_insns(void *pc, void *sp)
19034+{
19035+ long i;
19036+
19037+ printk(KERN_ERR "PAX: bytes at PC: ");
19038+ for (i = 0; i < 20; i++) {
19039+ unsigned char c;
19040+ if (get_user(c, (__force unsigned char __user *)pc+i))
19041+ printk(KERN_CONT "?? ");
19042+ else
19043+ printk(KERN_CONT "%02x ", c);
19044+ }
19045+ printk("\n");
19046+
19047+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19048+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19049+ unsigned long c;
19050+ if (get_user(c, (__force unsigned long __user *)sp+i))
19051+#ifdef CONFIG_X86_32
19052+ printk(KERN_CONT "???????? ");
19053+#else
19054+ printk(KERN_CONT "???????????????? ");
19055+#endif
19056+ else
19057+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19058+ }
19059+ printk("\n");
19060+}
19061+#endif
19062+
19063+/**
19064+ * probe_kernel_write(): safely attempt to write to a location
19065+ * @dst: address to write to
19066+ * @src: pointer to the data that shall be written
19067+ * @size: size of the data chunk
19068+ *
19069+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19070+ * happens, handle that and return -EFAULT.
19071+ */
19072+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19073+{
19074+ long ret;
19075+ mm_segment_t old_fs = get_fs();
19076+
19077+ set_fs(KERNEL_DS);
19078+ pagefault_disable();
19079+ pax_open_kernel();
19080+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19081+ pax_close_kernel();
19082+ pagefault_enable();
19083+ set_fs(old_fs);
19084+
19085+ return ret ? -EFAULT : 0;
19086+}
19087diff -urNp linux-3.0.3/arch/x86/mm/gup.c linux-3.0.3/arch/x86/mm/gup.c
19088--- linux-3.0.3/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19089+++ linux-3.0.3/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19090@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19091 addr = start;
19092 len = (unsigned long) nr_pages << PAGE_SHIFT;
19093 end = start + len;
19094- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19095+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19096 (void __user *)start, len)))
19097 return 0;
19098
19099diff -urNp linux-3.0.3/arch/x86/mm/highmem_32.c linux-3.0.3/arch/x86/mm/highmem_32.c
19100--- linux-3.0.3/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19101+++ linux-3.0.3/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19102@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19103 idx = type + KM_TYPE_NR*smp_processor_id();
19104 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19105 BUG_ON(!pte_none(*(kmap_pte-idx)));
19106+
19107+ pax_open_kernel();
19108 set_pte(kmap_pte-idx, mk_pte(page, prot));
19109+ pax_close_kernel();
19110
19111 return (void *)vaddr;
19112 }
19113diff -urNp linux-3.0.3/arch/x86/mm/hugetlbpage.c linux-3.0.3/arch/x86/mm/hugetlbpage.c
19114--- linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19115+++ linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19116@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19117 struct hstate *h = hstate_file(file);
19118 struct mm_struct *mm = current->mm;
19119 struct vm_area_struct *vma;
19120- unsigned long start_addr;
19121+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19122+
19123+#ifdef CONFIG_PAX_SEGMEXEC
19124+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19125+ pax_task_size = SEGMEXEC_TASK_SIZE;
19126+#endif
19127+
19128+ pax_task_size -= PAGE_SIZE;
19129
19130 if (len > mm->cached_hole_size) {
19131- start_addr = mm->free_area_cache;
19132+ start_addr = mm->free_area_cache;
19133 } else {
19134- start_addr = TASK_UNMAPPED_BASE;
19135- mm->cached_hole_size = 0;
19136+ start_addr = mm->mmap_base;
19137+ mm->cached_hole_size = 0;
19138 }
19139
19140 full_search:
19141@@ -280,26 +287,27 @@ full_search:
19142
19143 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19144 /* At this point: (!vma || addr < vma->vm_end). */
19145- if (TASK_SIZE - len < addr) {
19146+ if (pax_task_size - len < addr) {
19147 /*
19148 * Start a new search - just in case we missed
19149 * some holes.
19150 */
19151- if (start_addr != TASK_UNMAPPED_BASE) {
19152- start_addr = TASK_UNMAPPED_BASE;
19153+ if (start_addr != mm->mmap_base) {
19154+ start_addr = mm->mmap_base;
19155 mm->cached_hole_size = 0;
19156 goto full_search;
19157 }
19158 return -ENOMEM;
19159 }
19160- if (!vma || addr + len <= vma->vm_start) {
19161- mm->free_area_cache = addr + len;
19162- return addr;
19163- }
19164+ if (check_heap_stack_gap(vma, addr, len))
19165+ break;
19166 if (addr + mm->cached_hole_size < vma->vm_start)
19167 mm->cached_hole_size = vma->vm_start - addr;
19168 addr = ALIGN(vma->vm_end, huge_page_size(h));
19169 }
19170+
19171+ mm->free_area_cache = addr + len;
19172+ return addr;
19173 }
19174
19175 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19176@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19177 {
19178 struct hstate *h = hstate_file(file);
19179 struct mm_struct *mm = current->mm;
19180- struct vm_area_struct *vma, *prev_vma;
19181- unsigned long base = mm->mmap_base, addr = addr0;
19182+ struct vm_area_struct *vma;
19183+ unsigned long base = mm->mmap_base, addr;
19184 unsigned long largest_hole = mm->cached_hole_size;
19185- int first_time = 1;
19186
19187 /* don't allow allocations above current base */
19188 if (mm->free_area_cache > base)
19189@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19190 largest_hole = 0;
19191 mm->free_area_cache = base;
19192 }
19193-try_again:
19194+
19195 /* make sure it can fit in the remaining address space */
19196 if (mm->free_area_cache < len)
19197 goto fail;
19198
19199 /* either no address requested or can't fit in requested address hole */
19200- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19201+ addr = (mm->free_area_cache - len);
19202 do {
19203+ addr &= huge_page_mask(h);
19204+ vma = find_vma(mm, addr);
19205 /*
19206 * Lookup failure means no vma is above this address,
19207 * i.e. return with success:
19208- */
19209- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19210- return addr;
19211-
19212- /*
19213 * new region fits between prev_vma->vm_end and
19214 * vma->vm_start, use it:
19215 */
19216- if (addr + len <= vma->vm_start &&
19217- (!prev_vma || (addr >= prev_vma->vm_end))) {
19218+ if (check_heap_stack_gap(vma, addr, len)) {
19219 /* remember the address as a hint for next time */
19220- mm->cached_hole_size = largest_hole;
19221- return (mm->free_area_cache = addr);
19222- } else {
19223- /* pull free_area_cache down to the first hole */
19224- if (mm->free_area_cache == vma->vm_end) {
19225- mm->free_area_cache = vma->vm_start;
19226- mm->cached_hole_size = largest_hole;
19227- }
19228+ mm->cached_hole_size = largest_hole;
19229+ return (mm->free_area_cache = addr);
19230+ }
19231+ /* pull free_area_cache down to the first hole */
19232+ if (mm->free_area_cache == vma->vm_end) {
19233+ mm->free_area_cache = vma->vm_start;
19234+ mm->cached_hole_size = largest_hole;
19235 }
19236
19237 /* remember the largest hole we saw so far */
19238 if (addr + largest_hole < vma->vm_start)
19239- largest_hole = vma->vm_start - addr;
19240+ largest_hole = vma->vm_start - addr;
19241
19242 /* try just below the current vma->vm_start */
19243- addr = (vma->vm_start - len) & huge_page_mask(h);
19244- } while (len <= vma->vm_start);
19245+ addr = skip_heap_stack_gap(vma, len);
19246+ } while (!IS_ERR_VALUE(addr));
19247
19248 fail:
19249 /*
19250- * if hint left us with no space for the requested
19251- * mapping then try again:
19252- */
19253- if (first_time) {
19254- mm->free_area_cache = base;
19255- largest_hole = 0;
19256- first_time = 0;
19257- goto try_again;
19258- }
19259- /*
19260 * A failed mmap() very likely causes application failure,
19261 * so fall back to the bottom-up function here. This scenario
19262 * can happen with large stack limits and large mmap()
19263 * allocations.
19264 */
19265- mm->free_area_cache = TASK_UNMAPPED_BASE;
19266+
19267+#ifdef CONFIG_PAX_SEGMEXEC
19268+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19269+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19270+ else
19271+#endif
19272+
19273+ mm->mmap_base = TASK_UNMAPPED_BASE;
19274+
19275+#ifdef CONFIG_PAX_RANDMMAP
19276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19277+ mm->mmap_base += mm->delta_mmap;
19278+#endif
19279+
19280+ mm->free_area_cache = mm->mmap_base;
19281 mm->cached_hole_size = ~0UL;
19282 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19283 len, pgoff, flags);
19284@@ -386,6 +392,7 @@ fail:
19285 /*
19286 * Restore the topdown base:
19287 */
19288+ mm->mmap_base = base;
19289 mm->free_area_cache = base;
19290 mm->cached_hole_size = ~0UL;
19291
19292@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19293 struct hstate *h = hstate_file(file);
19294 struct mm_struct *mm = current->mm;
19295 struct vm_area_struct *vma;
19296+ unsigned long pax_task_size = TASK_SIZE;
19297
19298 if (len & ~huge_page_mask(h))
19299 return -EINVAL;
19300- if (len > TASK_SIZE)
19301+
19302+#ifdef CONFIG_PAX_SEGMEXEC
19303+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19304+ pax_task_size = SEGMEXEC_TASK_SIZE;
19305+#endif
19306+
19307+ pax_task_size -= PAGE_SIZE;
19308+
19309+ if (len > pax_task_size)
19310 return -ENOMEM;
19311
19312 if (flags & MAP_FIXED) {
19313@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19314 if (addr) {
19315 addr = ALIGN(addr, huge_page_size(h));
19316 vma = find_vma(mm, addr);
19317- if (TASK_SIZE - len >= addr &&
19318- (!vma || addr + len <= vma->vm_start))
19319+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19320 return addr;
19321 }
19322 if (mm->get_unmapped_area == arch_get_unmapped_area)
19323diff -urNp linux-3.0.3/arch/x86/mm/init_32.c linux-3.0.3/arch/x86/mm/init_32.c
19324--- linux-3.0.3/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19325+++ linux-3.0.3/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19326@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19327 }
19328
19329 /*
19330- * Creates a middle page table and puts a pointer to it in the
19331- * given global directory entry. This only returns the gd entry
19332- * in non-PAE compilation mode, since the middle layer is folded.
19333- */
19334-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19335-{
19336- pud_t *pud;
19337- pmd_t *pmd_table;
19338-
19339-#ifdef CONFIG_X86_PAE
19340- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19341- if (after_bootmem)
19342- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19343- else
19344- pmd_table = (pmd_t *)alloc_low_page();
19345- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19346- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19347- pud = pud_offset(pgd, 0);
19348- BUG_ON(pmd_table != pmd_offset(pud, 0));
19349-
19350- return pmd_table;
19351- }
19352-#endif
19353- pud = pud_offset(pgd, 0);
19354- pmd_table = pmd_offset(pud, 0);
19355-
19356- return pmd_table;
19357-}
19358-
19359-/*
19360 * Create a page table and place a pointer to it in a middle page
19361 * directory entry:
19362 */
19363@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19364 page_table = (pte_t *)alloc_low_page();
19365
19366 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19367+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19368+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19369+#else
19370 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19371+#endif
19372 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19373 }
19374
19375 return pte_offset_kernel(pmd, 0);
19376 }
19377
19378+static pmd_t * __init one_md_table_init(pgd_t *pgd)
19379+{
19380+ pud_t *pud;
19381+ pmd_t *pmd_table;
19382+
19383+ pud = pud_offset(pgd, 0);
19384+ pmd_table = pmd_offset(pud, 0);
19385+
19386+ return pmd_table;
19387+}
19388+
19389 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19390 {
19391 int pgd_idx = pgd_index(vaddr);
19392@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19393 int pgd_idx, pmd_idx;
19394 unsigned long vaddr;
19395 pgd_t *pgd;
19396+ pud_t *pud;
19397 pmd_t *pmd;
19398 pte_t *pte = NULL;
19399
19400@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19401 pgd = pgd_base + pgd_idx;
19402
19403 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19404- pmd = one_md_table_init(pgd);
19405- pmd = pmd + pmd_index(vaddr);
19406+ pud = pud_offset(pgd, vaddr);
19407+ pmd = pmd_offset(pud, vaddr);
19408+
19409+#ifdef CONFIG_X86_PAE
19410+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19411+#endif
19412+
19413 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19414 pmd++, pmd_idx++) {
19415 pte = page_table_kmap_check(one_page_table_init(pmd),
19416@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19417 }
19418 }
19419
19420-static inline int is_kernel_text(unsigned long addr)
19421+static inline int is_kernel_text(unsigned long start, unsigned long end)
19422 {
19423- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19424- return 1;
19425- return 0;
19426+ if ((start > ktla_ktva((unsigned long)_etext) ||
19427+ end <= ktla_ktva((unsigned long)_stext)) &&
19428+ (start > ktla_ktva((unsigned long)_einittext) ||
19429+ end <= ktla_ktva((unsigned long)_sinittext)) &&
19430+
19431+#ifdef CONFIG_ACPI_SLEEP
19432+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19433+#endif
19434+
19435+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19436+ return 0;
19437+ return 1;
19438 }
19439
19440 /*
19441@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19442 unsigned long last_map_addr = end;
19443 unsigned long start_pfn, end_pfn;
19444 pgd_t *pgd_base = swapper_pg_dir;
19445- int pgd_idx, pmd_idx, pte_ofs;
19446+ unsigned int pgd_idx, pmd_idx, pte_ofs;
19447 unsigned long pfn;
19448 pgd_t *pgd;
19449+ pud_t *pud;
19450 pmd_t *pmd;
19451 pte_t *pte;
19452 unsigned pages_2m, pages_4k;
19453@@ -281,8 +282,13 @@ repeat:
19454 pfn = start_pfn;
19455 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19456 pgd = pgd_base + pgd_idx;
19457- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19458- pmd = one_md_table_init(pgd);
19459+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19460+ pud = pud_offset(pgd, 0);
19461+ pmd = pmd_offset(pud, 0);
19462+
19463+#ifdef CONFIG_X86_PAE
19464+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19465+#endif
19466
19467 if (pfn >= end_pfn)
19468 continue;
19469@@ -294,14 +300,13 @@ repeat:
19470 #endif
19471 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19472 pmd++, pmd_idx++) {
19473- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19474+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19475
19476 /*
19477 * Map with big pages if possible, otherwise
19478 * create normal page tables:
19479 */
19480 if (use_pse) {
19481- unsigned int addr2;
19482 pgprot_t prot = PAGE_KERNEL_LARGE;
19483 /*
19484 * first pass will use the same initial
19485@@ -311,11 +316,7 @@ repeat:
19486 __pgprot(PTE_IDENT_ATTR |
19487 _PAGE_PSE);
19488
19489- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19490- PAGE_OFFSET + PAGE_SIZE-1;
19491-
19492- if (is_kernel_text(addr) ||
19493- is_kernel_text(addr2))
19494+ if (is_kernel_text(address, address + PMD_SIZE))
19495 prot = PAGE_KERNEL_LARGE_EXEC;
19496
19497 pages_2m++;
19498@@ -332,7 +333,7 @@ repeat:
19499 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19500 pte += pte_ofs;
19501 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19502- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19503+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19504 pgprot_t prot = PAGE_KERNEL;
19505 /*
19506 * first pass will use the same initial
19507@@ -340,7 +341,7 @@ repeat:
19508 */
19509 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19510
19511- if (is_kernel_text(addr))
19512+ if (is_kernel_text(address, address + PAGE_SIZE))
19513 prot = PAGE_KERNEL_EXEC;
19514
19515 pages_4k++;
19516@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19517
19518 pud = pud_offset(pgd, va);
19519 pmd = pmd_offset(pud, va);
19520- if (!pmd_present(*pmd))
19521+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
19522 break;
19523
19524 pte = pte_offset_kernel(pmd, va);
19525@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19526
19527 static void __init pagetable_init(void)
19528 {
19529- pgd_t *pgd_base = swapper_pg_dir;
19530-
19531- permanent_kmaps_init(pgd_base);
19532+ permanent_kmaps_init(swapper_pg_dir);
19533 }
19534
19535-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19536+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19537 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19538
19539 /* user-defined highmem size */
19540@@ -757,6 +756,12 @@ void __init mem_init(void)
19541
19542 pci_iommu_alloc();
19543
19544+#ifdef CONFIG_PAX_PER_CPU_PGD
19545+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19546+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19547+ KERNEL_PGD_PTRS);
19548+#endif
19549+
19550 #ifdef CONFIG_FLATMEM
19551 BUG_ON(!mem_map);
19552 #endif
19553@@ -774,7 +779,7 @@ void __init mem_init(void)
19554 set_highmem_pages_init();
19555
19556 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19557- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19558+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19559 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19560
19561 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19562@@ -815,10 +820,10 @@ void __init mem_init(void)
19563 ((unsigned long)&__init_end -
19564 (unsigned long)&__init_begin) >> 10,
19565
19566- (unsigned long)&_etext, (unsigned long)&_edata,
19567- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19568+ (unsigned long)&_sdata, (unsigned long)&_edata,
19569+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19570
19571- (unsigned long)&_text, (unsigned long)&_etext,
19572+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19573 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19574
19575 /*
19576@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19577 if (!kernel_set_to_readonly)
19578 return;
19579
19580+ start = ktla_ktva(start);
19581 pr_debug("Set kernel text: %lx - %lx for read write\n",
19582 start, start+size);
19583
19584@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19585 if (!kernel_set_to_readonly)
19586 return;
19587
19588+ start = ktla_ktva(start);
19589 pr_debug("Set kernel text: %lx - %lx for read only\n",
19590 start, start+size);
19591
19592@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19593 unsigned long start = PFN_ALIGN(_text);
19594 unsigned long size = PFN_ALIGN(_etext) - start;
19595
19596+ start = ktla_ktva(start);
19597 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19598 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19599 size >> 10);
19600diff -urNp linux-3.0.3/arch/x86/mm/init_64.c linux-3.0.3/arch/x86/mm/init_64.c
19601--- linux-3.0.3/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19602+++ linux-3.0.3/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19603@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19604 * around without checking the pgd every time.
19605 */
19606
19607-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19608+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19609 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19610
19611 int force_personality32;
19612@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19613
19614 for (address = start; address <= end; address += PGDIR_SIZE) {
19615 const pgd_t *pgd_ref = pgd_offset_k(address);
19616+
19617+#ifdef CONFIG_PAX_PER_CPU_PGD
19618+ unsigned long cpu;
19619+#else
19620 struct page *page;
19621+#endif
19622
19623 if (pgd_none(*pgd_ref))
19624 continue;
19625
19626 spin_lock(&pgd_lock);
19627+
19628+#ifdef CONFIG_PAX_PER_CPU_PGD
19629+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19630+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19631+#else
19632 list_for_each_entry(page, &pgd_list, lru) {
19633 pgd_t *pgd;
19634 spinlock_t *pgt_lock;
19635@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19636 /* the pgt_lock only for Xen */
19637 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19638 spin_lock(pgt_lock);
19639+#endif
19640
19641 if (pgd_none(*pgd))
19642 set_pgd(pgd, *pgd_ref);
19643@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19644 BUG_ON(pgd_page_vaddr(*pgd)
19645 != pgd_page_vaddr(*pgd_ref));
19646
19647+#ifndef CONFIG_PAX_PER_CPU_PGD
19648 spin_unlock(pgt_lock);
19649+#endif
19650+
19651 }
19652 spin_unlock(&pgd_lock);
19653 }
19654@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19655 pmd = fill_pmd(pud, vaddr);
19656 pte = fill_pte(pmd, vaddr);
19657
19658+ pax_open_kernel();
19659 set_pte(pte, new_pte);
19660+ pax_close_kernel();
19661
19662 /*
19663 * It's enough to flush this one mapping.
19664@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19665 pgd = pgd_offset_k((unsigned long)__va(phys));
19666 if (pgd_none(*pgd)) {
19667 pud = (pud_t *) spp_getpage();
19668- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19669- _PAGE_USER));
19670+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19671 }
19672 pud = pud_offset(pgd, (unsigned long)__va(phys));
19673 if (pud_none(*pud)) {
19674 pmd = (pmd_t *) spp_getpage();
19675- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19676- _PAGE_USER));
19677+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19678 }
19679 pmd = pmd_offset(pud, phys);
19680 BUG_ON(!pmd_none(*pmd));
19681@@ -693,6 +707,12 @@ void __init mem_init(void)
19682
19683 pci_iommu_alloc();
19684
19685+#ifdef CONFIG_PAX_PER_CPU_PGD
19686+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19687+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19688+ KERNEL_PGD_PTRS);
19689+#endif
19690+
19691 /* clear_bss() already clear the empty_zero_page */
19692
19693 reservedpages = 0;
19694@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19695 static struct vm_area_struct gate_vma = {
19696 .vm_start = VSYSCALL_START,
19697 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19698- .vm_page_prot = PAGE_READONLY_EXEC,
19699- .vm_flags = VM_READ | VM_EXEC
19700+ .vm_page_prot = PAGE_READONLY,
19701+ .vm_flags = VM_READ
19702 };
19703
19704 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19705@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19706
19707 const char *arch_vma_name(struct vm_area_struct *vma)
19708 {
19709- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19710+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19711 return "[vdso]";
19712 if (vma == &gate_vma)
19713 return "[vsyscall]";
19714diff -urNp linux-3.0.3/arch/x86/mm/init.c linux-3.0.3/arch/x86/mm/init.c
19715--- linux-3.0.3/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19716+++ linux-3.0.3/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19717@@ -31,7 +31,7 @@ int direct_gbpages
19718 static void __init find_early_table_space(unsigned long end, int use_pse,
19719 int use_gbpages)
19720 {
19721- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19722+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19723 phys_addr_t base;
19724
19725 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19726@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19727 */
19728 int devmem_is_allowed(unsigned long pagenr)
19729 {
19730- if (pagenr <= 256)
19731+#ifdef CONFIG_GRKERNSEC_KMEM
19732+ /* allow BDA */
19733+ if (!pagenr)
19734+ return 1;
19735+ /* allow EBDA */
19736+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19737+ return 1;
19738+#else
19739+ if (!pagenr)
19740+ return 1;
19741+#ifdef CONFIG_VM86
19742+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19743+ return 1;
19744+#endif
19745+#endif
19746+
19747+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19748 return 1;
19749+#ifdef CONFIG_GRKERNSEC_KMEM
19750+ /* throw out everything else below 1MB */
19751+ if (pagenr <= 256)
19752+ return 0;
19753+#endif
19754 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19755 return 0;
19756 if (!page_is_ram(pagenr))
19757 return 1;
19758+
19759 return 0;
19760 }
19761
19762@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19763
19764 void free_initmem(void)
19765 {
19766+
19767+#ifdef CONFIG_PAX_KERNEXEC
19768+#ifdef CONFIG_X86_32
19769+ /* PaX: limit KERNEL_CS to actual size */
19770+ unsigned long addr, limit;
19771+ struct desc_struct d;
19772+ int cpu;
19773+
19774+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19775+ limit = (limit - 1UL) >> PAGE_SHIFT;
19776+
19777+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19778+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
19779+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19780+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19781+ }
19782+
19783+ /* PaX: make KERNEL_CS read-only */
19784+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19785+ if (!paravirt_enabled())
19786+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19787+/*
19788+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19789+ pgd = pgd_offset_k(addr);
19790+ pud = pud_offset(pgd, addr);
19791+ pmd = pmd_offset(pud, addr);
19792+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19793+ }
19794+*/
19795+#ifdef CONFIG_X86_PAE
19796+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19797+/*
19798+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19799+ pgd = pgd_offset_k(addr);
19800+ pud = pud_offset(pgd, addr);
19801+ pmd = pmd_offset(pud, addr);
19802+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19803+ }
19804+*/
19805+#endif
19806+
19807+#ifdef CONFIG_MODULES
19808+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19809+#endif
19810+
19811+#else
19812+ pgd_t *pgd;
19813+ pud_t *pud;
19814+ pmd_t *pmd;
19815+ unsigned long addr, end;
19816+
19817+ /* PaX: make kernel code/rodata read-only, rest non-executable */
19818+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19819+ pgd = pgd_offset_k(addr);
19820+ pud = pud_offset(pgd, addr);
19821+ pmd = pmd_offset(pud, addr);
19822+ if (!pmd_present(*pmd))
19823+ continue;
19824+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19825+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19826+ else
19827+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19828+ }
19829+
19830+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19831+ end = addr + KERNEL_IMAGE_SIZE;
19832+ for (; addr < end; addr += PMD_SIZE) {
19833+ pgd = pgd_offset_k(addr);
19834+ pud = pud_offset(pgd, addr);
19835+ pmd = pmd_offset(pud, addr);
19836+ if (!pmd_present(*pmd))
19837+ continue;
19838+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19839+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19840+ }
19841+#endif
19842+
19843+ flush_tlb_all();
19844+#endif
19845+
19846 free_init_pages("unused kernel memory",
19847 (unsigned long)(&__init_begin),
19848 (unsigned long)(&__init_end));
19849diff -urNp linux-3.0.3/arch/x86/mm/iomap_32.c linux-3.0.3/arch/x86/mm/iomap_32.c
19850--- linux-3.0.3/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19851+++ linux-3.0.3/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19852@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19853 type = kmap_atomic_idx_push();
19854 idx = type + KM_TYPE_NR * smp_processor_id();
19855 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19856+
19857+ pax_open_kernel();
19858 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19859+ pax_close_kernel();
19860+
19861 arch_flush_lazy_mmu_mode();
19862
19863 return (void *)vaddr;
19864diff -urNp linux-3.0.3/arch/x86/mm/ioremap.c linux-3.0.3/arch/x86/mm/ioremap.c
19865--- linux-3.0.3/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19866+++ linux-3.0.3/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19867@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19868 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19869 int is_ram = page_is_ram(pfn);
19870
19871- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19872+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19873 return NULL;
19874 WARN_ON_ONCE(is_ram);
19875 }
19876@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19877 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19878
19879 static __initdata int after_paging_init;
19880-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19881+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19882
19883 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19884 {
19885@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19886 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19887
19888 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19889- memset(bm_pte, 0, sizeof(bm_pte));
19890- pmd_populate_kernel(&init_mm, pmd, bm_pte);
19891+ pmd_populate_user(&init_mm, pmd, bm_pte);
19892
19893 /*
19894 * The boot-ioremap range spans multiple pmds, for which
19895diff -urNp linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c
19896--- linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19897+++ linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19898@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19899 * memory (e.g. tracked pages)? For now, we need this to avoid
19900 * invoking kmemcheck for PnP BIOS calls.
19901 */
19902- if (regs->flags & X86_VM_MASK)
19903+ if (v8086_mode(regs))
19904 return false;
19905- if (regs->cs != __KERNEL_CS)
19906+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19907 return false;
19908
19909 pte = kmemcheck_pte_lookup(address);
19910diff -urNp linux-3.0.3/arch/x86/mm/mmap.c linux-3.0.3/arch/x86/mm/mmap.c
19911--- linux-3.0.3/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19912+++ linux-3.0.3/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19913@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19914 * Leave an at least ~128 MB hole with possible stack randomization.
19915 */
19916 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19917-#define MAX_GAP (TASK_SIZE/6*5)
19918+#define MAX_GAP (pax_task_size/6*5)
19919
19920 /*
19921 * True on X86_32 or when emulating IA32 on X86_64
19922@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19923 return rnd << PAGE_SHIFT;
19924 }
19925
19926-static unsigned long mmap_base(void)
19927+static unsigned long mmap_base(struct mm_struct *mm)
19928 {
19929 unsigned long gap = rlimit(RLIMIT_STACK);
19930+ unsigned long pax_task_size = TASK_SIZE;
19931+
19932+#ifdef CONFIG_PAX_SEGMEXEC
19933+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19934+ pax_task_size = SEGMEXEC_TASK_SIZE;
19935+#endif
19936
19937 if (gap < MIN_GAP)
19938 gap = MIN_GAP;
19939 else if (gap > MAX_GAP)
19940 gap = MAX_GAP;
19941
19942- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19943+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19944 }
19945
19946 /*
19947 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19948 * does, but not when emulating X86_32
19949 */
19950-static unsigned long mmap_legacy_base(void)
19951+static unsigned long mmap_legacy_base(struct mm_struct *mm)
19952 {
19953- if (mmap_is_ia32())
19954+ if (mmap_is_ia32()) {
19955+
19956+#ifdef CONFIG_PAX_SEGMEXEC
19957+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19958+ return SEGMEXEC_TASK_UNMAPPED_BASE;
19959+ else
19960+#endif
19961+
19962 return TASK_UNMAPPED_BASE;
19963- else
19964+ } else
19965 return TASK_UNMAPPED_BASE + mmap_rnd();
19966 }
19967
19968@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19969 void arch_pick_mmap_layout(struct mm_struct *mm)
19970 {
19971 if (mmap_is_legacy()) {
19972- mm->mmap_base = mmap_legacy_base();
19973+ mm->mmap_base = mmap_legacy_base(mm);
19974+
19975+#ifdef CONFIG_PAX_RANDMMAP
19976+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19977+ mm->mmap_base += mm->delta_mmap;
19978+#endif
19979+
19980 mm->get_unmapped_area = arch_get_unmapped_area;
19981 mm->unmap_area = arch_unmap_area;
19982 } else {
19983- mm->mmap_base = mmap_base();
19984+ mm->mmap_base = mmap_base(mm);
19985+
19986+#ifdef CONFIG_PAX_RANDMMAP
19987+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19988+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19989+#endif
19990+
19991 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19992 mm->unmap_area = arch_unmap_area_topdown;
19993 }
19994diff -urNp linux-3.0.3/arch/x86/mm/mmio-mod.c linux-3.0.3/arch/x86/mm/mmio-mod.c
19995--- linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19996+++ linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19997@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19998 break;
19999 default:
20000 {
20001- unsigned char *ip = (unsigned char *)instptr;
20002+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20003 my_trace->opcode = MMIO_UNKNOWN_OP;
20004 my_trace->width = 0;
20005 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20006@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20007 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20008 void __iomem *addr)
20009 {
20010- static atomic_t next_id;
20011+ static atomic_unchecked_t next_id;
20012 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20013 /* These are page-unaligned. */
20014 struct mmiotrace_map map = {
20015@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20016 .private = trace
20017 },
20018 .phys = offset,
20019- .id = atomic_inc_return(&next_id)
20020+ .id = atomic_inc_return_unchecked(&next_id)
20021 };
20022 map.map_id = trace->id;
20023
20024diff -urNp linux-3.0.3/arch/x86/mm/pageattr.c linux-3.0.3/arch/x86/mm/pageattr.c
20025--- linux-3.0.3/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20026+++ linux-3.0.3/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20027@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20028 */
20029 #ifdef CONFIG_PCI_BIOS
20030 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20031- pgprot_val(forbidden) |= _PAGE_NX;
20032+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20033 #endif
20034
20035 /*
20036@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20037 * Does not cover __inittext since that is gone later on. On
20038 * 64bit we do not enforce !NX on the low mapping
20039 */
20040- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20041- pgprot_val(forbidden) |= _PAGE_NX;
20042+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20043+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20044
20045+#ifdef CONFIG_DEBUG_RODATA
20046 /*
20047 * The .rodata section needs to be read-only. Using the pfn
20048 * catches all aliases.
20049@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20050 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20051 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20052 pgprot_val(forbidden) |= _PAGE_RW;
20053+#endif
20054
20055 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20056 /*
20057@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20058 }
20059 #endif
20060
20061+#ifdef CONFIG_PAX_KERNEXEC
20062+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20063+ pgprot_val(forbidden) |= _PAGE_RW;
20064+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20065+ }
20066+#endif
20067+
20068 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20069
20070 return prot;
20071@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20072 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20073 {
20074 /* change init_mm */
20075+ pax_open_kernel();
20076 set_pte_atomic(kpte, pte);
20077+
20078 #ifdef CONFIG_X86_32
20079 if (!SHARED_KERNEL_PMD) {
20080+
20081+#ifdef CONFIG_PAX_PER_CPU_PGD
20082+ unsigned long cpu;
20083+#else
20084 struct page *page;
20085+#endif
20086
20087+#ifdef CONFIG_PAX_PER_CPU_PGD
20088+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20089+ pgd_t *pgd = get_cpu_pgd(cpu);
20090+#else
20091 list_for_each_entry(page, &pgd_list, lru) {
20092- pgd_t *pgd;
20093+ pgd_t *pgd = (pgd_t *)page_address(page);
20094+#endif
20095+
20096 pud_t *pud;
20097 pmd_t *pmd;
20098
20099- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20100+ pgd += pgd_index(address);
20101 pud = pud_offset(pgd, address);
20102 pmd = pmd_offset(pud, address);
20103 set_pte_atomic((pte_t *)pmd, pte);
20104 }
20105 }
20106 #endif
20107+ pax_close_kernel();
20108 }
20109
20110 static int
20111diff -urNp linux-3.0.3/arch/x86/mm/pageattr-test.c linux-3.0.3/arch/x86/mm/pageattr-test.c
20112--- linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20113+++ linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20114@@ -36,7 +36,7 @@ enum {
20115
20116 static int pte_testbit(pte_t pte)
20117 {
20118- return pte_flags(pte) & _PAGE_UNUSED1;
20119+ return pte_flags(pte) & _PAGE_CPA_TEST;
20120 }
20121
20122 struct split_state {
20123diff -urNp linux-3.0.3/arch/x86/mm/pat.c linux-3.0.3/arch/x86/mm/pat.c
20124--- linux-3.0.3/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20125+++ linux-3.0.3/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20126@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20127
20128 if (!entry) {
20129 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20130- current->comm, current->pid, start, end);
20131+ current->comm, task_pid_nr(current), start, end);
20132 return -EINVAL;
20133 }
20134
20135@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20136 while (cursor < to) {
20137 if (!devmem_is_allowed(pfn)) {
20138 printk(KERN_INFO
20139- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20140- current->comm, from, to);
20141+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20142+ current->comm, from, to, cursor);
20143 return 0;
20144 }
20145 cursor += PAGE_SIZE;
20146@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20147 printk(KERN_INFO
20148 "%s:%d ioremap_change_attr failed %s "
20149 "for %Lx-%Lx\n",
20150- current->comm, current->pid,
20151+ current->comm, task_pid_nr(current),
20152 cattr_name(flags),
20153 base, (unsigned long long)(base + size));
20154 return -EINVAL;
20155@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20156 if (want_flags != flags) {
20157 printk(KERN_WARNING
20158 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20159- current->comm, current->pid,
20160+ current->comm, task_pid_nr(current),
20161 cattr_name(want_flags),
20162 (unsigned long long)paddr,
20163 (unsigned long long)(paddr + size),
20164@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20165 free_memtype(paddr, paddr + size);
20166 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20167 " for %Lx-%Lx, got %s\n",
20168- current->comm, current->pid,
20169+ current->comm, task_pid_nr(current),
20170 cattr_name(want_flags),
20171 (unsigned long long)paddr,
20172 (unsigned long long)(paddr + size),
20173diff -urNp linux-3.0.3/arch/x86/mm/pf_in.c linux-3.0.3/arch/x86/mm/pf_in.c
20174--- linux-3.0.3/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20175+++ linux-3.0.3/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20176@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20177 int i;
20178 enum reason_type rv = OTHERS;
20179
20180- p = (unsigned char *)ins_addr;
20181+ p = (unsigned char *)ktla_ktva(ins_addr);
20182 p += skip_prefix(p, &prf);
20183 p += get_opcode(p, &opcode);
20184
20185@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20186 struct prefix_bits prf;
20187 int i;
20188
20189- p = (unsigned char *)ins_addr;
20190+ p = (unsigned char *)ktla_ktva(ins_addr);
20191 p += skip_prefix(p, &prf);
20192 p += get_opcode(p, &opcode);
20193
20194@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20195 struct prefix_bits prf;
20196 int i;
20197
20198- p = (unsigned char *)ins_addr;
20199+ p = (unsigned char *)ktla_ktva(ins_addr);
20200 p += skip_prefix(p, &prf);
20201 p += get_opcode(p, &opcode);
20202
20203@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20204 struct prefix_bits prf;
20205 int i;
20206
20207- p = (unsigned char *)ins_addr;
20208+ p = (unsigned char *)ktla_ktva(ins_addr);
20209 p += skip_prefix(p, &prf);
20210 p += get_opcode(p, &opcode);
20211 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20212@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20213 struct prefix_bits prf;
20214 int i;
20215
20216- p = (unsigned char *)ins_addr;
20217+ p = (unsigned char *)ktla_ktva(ins_addr);
20218 p += skip_prefix(p, &prf);
20219 p += get_opcode(p, &opcode);
20220 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20221diff -urNp linux-3.0.3/arch/x86/mm/pgtable_32.c linux-3.0.3/arch/x86/mm/pgtable_32.c
20222--- linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20223+++ linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20224@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20225 return;
20226 }
20227 pte = pte_offset_kernel(pmd, vaddr);
20228+
20229+ pax_open_kernel();
20230 if (pte_val(pteval))
20231 set_pte_at(&init_mm, vaddr, pte, pteval);
20232 else
20233 pte_clear(&init_mm, vaddr, pte);
20234+ pax_close_kernel();
20235
20236 /*
20237 * It's enough to flush this one mapping.
20238diff -urNp linux-3.0.3/arch/x86/mm/pgtable.c linux-3.0.3/arch/x86/mm/pgtable.c
20239--- linux-3.0.3/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20240+++ linux-3.0.3/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20241@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20242 list_del(&page->lru);
20243 }
20244
20245-#define UNSHARED_PTRS_PER_PGD \
20246- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20247+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20248+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20249
20250+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20251+{
20252+ while (count--)
20253+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20254+}
20255+#endif
20256+
20257+#ifdef CONFIG_PAX_PER_CPU_PGD
20258+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20259+{
20260+ while (count--)
20261+
20262+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20263+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20264+#else
20265+ *dst++ = *src++;
20266+#endif
20267
20268+}
20269+#endif
20270+
20271+#ifdef CONFIG_X86_64
20272+#define pxd_t pud_t
20273+#define pyd_t pgd_t
20274+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20275+#define pxd_free(mm, pud) pud_free((mm), (pud))
20276+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20277+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20278+#define PYD_SIZE PGDIR_SIZE
20279+#else
20280+#define pxd_t pmd_t
20281+#define pyd_t pud_t
20282+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20283+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20284+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20285+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20286+#define PYD_SIZE PUD_SIZE
20287+#endif
20288+
20289+#ifdef CONFIG_PAX_PER_CPU_PGD
20290+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20291+static inline void pgd_dtor(pgd_t *pgd) {}
20292+#else
20293 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20294 {
20295 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20296@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20297 pgd_list_del(pgd);
20298 spin_unlock(&pgd_lock);
20299 }
20300+#endif
20301
20302 /*
20303 * List of all pgd's needed for non-PAE so it can invalidate entries
20304@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20305 * -- wli
20306 */
20307
20308-#ifdef CONFIG_X86_PAE
20309+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20310 /*
20311 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20312 * updating the top-level pagetable entries to guarantee the
20313@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20314 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20315 * and initialize the kernel pmds here.
20316 */
20317-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20318+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20319
20320 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20321 {
20322@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20323 */
20324 flush_tlb_mm(mm);
20325 }
20326+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20327+#define PREALLOCATED_PXDS USER_PGD_PTRS
20328 #else /* !CONFIG_X86_PAE */
20329
20330 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20331-#define PREALLOCATED_PMDS 0
20332+#define PREALLOCATED_PXDS 0
20333
20334 #endif /* CONFIG_X86_PAE */
20335
20336-static void free_pmds(pmd_t *pmds[])
20337+static void free_pxds(pxd_t *pxds[])
20338 {
20339 int i;
20340
20341- for(i = 0; i < PREALLOCATED_PMDS; i++)
20342- if (pmds[i])
20343- free_page((unsigned long)pmds[i]);
20344+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20345+ if (pxds[i])
20346+ free_page((unsigned long)pxds[i]);
20347 }
20348
20349-static int preallocate_pmds(pmd_t *pmds[])
20350+static int preallocate_pxds(pxd_t *pxds[])
20351 {
20352 int i;
20353 bool failed = false;
20354
20355- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20356- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20357- if (pmd == NULL)
20358+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20359+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20360+ if (pxd == NULL)
20361 failed = true;
20362- pmds[i] = pmd;
20363+ pxds[i] = pxd;
20364 }
20365
20366 if (failed) {
20367- free_pmds(pmds);
20368+ free_pxds(pxds);
20369 return -ENOMEM;
20370 }
20371
20372@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20373 * preallocate which never got a corresponding vma will need to be
20374 * freed manually.
20375 */
20376-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20377+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20378 {
20379 int i;
20380
20381- for(i = 0; i < PREALLOCATED_PMDS; i++) {
20382+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
20383 pgd_t pgd = pgdp[i];
20384
20385 if (pgd_val(pgd) != 0) {
20386- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20387+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20388
20389- pgdp[i] = native_make_pgd(0);
20390+ set_pgd(pgdp + i, native_make_pgd(0));
20391
20392- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20393- pmd_free(mm, pmd);
20394+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20395+ pxd_free(mm, pxd);
20396 }
20397 }
20398 }
20399
20400-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20401+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20402 {
20403- pud_t *pud;
20404+ pyd_t *pyd;
20405 unsigned long addr;
20406 int i;
20407
20408- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20409+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20410 return;
20411
20412- pud = pud_offset(pgd, 0);
20413+#ifdef CONFIG_X86_64
20414+ pyd = pyd_offset(mm, 0L);
20415+#else
20416+ pyd = pyd_offset(pgd, 0L);
20417+#endif
20418
20419- for (addr = i = 0; i < PREALLOCATED_PMDS;
20420- i++, pud++, addr += PUD_SIZE) {
20421- pmd_t *pmd = pmds[i];
20422+ for (addr = i = 0; i < PREALLOCATED_PXDS;
20423+ i++, pyd++, addr += PYD_SIZE) {
20424+ pxd_t *pxd = pxds[i];
20425
20426 if (i >= KERNEL_PGD_BOUNDARY)
20427- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20428- sizeof(pmd_t) * PTRS_PER_PMD);
20429+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20430+ sizeof(pxd_t) * PTRS_PER_PMD);
20431
20432- pud_populate(mm, pud, pmd);
20433+ pyd_populate(mm, pyd, pxd);
20434 }
20435 }
20436
20437 pgd_t *pgd_alloc(struct mm_struct *mm)
20438 {
20439 pgd_t *pgd;
20440- pmd_t *pmds[PREALLOCATED_PMDS];
20441+ pxd_t *pxds[PREALLOCATED_PXDS];
20442
20443 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20444
20445@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20446
20447 mm->pgd = pgd;
20448
20449- if (preallocate_pmds(pmds) != 0)
20450+ if (preallocate_pxds(pxds) != 0)
20451 goto out_free_pgd;
20452
20453 if (paravirt_pgd_alloc(mm) != 0)
20454- goto out_free_pmds;
20455+ goto out_free_pxds;
20456
20457 /*
20458 * Make sure that pre-populating the pmds is atomic with
20459@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20460 spin_lock(&pgd_lock);
20461
20462 pgd_ctor(mm, pgd);
20463- pgd_prepopulate_pmd(mm, pgd, pmds);
20464+ pgd_prepopulate_pxd(mm, pgd, pxds);
20465
20466 spin_unlock(&pgd_lock);
20467
20468 return pgd;
20469
20470-out_free_pmds:
20471- free_pmds(pmds);
20472+out_free_pxds:
20473+ free_pxds(pxds);
20474 out_free_pgd:
20475 free_page((unsigned long)pgd);
20476 out:
20477@@ -295,7 +344,7 @@ out:
20478
20479 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20480 {
20481- pgd_mop_up_pmds(mm, pgd);
20482+ pgd_mop_up_pxds(mm, pgd);
20483 pgd_dtor(pgd);
20484 paravirt_pgd_free(mm, pgd);
20485 free_page((unsigned long)pgd);
20486diff -urNp linux-3.0.3/arch/x86/mm/setup_nx.c linux-3.0.3/arch/x86/mm/setup_nx.c
20487--- linux-3.0.3/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20488+++ linux-3.0.3/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20489@@ -5,8 +5,10 @@
20490 #include <asm/pgtable.h>
20491 #include <asm/proto.h>
20492
20493+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20494 static int disable_nx __cpuinitdata;
20495
20496+#ifndef CONFIG_PAX_PAGEEXEC
20497 /*
20498 * noexec = on|off
20499 *
20500@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20501 return 0;
20502 }
20503 early_param("noexec", noexec_setup);
20504+#endif
20505+
20506+#endif
20507
20508 void __cpuinit x86_configure_nx(void)
20509 {
20510+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20511 if (cpu_has_nx && !disable_nx)
20512 __supported_pte_mask |= _PAGE_NX;
20513 else
20514+#endif
20515 __supported_pte_mask &= ~_PAGE_NX;
20516 }
20517
20518diff -urNp linux-3.0.3/arch/x86/mm/tlb.c linux-3.0.3/arch/x86/mm/tlb.c
20519--- linux-3.0.3/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20520+++ linux-3.0.3/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20521@@ -65,7 +65,11 @@ void leave_mm(int cpu)
20522 BUG();
20523 cpumask_clear_cpu(cpu,
20524 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20525+
20526+#ifndef CONFIG_PAX_PER_CPU_PGD
20527 load_cr3(swapper_pg_dir);
20528+#endif
20529+
20530 }
20531 EXPORT_SYMBOL_GPL(leave_mm);
20532
20533diff -urNp linux-3.0.3/arch/x86/net/bpf_jit_comp.c linux-3.0.3/arch/x86/net/bpf_jit_comp.c
20534--- linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20535+++ linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20536@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20537 module_free(NULL, image);
20538 return;
20539 }
20540+ pax_open_kernel();
20541 memcpy(image + proglen, temp, ilen);
20542+ pax_close_kernel();
20543 }
20544 proglen += ilen;
20545 addrs[i] = proglen;
20546@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20547 break;
20548 }
20549 if (proglen == oldproglen) {
20550- image = module_alloc(max_t(unsigned int,
20551+ image = module_alloc_exec(max_t(unsigned int,
20552 proglen,
20553 sizeof(struct work_struct)));
20554 if (!image)
20555diff -urNp linux-3.0.3/arch/x86/oprofile/backtrace.c linux-3.0.3/arch/x86/oprofile/backtrace.c
20556--- linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20557+++ linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20558@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20559 {
20560 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20561
20562- if (!user_mode_vm(regs)) {
20563+ if (!user_mode(regs)) {
20564 unsigned long stack = kernel_stack_pointer(regs);
20565 if (depth)
20566 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20567diff -urNp linux-3.0.3/arch/x86/pci/mrst.c linux-3.0.3/arch/x86/pci/mrst.c
20568--- linux-3.0.3/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20569+++ linux-3.0.3/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20570@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20571 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20572 pci_mmcfg_late_init();
20573 pcibios_enable_irq = mrst_pci_irq_enable;
20574- pci_root_ops = pci_mrst_ops;
20575+ pax_open_kernel();
20576+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20577+ pax_close_kernel();
20578 /* Continue with standard init */
20579 return 1;
20580 }
20581diff -urNp linux-3.0.3/arch/x86/pci/pcbios.c linux-3.0.3/arch/x86/pci/pcbios.c
20582--- linux-3.0.3/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20583+++ linux-3.0.3/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20584@@ -79,50 +79,93 @@ union bios32 {
20585 static struct {
20586 unsigned long address;
20587 unsigned short segment;
20588-} bios32_indirect = { 0, __KERNEL_CS };
20589+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20590
20591 /*
20592 * Returns the entry point for the given service, NULL on error
20593 */
20594
20595-static unsigned long bios32_service(unsigned long service)
20596+static unsigned long __devinit bios32_service(unsigned long service)
20597 {
20598 unsigned char return_code; /* %al */
20599 unsigned long address; /* %ebx */
20600 unsigned long length; /* %ecx */
20601 unsigned long entry; /* %edx */
20602 unsigned long flags;
20603+ struct desc_struct d, *gdt;
20604
20605 local_irq_save(flags);
20606- __asm__("lcall *(%%edi); cld"
20607+
20608+ gdt = get_cpu_gdt_table(smp_processor_id());
20609+
20610+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20611+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20612+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20613+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20614+
20615+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20616 : "=a" (return_code),
20617 "=b" (address),
20618 "=c" (length),
20619 "=d" (entry)
20620 : "0" (service),
20621 "1" (0),
20622- "D" (&bios32_indirect));
20623+ "D" (&bios32_indirect),
20624+ "r"(__PCIBIOS_DS)
20625+ : "memory");
20626+
20627+ pax_open_kernel();
20628+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20629+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20630+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20631+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20632+ pax_close_kernel();
20633+
20634 local_irq_restore(flags);
20635
20636 switch (return_code) {
20637- case 0:
20638- return address + entry;
20639- case 0x80: /* Not present */
20640- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20641- return 0;
20642- default: /* Shouldn't happen */
20643- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20644- service, return_code);
20645+ case 0: {
20646+ int cpu;
20647+ unsigned char flags;
20648+
20649+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20650+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20651+ printk(KERN_WARNING "bios32_service: not valid\n");
20652 return 0;
20653+ }
20654+ address = address + PAGE_OFFSET;
20655+ length += 16UL; /* some BIOSs underreport this... */
20656+ flags = 4;
20657+ if (length >= 64*1024*1024) {
20658+ length >>= PAGE_SHIFT;
20659+ flags |= 8;
20660+ }
20661+
20662+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20663+ gdt = get_cpu_gdt_table(cpu);
20664+ pack_descriptor(&d, address, length, 0x9b, flags);
20665+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20666+ pack_descriptor(&d, address, length, 0x93, flags);
20667+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20668+ }
20669+ return entry;
20670+ }
20671+ case 0x80: /* Not present */
20672+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20673+ return 0;
20674+ default: /* Shouldn't happen */
20675+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20676+ service, return_code);
20677+ return 0;
20678 }
20679 }
20680
20681 static struct {
20682 unsigned long address;
20683 unsigned short segment;
20684-} pci_indirect = { 0, __KERNEL_CS };
20685+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20686
20687-static int pci_bios_present;
20688+static int pci_bios_present __read_only;
20689
20690 static int __devinit check_pcibios(void)
20691 {
20692@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20693 unsigned long flags, pcibios_entry;
20694
20695 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20696- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20697+ pci_indirect.address = pcibios_entry;
20698
20699 local_irq_save(flags);
20700- __asm__(
20701- "lcall *(%%edi); cld\n\t"
20702+ __asm__("movw %w6, %%ds\n\t"
20703+ "lcall *%%ss:(%%edi); cld\n\t"
20704+ "push %%ss\n\t"
20705+ "pop %%ds\n\t"
20706 "jc 1f\n\t"
20707 "xor %%ah, %%ah\n"
20708 "1:"
20709@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20710 "=b" (ebx),
20711 "=c" (ecx)
20712 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20713- "D" (&pci_indirect)
20714+ "D" (&pci_indirect),
20715+ "r" (__PCIBIOS_DS)
20716 : "memory");
20717 local_irq_restore(flags);
20718
20719@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20720
20721 switch (len) {
20722 case 1:
20723- __asm__("lcall *(%%esi); cld\n\t"
20724+ __asm__("movw %w6, %%ds\n\t"
20725+ "lcall *%%ss:(%%esi); cld\n\t"
20726+ "push %%ss\n\t"
20727+ "pop %%ds\n\t"
20728 "jc 1f\n\t"
20729 "xor %%ah, %%ah\n"
20730 "1:"
20731@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20732 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20733 "b" (bx),
20734 "D" ((long)reg),
20735- "S" (&pci_indirect));
20736+ "S" (&pci_indirect),
20737+ "r" (__PCIBIOS_DS));
20738 /*
20739 * Zero-extend the result beyond 8 bits, do not trust the
20740 * BIOS having done it:
20741@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20742 *value &= 0xff;
20743 break;
20744 case 2:
20745- __asm__("lcall *(%%esi); cld\n\t"
20746+ __asm__("movw %w6, %%ds\n\t"
20747+ "lcall *%%ss:(%%esi); cld\n\t"
20748+ "push %%ss\n\t"
20749+ "pop %%ds\n\t"
20750 "jc 1f\n\t"
20751 "xor %%ah, %%ah\n"
20752 "1:"
20753@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20754 : "1" (PCIBIOS_READ_CONFIG_WORD),
20755 "b" (bx),
20756 "D" ((long)reg),
20757- "S" (&pci_indirect));
20758+ "S" (&pci_indirect),
20759+ "r" (__PCIBIOS_DS));
20760 /*
20761 * Zero-extend the result beyond 16 bits, do not trust the
20762 * BIOS having done it:
20763@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20764 *value &= 0xffff;
20765 break;
20766 case 4:
20767- __asm__("lcall *(%%esi); cld\n\t"
20768+ __asm__("movw %w6, %%ds\n\t"
20769+ "lcall *%%ss:(%%esi); cld\n\t"
20770+ "push %%ss\n\t"
20771+ "pop %%ds\n\t"
20772 "jc 1f\n\t"
20773 "xor %%ah, %%ah\n"
20774 "1:"
20775@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20776 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20777 "b" (bx),
20778 "D" ((long)reg),
20779- "S" (&pci_indirect));
20780+ "S" (&pci_indirect),
20781+ "r" (__PCIBIOS_DS));
20782 break;
20783 }
20784
20785@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20786
20787 switch (len) {
20788 case 1:
20789- __asm__("lcall *(%%esi); cld\n\t"
20790+ __asm__("movw %w6, %%ds\n\t"
20791+ "lcall *%%ss:(%%esi); cld\n\t"
20792+ "push %%ss\n\t"
20793+ "pop %%ds\n\t"
20794 "jc 1f\n\t"
20795 "xor %%ah, %%ah\n"
20796 "1:"
20797@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20798 "c" (value),
20799 "b" (bx),
20800 "D" ((long)reg),
20801- "S" (&pci_indirect));
20802+ "S" (&pci_indirect),
20803+ "r" (__PCIBIOS_DS));
20804 break;
20805 case 2:
20806- __asm__("lcall *(%%esi); cld\n\t"
20807+ __asm__("movw %w6, %%ds\n\t"
20808+ "lcall *%%ss:(%%esi); cld\n\t"
20809+ "push %%ss\n\t"
20810+ "pop %%ds\n\t"
20811 "jc 1f\n\t"
20812 "xor %%ah, %%ah\n"
20813 "1:"
20814@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20815 "c" (value),
20816 "b" (bx),
20817 "D" ((long)reg),
20818- "S" (&pci_indirect));
20819+ "S" (&pci_indirect),
20820+ "r" (__PCIBIOS_DS));
20821 break;
20822 case 4:
20823- __asm__("lcall *(%%esi); cld\n\t"
20824+ __asm__("movw %w6, %%ds\n\t"
20825+ "lcall *%%ss:(%%esi); cld\n\t"
20826+ "push %%ss\n\t"
20827+ "pop %%ds\n\t"
20828 "jc 1f\n\t"
20829 "xor %%ah, %%ah\n"
20830 "1:"
20831@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20832 "c" (value),
20833 "b" (bx),
20834 "D" ((long)reg),
20835- "S" (&pci_indirect));
20836+ "S" (&pci_indirect),
20837+ "r" (__PCIBIOS_DS));
20838 break;
20839 }
20840
20841@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20842
20843 DBG("PCI: Fetching IRQ routing table... ");
20844 __asm__("push %%es\n\t"
20845+ "movw %w8, %%ds\n\t"
20846 "push %%ds\n\t"
20847 "pop %%es\n\t"
20848- "lcall *(%%esi); cld\n\t"
20849+ "lcall *%%ss:(%%esi); cld\n\t"
20850 "pop %%es\n\t"
20851+ "push %%ss\n\t"
20852+ "pop %%ds\n"
20853 "jc 1f\n\t"
20854 "xor %%ah, %%ah\n"
20855 "1:"
20856@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20857 "1" (0),
20858 "D" ((long) &opt),
20859 "S" (&pci_indirect),
20860- "m" (opt)
20861+ "m" (opt),
20862+ "r" (__PCIBIOS_DS)
20863 : "memory");
20864 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20865 if (ret & 0xff00)
20866@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20867 {
20868 int ret;
20869
20870- __asm__("lcall *(%%esi); cld\n\t"
20871+ __asm__("movw %w5, %%ds\n\t"
20872+ "lcall *%%ss:(%%esi); cld\n\t"
20873+ "push %%ss\n\t"
20874+ "pop %%ds\n"
20875 "jc 1f\n\t"
20876 "xor %%ah, %%ah\n"
20877 "1:"
20878@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20879 : "0" (PCIBIOS_SET_PCI_HW_INT),
20880 "b" ((dev->bus->number << 8) | dev->devfn),
20881 "c" ((irq << 8) | (pin + 10)),
20882- "S" (&pci_indirect));
20883+ "S" (&pci_indirect),
20884+ "r" (__PCIBIOS_DS));
20885 return !(ret & 0xff00);
20886 }
20887 EXPORT_SYMBOL(pcibios_set_irq_routing);
20888diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_32.c linux-3.0.3/arch/x86/platform/efi/efi_32.c
20889--- linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20890+++ linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20891@@ -38,70 +38,37 @@
20892 */
20893
20894 static unsigned long efi_rt_eflags;
20895-static pgd_t efi_bak_pg_dir_pointer[2];
20896+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20897
20898-void efi_call_phys_prelog(void)
20899+void __init efi_call_phys_prelog(void)
20900 {
20901- unsigned long cr4;
20902- unsigned long temp;
20903 struct desc_ptr gdt_descr;
20904
20905 local_irq_save(efi_rt_eflags);
20906
20907- /*
20908- * If I don't have PAE, I should just duplicate two entries in page
20909- * directory. If I have PAE, I just need to duplicate one entry in
20910- * page directory.
20911- */
20912- cr4 = read_cr4_safe();
20913-
20914- if (cr4 & X86_CR4_PAE) {
20915- efi_bak_pg_dir_pointer[0].pgd =
20916- swapper_pg_dir[pgd_index(0)].pgd;
20917- swapper_pg_dir[0].pgd =
20918- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20919- } else {
20920- efi_bak_pg_dir_pointer[0].pgd =
20921- swapper_pg_dir[pgd_index(0)].pgd;
20922- efi_bak_pg_dir_pointer[1].pgd =
20923- swapper_pg_dir[pgd_index(0x400000)].pgd;
20924- swapper_pg_dir[pgd_index(0)].pgd =
20925- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20926- temp = PAGE_OFFSET + 0x400000;
20927- swapper_pg_dir[pgd_index(0x400000)].pgd =
20928- swapper_pg_dir[pgd_index(temp)].pgd;
20929- }
20930+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20931+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20932+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20933
20934 /*
20935 * After the lock is released, the original page table is restored.
20936 */
20937 __flush_tlb_all();
20938
20939- gdt_descr.address = __pa(get_cpu_gdt_table(0));
20940+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20941 gdt_descr.size = GDT_SIZE - 1;
20942 load_gdt(&gdt_descr);
20943 }
20944
20945-void efi_call_phys_epilog(void)
20946+void __init efi_call_phys_epilog(void)
20947 {
20948- unsigned long cr4;
20949 struct desc_ptr gdt_descr;
20950
20951- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20952+ gdt_descr.address = get_cpu_gdt_table(0);
20953 gdt_descr.size = GDT_SIZE - 1;
20954 load_gdt(&gdt_descr);
20955
20956- cr4 = read_cr4_safe();
20957-
20958- if (cr4 & X86_CR4_PAE) {
20959- swapper_pg_dir[pgd_index(0)].pgd =
20960- efi_bak_pg_dir_pointer[0].pgd;
20961- } else {
20962- swapper_pg_dir[pgd_index(0)].pgd =
20963- efi_bak_pg_dir_pointer[0].pgd;
20964- swapper_pg_dir[pgd_index(0x400000)].pgd =
20965- efi_bak_pg_dir_pointer[1].pgd;
20966- }
20967+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20968
20969 /*
20970 * After the lock is released, the original page table is restored.
20971diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S
20972--- linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20973+++ linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20974@@ -6,6 +6,7 @@
20975 */
20976
20977 #include <linux/linkage.h>
20978+#include <linux/init.h>
20979 #include <asm/page_types.h>
20980
20981 /*
20982@@ -20,7 +21,7 @@
20983 * service functions will comply with gcc calling convention, too.
20984 */
20985
20986-.text
20987+__INIT
20988 ENTRY(efi_call_phys)
20989 /*
20990 * 0. The function can only be called in Linux kernel. So CS has been
20991@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20992 * The mapping of lower virtual memory has been created in prelog and
20993 * epilog.
20994 */
20995- movl $1f, %edx
20996- subl $__PAGE_OFFSET, %edx
20997- jmp *%edx
20998+ jmp 1f-__PAGE_OFFSET
20999 1:
21000
21001 /*
21002@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21003 * parameter 2, ..., param n. To make things easy, we save the return
21004 * address of efi_call_phys in a global variable.
21005 */
21006- popl %edx
21007- movl %edx, saved_return_addr
21008- /* get the function pointer into ECX*/
21009- popl %ecx
21010- movl %ecx, efi_rt_function_ptr
21011- movl $2f, %edx
21012- subl $__PAGE_OFFSET, %edx
21013- pushl %edx
21014+ popl (saved_return_addr)
21015+ popl (efi_rt_function_ptr)
21016
21017 /*
21018 * 3. Clear PG bit in %CR0.
21019@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21020 /*
21021 * 5. Call the physical function.
21022 */
21023- jmp *%ecx
21024+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21025
21026-2:
21027 /*
21028 * 6. After EFI runtime service returns, control will return to
21029 * following instruction. We'd better readjust stack pointer first.
21030@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21031 movl %cr0, %edx
21032 orl $0x80000000, %edx
21033 movl %edx, %cr0
21034- jmp 1f
21035-1:
21036+
21037 /*
21038 * 8. Now restore the virtual mode from flat mode by
21039 * adding EIP with PAGE_OFFSET.
21040 */
21041- movl $1f, %edx
21042- jmp *%edx
21043+ jmp 1f+__PAGE_OFFSET
21044 1:
21045
21046 /*
21047 * 9. Balance the stack. And because EAX contain the return value,
21048 * we'd better not clobber it.
21049 */
21050- leal efi_rt_function_ptr, %edx
21051- movl (%edx), %ecx
21052- pushl %ecx
21053+ pushl (efi_rt_function_ptr)
21054
21055 /*
21056- * 10. Push the saved return address onto the stack and return.
21057+ * 10. Return to the saved return address.
21058 */
21059- leal saved_return_addr, %edx
21060- movl (%edx), %ecx
21061- pushl %ecx
21062- ret
21063+ jmpl *(saved_return_addr)
21064 ENDPROC(efi_call_phys)
21065 .previous
21066
21067-.data
21068+__INITDATA
21069 saved_return_addr:
21070 .long 0
21071 efi_rt_function_ptr:
21072diff -urNp linux-3.0.3/arch/x86/platform/mrst/mrst.c linux-3.0.3/arch/x86/platform/mrst/mrst.c
21073--- linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21074+++ linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21075@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21076 }
21077
21078 /* Reboot and power off are handled by the SCU on a MID device */
21079-static void mrst_power_off(void)
21080+static __noreturn void mrst_power_off(void)
21081 {
21082 intel_scu_ipc_simple_command(0xf1, 1);
21083+ BUG();
21084 }
21085
21086-static void mrst_reboot(void)
21087+static __noreturn void mrst_reboot(void)
21088 {
21089 intel_scu_ipc_simple_command(0xf1, 0);
21090+ BUG();
21091 }
21092
21093 /*
21094diff -urNp linux-3.0.3/arch/x86/platform/uv/tlb_uv.c linux-3.0.3/arch/x86/platform/uv/tlb_uv.c
21095--- linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21096+++ linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21097@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21098 cpumask_t mask;
21099 struct reset_args reset_args;
21100
21101+ pax_track_stack();
21102+
21103 reset_args.sender = sender;
21104 cpus_clear(mask);
21105 /* find a single cpu for each uvhub in this distribution mask */
21106diff -urNp linux-3.0.3/arch/x86/power/cpu.c linux-3.0.3/arch/x86/power/cpu.c
21107--- linux-3.0.3/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21108+++ linux-3.0.3/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21109@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21110 static void fix_processor_context(void)
21111 {
21112 int cpu = smp_processor_id();
21113- struct tss_struct *t = &per_cpu(init_tss, cpu);
21114+ struct tss_struct *t = init_tss + cpu;
21115
21116 set_tss_desc(cpu, t); /*
21117 * This just modifies memory; should not be
21118@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21119 */
21120
21121 #ifdef CONFIG_X86_64
21122+ pax_open_kernel();
21123 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21124+ pax_close_kernel();
21125
21126 syscall_init(); /* This sets MSR_*STAR and related */
21127 #endif
21128diff -urNp linux-3.0.3/arch/x86/vdso/Makefile linux-3.0.3/arch/x86/vdso/Makefile
21129--- linux-3.0.3/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21130+++ linux-3.0.3/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21131@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21132 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21133 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21134
21135-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21136+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21137 GCOV_PROFILE := n
21138
21139 #
21140diff -urNp linux-3.0.3/arch/x86/vdso/vdso32-setup.c linux-3.0.3/arch/x86/vdso/vdso32-setup.c
21141--- linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21142+++ linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21143@@ -25,6 +25,7 @@
21144 #include <asm/tlbflush.h>
21145 #include <asm/vdso.h>
21146 #include <asm/proto.h>
21147+#include <asm/mman.h>
21148
21149 enum {
21150 VDSO_DISABLED = 0,
21151@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21152 void enable_sep_cpu(void)
21153 {
21154 int cpu = get_cpu();
21155- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21156+ struct tss_struct *tss = init_tss + cpu;
21157
21158 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21159 put_cpu();
21160@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21161 gate_vma.vm_start = FIXADDR_USER_START;
21162 gate_vma.vm_end = FIXADDR_USER_END;
21163 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21164- gate_vma.vm_page_prot = __P101;
21165+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21166 /*
21167 * Make sure the vDSO gets into every core dump.
21168 * Dumping its contents makes post-mortem fully interpretable later
21169@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21170 if (compat)
21171 addr = VDSO_HIGH_BASE;
21172 else {
21173- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21174+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21175 if (IS_ERR_VALUE(addr)) {
21176 ret = addr;
21177 goto up_fail;
21178 }
21179 }
21180
21181- current->mm->context.vdso = (void *)addr;
21182+ current->mm->context.vdso = addr;
21183
21184 if (compat_uses_vma || !compat) {
21185 /*
21186@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21187 }
21188
21189 current_thread_info()->sysenter_return =
21190- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21191+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21192
21193 up_fail:
21194 if (ret)
21195- current->mm->context.vdso = NULL;
21196+ current->mm->context.vdso = 0;
21197
21198 up_write(&mm->mmap_sem);
21199
21200@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21201
21202 const char *arch_vma_name(struct vm_area_struct *vma)
21203 {
21204- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21205+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21206 return "[vdso]";
21207+
21208+#ifdef CONFIG_PAX_SEGMEXEC
21209+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21210+ return "[vdso]";
21211+#endif
21212+
21213 return NULL;
21214 }
21215
21216@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21217 * Check to see if the corresponding task was created in compat vdso
21218 * mode.
21219 */
21220- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21221+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21222 return &gate_vma;
21223 return NULL;
21224 }
21225diff -urNp linux-3.0.3/arch/x86/vdso/vma.c linux-3.0.3/arch/x86/vdso/vma.c
21226--- linux-3.0.3/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21227+++ linux-3.0.3/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21228@@ -15,18 +15,19 @@
21229 #include <asm/proto.h>
21230 #include <asm/vdso.h>
21231
21232-unsigned int __read_mostly vdso_enabled = 1;
21233-
21234 extern char vdso_start[], vdso_end[];
21235 extern unsigned short vdso_sync_cpuid;
21236+extern char __vsyscall_0;
21237
21238 static struct page **vdso_pages;
21239+static struct page *vsyscall_page;
21240 static unsigned vdso_size;
21241
21242 static int __init init_vdso_vars(void)
21243 {
21244- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21245- int i;
21246+ size_t nbytes = vdso_end - vdso_start;
21247+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21248+ size_t i;
21249
21250 vdso_size = npages << PAGE_SHIFT;
21251 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21252@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21253 goto oom;
21254 for (i = 0; i < npages; i++) {
21255 struct page *p;
21256- p = alloc_page(GFP_KERNEL);
21257+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21258 if (!p)
21259 goto oom;
21260 vdso_pages[i] = p;
21261- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21262+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21263+ nbytes -= PAGE_SIZE;
21264 }
21265+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21266
21267 return 0;
21268
21269 oom:
21270- printk("Cannot allocate vdso\n");
21271- vdso_enabled = 0;
21272- return -ENOMEM;
21273+ panic("Cannot allocate vdso\n");
21274 }
21275 subsys_initcall(init_vdso_vars);
21276
21277@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21278 unsigned long addr;
21279 int ret;
21280
21281- if (!vdso_enabled)
21282- return 0;
21283-
21284 down_write(&mm->mmap_sem);
21285- addr = vdso_addr(mm->start_stack, vdso_size);
21286- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21287+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21288+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21289 if (IS_ERR_VALUE(addr)) {
21290 ret = addr;
21291 goto up_fail;
21292 }
21293
21294- current->mm->context.vdso = (void *)addr;
21295+ mm->context.vdso = addr + PAGE_SIZE;
21296
21297- ret = install_special_mapping(mm, addr, vdso_size,
21298+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
21299 VM_READ|VM_EXEC|
21300- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21301+ VM_MAYREAD|VM_MAYEXEC|
21302 VM_ALWAYSDUMP,
21303- vdso_pages);
21304+ &vsyscall_page);
21305 if (ret) {
21306- current->mm->context.vdso = NULL;
21307+ mm->context.vdso = 0;
21308 goto up_fail;
21309 }
21310
21311+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21312+ VM_READ|VM_EXEC|
21313+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21314+ VM_ALWAYSDUMP,
21315+ vdso_pages);
21316+ if (ret)
21317+ mm->context.vdso = 0;
21318+
21319 up_fail:
21320 up_write(&mm->mmap_sem);
21321 return ret;
21322 }
21323-
21324-static __init int vdso_setup(char *s)
21325-{
21326- vdso_enabled = simple_strtoul(s, NULL, 0);
21327- return 0;
21328-}
21329-__setup("vdso=", vdso_setup);
21330diff -urNp linux-3.0.3/arch/x86/xen/enlighten.c linux-3.0.3/arch/x86/xen/enlighten.c
21331--- linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:44:40.000000000 -0400
21332+++ linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:47:55.000000000 -0400
21333@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21334
21335 struct shared_info xen_dummy_shared_info;
21336
21337-void *xen_initial_gdt;
21338-
21339 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21340 __read_mostly int xen_have_vector_callback;
21341 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21342@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21343 #endif
21344 };
21345
21346-static void xen_reboot(int reason)
21347+static __noreturn void xen_reboot(int reason)
21348 {
21349 struct sched_shutdown r = { .reason = reason };
21350
21351@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21352 BUG();
21353 }
21354
21355-static void xen_restart(char *msg)
21356+static __noreturn void xen_restart(char *msg)
21357 {
21358 xen_reboot(SHUTDOWN_reboot);
21359 }
21360
21361-static void xen_emergency_restart(void)
21362+static __noreturn void xen_emergency_restart(void)
21363 {
21364 xen_reboot(SHUTDOWN_reboot);
21365 }
21366
21367-static void xen_machine_halt(void)
21368+static __noreturn void xen_machine_halt(void)
21369 {
21370 xen_reboot(SHUTDOWN_poweroff);
21371 }
21372@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21373 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21374
21375 /* Work out if we support NX */
21376- x86_configure_nx();
21377+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21378+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21379+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21380+ unsigned l, h;
21381+
21382+ __supported_pte_mask |= _PAGE_NX;
21383+ rdmsr(MSR_EFER, l, h);
21384+ l |= EFER_NX;
21385+ wrmsr(MSR_EFER, l, h);
21386+ }
21387+#endif
21388
21389 xen_setup_features();
21390
21391@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21392
21393 machine_ops = xen_machine_ops;
21394
21395- /*
21396- * The only reliable way to retain the initial address of the
21397- * percpu gdt_page is to remember it here, so we can go and
21398- * mark it RW later, when the initial percpu area is freed.
21399- */
21400- xen_initial_gdt = &per_cpu(gdt_page, 0);
21401-
21402 xen_smp_init();
21403
21404 #ifdef CONFIG_ACPI_NUMA
21405diff -urNp linux-3.0.3/arch/x86/xen/mmu.c linux-3.0.3/arch/x86/xen/mmu.c
21406--- linux-3.0.3/arch/x86/xen/mmu.c 2011-07-21 22:17:23.000000000 -0400
21407+++ linux-3.0.3/arch/x86/xen/mmu.c 2011-08-24 18:10:12.000000000 -0400
21408@@ -1679,6 +1679,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21409 convert_pfn_mfn(init_level4_pgt);
21410 convert_pfn_mfn(level3_ident_pgt);
21411 convert_pfn_mfn(level3_kernel_pgt);
21412+ convert_pfn_mfn(level3_vmalloc_pgt);
21413+ convert_pfn_mfn(level3_vmemmap_pgt);
21414
21415 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21416 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21417@@ -1697,7 +1699,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21418 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21419 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21420 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21421+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21422+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21423 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21424+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21425 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21426 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21427
21428@@ -1909,6 +1914,7 @@ static void __init xen_post_allocator_in
21429 pv_mmu_ops.set_pud = xen_set_pud;
21430 #if PAGETABLE_LEVELS == 4
21431 pv_mmu_ops.set_pgd = xen_set_pgd;
21432+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21433 #endif
21434
21435 /* This will work as long as patching hasn't happened yet
21436@@ -1990,6 +1996,7 @@ static const struct pv_mmu_ops xen_mmu_o
21437 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21438 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21439 .set_pgd = xen_set_pgd_hyper,
21440+ .set_pgd_batched = xen_set_pgd_hyper,
21441
21442 .alloc_pud = xen_alloc_pmd_init,
21443 .release_pud = xen_release_pmd_init,
21444diff -urNp linux-3.0.3/arch/x86/xen/smp.c linux-3.0.3/arch/x86/xen/smp.c
21445--- linux-3.0.3/arch/x86/xen/smp.c 2011-07-21 22:17:23.000000000 -0400
21446+++ linux-3.0.3/arch/x86/xen/smp.c 2011-08-23 21:47:55.000000000 -0400
21447@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21448 {
21449 BUG_ON(smp_processor_id() != 0);
21450 native_smp_prepare_boot_cpu();
21451-
21452- /* We've switched to the "real" per-cpu gdt, so make sure the
21453- old memory can be recycled */
21454- make_lowmem_page_readwrite(xen_initial_gdt);
21455-
21456 xen_filter_cpu_maps();
21457 xen_setup_vcpu_info_placement();
21458 }
21459@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21460 gdt = get_cpu_gdt_table(cpu);
21461
21462 ctxt->flags = VGCF_IN_KERNEL;
21463- ctxt->user_regs.ds = __USER_DS;
21464- ctxt->user_regs.es = __USER_DS;
21465+ ctxt->user_regs.ds = __KERNEL_DS;
21466+ ctxt->user_regs.es = __KERNEL_DS;
21467 ctxt->user_regs.ss = __KERNEL_DS;
21468 #ifdef CONFIG_X86_32
21469 ctxt->user_regs.fs = __KERNEL_PERCPU;
21470- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21471+ savesegment(gs, ctxt->user_regs.gs);
21472 #else
21473 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21474 #endif
21475@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21476 int rc;
21477
21478 per_cpu(current_task, cpu) = idle;
21479+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
21480 #ifdef CONFIG_X86_32
21481 irq_ctx_init(cpu);
21482 #else
21483 clear_tsk_thread_flag(idle, TIF_FORK);
21484- per_cpu(kernel_stack, cpu) =
21485- (unsigned long)task_stack_page(idle) -
21486- KERNEL_STACK_OFFSET + THREAD_SIZE;
21487+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21488 #endif
21489 xen_setup_runstate_info(cpu);
21490 xen_setup_timer(cpu);
21491diff -urNp linux-3.0.3/arch/x86/xen/xen-asm_32.S linux-3.0.3/arch/x86/xen/xen-asm_32.S
21492--- linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21493+++ linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21494@@ -83,14 +83,14 @@ ENTRY(xen_iret)
21495 ESP_OFFSET=4 # bytes pushed onto stack
21496
21497 /*
21498- * Store vcpu_info pointer for easy access. Do it this way to
21499- * avoid having to reload %fs
21500+ * Store vcpu_info pointer for easy access.
21501 */
21502 #ifdef CONFIG_SMP
21503- GET_THREAD_INFO(%eax)
21504- movl TI_cpu(%eax), %eax
21505- movl __per_cpu_offset(,%eax,4), %eax
21506- mov xen_vcpu(%eax), %eax
21507+ push %fs
21508+ mov $(__KERNEL_PERCPU), %eax
21509+ mov %eax, %fs
21510+ mov PER_CPU_VAR(xen_vcpu), %eax
21511+ pop %fs
21512 #else
21513 movl xen_vcpu, %eax
21514 #endif
21515diff -urNp linux-3.0.3/arch/x86/xen/xen-head.S linux-3.0.3/arch/x86/xen/xen-head.S
21516--- linux-3.0.3/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21517+++ linux-3.0.3/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21518@@ -19,6 +19,17 @@ ENTRY(startup_xen)
21519 #ifdef CONFIG_X86_32
21520 mov %esi,xen_start_info
21521 mov $init_thread_union+THREAD_SIZE,%esp
21522+#ifdef CONFIG_SMP
21523+ movl $cpu_gdt_table,%edi
21524+ movl $__per_cpu_load,%eax
21525+ movw %ax,__KERNEL_PERCPU + 2(%edi)
21526+ rorl $16,%eax
21527+ movb %al,__KERNEL_PERCPU + 4(%edi)
21528+ movb %ah,__KERNEL_PERCPU + 7(%edi)
21529+ movl $__per_cpu_end - 1,%eax
21530+ subl $__per_cpu_start,%eax
21531+ movw %ax,__KERNEL_PERCPU + 0(%edi)
21532+#endif
21533 #else
21534 mov %rsi,xen_start_info
21535 mov $init_thread_union+THREAD_SIZE,%rsp
21536diff -urNp linux-3.0.3/arch/x86/xen/xen-ops.h linux-3.0.3/arch/x86/xen/xen-ops.h
21537--- linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21538+++ linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21539@@ -10,8 +10,6 @@
21540 extern const char xen_hypervisor_callback[];
21541 extern const char xen_failsafe_callback[];
21542
21543-extern void *xen_initial_gdt;
21544-
21545 struct trap_info;
21546 void xen_copy_trap_info(struct trap_info *traps);
21547
21548diff -urNp linux-3.0.3/block/blk-iopoll.c linux-3.0.3/block/blk-iopoll.c
21549--- linux-3.0.3/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21550+++ linux-3.0.3/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21551@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21552 }
21553 EXPORT_SYMBOL(blk_iopoll_complete);
21554
21555-static void blk_iopoll_softirq(struct softirq_action *h)
21556+static void blk_iopoll_softirq(void)
21557 {
21558 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21559 int rearm = 0, budget = blk_iopoll_budget;
21560diff -urNp linux-3.0.3/block/blk-map.c linux-3.0.3/block/blk-map.c
21561--- linux-3.0.3/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21562+++ linux-3.0.3/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21563@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21564 if (!len || !kbuf)
21565 return -EINVAL;
21566
21567- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21568+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21569 if (do_copy)
21570 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21571 else
21572diff -urNp linux-3.0.3/block/blk-softirq.c linux-3.0.3/block/blk-softirq.c
21573--- linux-3.0.3/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21574+++ linux-3.0.3/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21575@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21576 * Softirq action handler - move entries to local list and loop over them
21577 * while passing them to the queue registered handler.
21578 */
21579-static void blk_done_softirq(struct softirq_action *h)
21580+static void blk_done_softirq(void)
21581 {
21582 struct list_head *cpu_list, local_list;
21583
21584diff -urNp linux-3.0.3/block/bsg.c linux-3.0.3/block/bsg.c
21585--- linux-3.0.3/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21586+++ linux-3.0.3/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21587@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21588 struct sg_io_v4 *hdr, struct bsg_device *bd,
21589 fmode_t has_write_perm)
21590 {
21591+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21592+ unsigned char *cmdptr;
21593+
21594 if (hdr->request_len > BLK_MAX_CDB) {
21595 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21596 if (!rq->cmd)
21597 return -ENOMEM;
21598- }
21599+ cmdptr = rq->cmd;
21600+ } else
21601+ cmdptr = tmpcmd;
21602
21603- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21604+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21605 hdr->request_len))
21606 return -EFAULT;
21607
21608+ if (cmdptr != rq->cmd)
21609+ memcpy(rq->cmd, cmdptr, hdr->request_len);
21610+
21611 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21612 if (blk_verify_command(rq->cmd, has_write_perm))
21613 return -EPERM;
21614diff -urNp linux-3.0.3/block/scsi_ioctl.c linux-3.0.3/block/scsi_ioctl.c
21615--- linux-3.0.3/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21616+++ linux-3.0.3/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21617@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21618 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21619 struct sg_io_hdr *hdr, fmode_t mode)
21620 {
21621- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21622+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21623+ unsigned char *cmdptr;
21624+
21625+ if (rq->cmd != rq->__cmd)
21626+ cmdptr = rq->cmd;
21627+ else
21628+ cmdptr = tmpcmd;
21629+
21630+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21631 return -EFAULT;
21632+
21633+ if (cmdptr != rq->cmd)
21634+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21635+
21636 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21637 return -EPERM;
21638
21639@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21640 int err;
21641 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21642 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21643+ unsigned char tmpcmd[sizeof(rq->__cmd)];
21644+ unsigned char *cmdptr;
21645
21646 if (!sic)
21647 return -EINVAL;
21648@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21649 */
21650 err = -EFAULT;
21651 rq->cmd_len = cmdlen;
21652- if (copy_from_user(rq->cmd, sic->data, cmdlen))
21653+
21654+ if (rq->cmd != rq->__cmd)
21655+ cmdptr = rq->cmd;
21656+ else
21657+ cmdptr = tmpcmd;
21658+
21659+ if (copy_from_user(cmdptr, sic->data, cmdlen))
21660 goto error;
21661
21662+ if (rq->cmd != cmdptr)
21663+ memcpy(rq->cmd, cmdptr, cmdlen);
21664+
21665 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21666 goto error;
21667
21668diff -urNp linux-3.0.3/crypto/cryptd.c linux-3.0.3/crypto/cryptd.c
21669--- linux-3.0.3/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21670+++ linux-3.0.3/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21671@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21672
21673 struct cryptd_blkcipher_request_ctx {
21674 crypto_completion_t complete;
21675-};
21676+} __no_const;
21677
21678 struct cryptd_hash_ctx {
21679 struct crypto_shash *child;
21680@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21681
21682 struct cryptd_aead_request_ctx {
21683 crypto_completion_t complete;
21684-};
21685+} __no_const;
21686
21687 static void cryptd_queue_worker(struct work_struct *work);
21688
21689diff -urNp linux-3.0.3/crypto/gf128mul.c linux-3.0.3/crypto/gf128mul.c
21690--- linux-3.0.3/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21691+++ linux-3.0.3/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21692@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21693 for (i = 0; i < 7; ++i)
21694 gf128mul_x_lle(&p[i + 1], &p[i]);
21695
21696- memset(r, 0, sizeof(r));
21697+ memset(r, 0, sizeof(*r));
21698 for (i = 0;;) {
21699 u8 ch = ((u8 *)b)[15 - i];
21700
21701@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21702 for (i = 0; i < 7; ++i)
21703 gf128mul_x_bbe(&p[i + 1], &p[i]);
21704
21705- memset(r, 0, sizeof(r));
21706+ memset(r, 0, sizeof(*r));
21707 for (i = 0;;) {
21708 u8 ch = ((u8 *)b)[i];
21709
21710diff -urNp linux-3.0.3/crypto/serpent.c linux-3.0.3/crypto/serpent.c
21711--- linux-3.0.3/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21712+++ linux-3.0.3/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21713@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21714 u32 r0,r1,r2,r3,r4;
21715 int i;
21716
21717+ pax_track_stack();
21718+
21719 /* Copy key, add padding */
21720
21721 for (i = 0; i < keylen; ++i)
21722diff -urNp linux-3.0.3/Documentation/dontdiff linux-3.0.3/Documentation/dontdiff
21723--- linux-3.0.3/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21724+++ linux-3.0.3/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21725@@ -5,6 +5,7 @@
21726 *.cis
21727 *.cpio
21728 *.csp
21729+*.dbg
21730 *.dsp
21731 *.dvi
21732 *.elf
21733@@ -48,9 +49,11 @@
21734 *.tab.h
21735 *.tex
21736 *.ver
21737+*.vim
21738 *.xml
21739 *.xz
21740 *_MODULES
21741+*_reg_safe.h
21742 *_vga16.c
21743 *~
21744 \#*#
21745@@ -70,6 +73,7 @@ Kerntypes
21746 Module.markers
21747 Module.symvers
21748 PENDING
21749+PERF*
21750 SCCS
21751 System.map*
21752 TAGS
21753@@ -98,6 +102,8 @@ bzImage*
21754 capability_names.h
21755 capflags.c
21756 classlist.h*
21757+clut_vga16.c
21758+common-cmds.h
21759 comp*.log
21760 compile.h*
21761 conf
21762@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21763 gconf
21764 gconf.glade.h
21765 gen-devlist
21766+gen-kdb_cmds.c
21767 gen_crc32table
21768 gen_init_cpio
21769 generated
21770 genheaders
21771 genksyms
21772 *_gray256.c
21773+hash
21774 hpet_example
21775 hugepage-mmap
21776 hugepage-shm
21777@@ -146,7 +154,6 @@ int32.c
21778 int4.c
21779 int8.c
21780 kallsyms
21781-kconfig
21782 keywords.c
21783 ksym.c*
21784 ksym.h*
21785@@ -154,7 +161,6 @@ kxgettext
21786 lkc_defs.h
21787 lex.c
21788 lex.*.c
21789-linux
21790 logo_*.c
21791 logo_*_clut224.c
21792 logo_*_mono.c
21793@@ -174,6 +180,7 @@ mkboot
21794 mkbugboot
21795 mkcpustr
21796 mkdep
21797+mkpiggy
21798 mkprep
21799 mkregtable
21800 mktables
21801@@ -209,6 +216,7 @@ r300_reg_safe.h
21802 r420_reg_safe.h
21803 r600_reg_safe.h
21804 recordmcount
21805+regdb.c
21806 relocs
21807 rlim_names.h
21808 rn50_reg_safe.h
21809@@ -219,6 +227,7 @@ setup
21810 setup.bin
21811 setup.elf
21812 sImage
21813+slabinfo
21814 sm_tbl*
21815 split-include
21816 syscalltab.h
21817@@ -246,7 +255,9 @@ vmlinux
21818 vmlinux-*
21819 vmlinux.aout
21820 vmlinux.bin.all
21821+vmlinux.bin.bz2
21822 vmlinux.lds
21823+vmlinux.relocs
21824 vmlinuz
21825 voffset.h
21826 vsyscall.lds
21827@@ -254,6 +265,7 @@ vsyscall_32.lds
21828 wanxlfw.inc
21829 uImage
21830 unifdef
21831+utsrelease.h
21832 wakeup.bin
21833 wakeup.elf
21834 wakeup.lds
21835diff -urNp linux-3.0.3/Documentation/kernel-parameters.txt linux-3.0.3/Documentation/kernel-parameters.txt
21836--- linux-3.0.3/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21837+++ linux-3.0.3/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21838@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21839 the specified number of seconds. This is to be used if
21840 your oopses keep scrolling off the screen.
21841
21842+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21843+ virtualization environments that don't cope well with the
21844+ expand down segment used by UDEREF on X86-32 or the frequent
21845+ page table updates on X86-64.
21846+
21847+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21848+
21849 pcbit= [HW,ISDN]
21850
21851 pcd. [PARIDE]
21852diff -urNp linux-3.0.3/drivers/acpi/apei/cper.c linux-3.0.3/drivers/acpi/apei/cper.c
21853--- linux-3.0.3/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21854+++ linux-3.0.3/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21855@@ -38,12 +38,12 @@
21856 */
21857 u64 cper_next_record_id(void)
21858 {
21859- static atomic64_t seq;
21860+ static atomic64_unchecked_t seq;
21861
21862- if (!atomic64_read(&seq))
21863- atomic64_set(&seq, ((u64)get_seconds()) << 32);
21864+ if (!atomic64_read_unchecked(&seq))
21865+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21866
21867- return atomic64_inc_return(&seq);
21868+ return atomic64_inc_return_unchecked(&seq);
21869 }
21870 EXPORT_SYMBOL_GPL(cper_next_record_id);
21871
21872diff -urNp linux-3.0.3/drivers/acpi/ec_sys.c linux-3.0.3/drivers/acpi/ec_sys.c
21873--- linux-3.0.3/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21874+++ linux-3.0.3/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21875@@ -11,6 +11,7 @@
21876 #include <linux/kernel.h>
21877 #include <linux/acpi.h>
21878 #include <linux/debugfs.h>
21879+#include <asm/uaccess.h>
21880 #include "internal.h"
21881
21882 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21883@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21884 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21885 */
21886 unsigned int size = EC_SPACE_SIZE;
21887- u8 *data = (u8 *) buf;
21888+ u8 data;
21889 loff_t init_off = *off;
21890 int err = 0;
21891
21892@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21893 size = count;
21894
21895 while (size) {
21896- err = ec_read(*off, &data[*off - init_off]);
21897+ err = ec_read(*off, &data);
21898 if (err)
21899 return err;
21900+ if (put_user(data, &buf[*off - init_off]))
21901+ return -EFAULT;
21902 *off += 1;
21903 size--;
21904 }
21905@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21906
21907 unsigned int size = count;
21908 loff_t init_off = *off;
21909- u8 *data = (u8 *) buf;
21910 int err = 0;
21911
21912 if (*off >= EC_SPACE_SIZE)
21913@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21914 }
21915
21916 while (size) {
21917- u8 byte_write = data[*off - init_off];
21918+ u8 byte_write;
21919+ if (get_user(byte_write, &buf[*off - init_off]))
21920+ return -EFAULT;
21921 err = ec_write(*off, byte_write);
21922 if (err)
21923 return err;
21924diff -urNp linux-3.0.3/drivers/acpi/proc.c linux-3.0.3/drivers/acpi/proc.c
21925--- linux-3.0.3/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21926+++ linux-3.0.3/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21927@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21928 size_t count, loff_t * ppos)
21929 {
21930 struct list_head *node, *next;
21931- char strbuf[5];
21932- char str[5] = "";
21933- unsigned int len = count;
21934-
21935- if (len > 4)
21936- len = 4;
21937- if (len < 0)
21938- return -EFAULT;
21939+ char strbuf[5] = {0};
21940
21941- if (copy_from_user(strbuf, buffer, len))
21942+ if (count > 4)
21943+ count = 4;
21944+ if (copy_from_user(strbuf, buffer, count))
21945 return -EFAULT;
21946- strbuf[len] = '\0';
21947- sscanf(strbuf, "%s", str);
21948+ strbuf[count] = '\0';
21949
21950 mutex_lock(&acpi_device_lock);
21951 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21952@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21953 if (!dev->wakeup.flags.valid)
21954 continue;
21955
21956- if (!strncmp(dev->pnp.bus_id, str, 4)) {
21957+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21958 if (device_can_wakeup(&dev->dev)) {
21959 bool enable = !device_may_wakeup(&dev->dev);
21960 device_set_wakeup_enable(&dev->dev, enable);
21961diff -urNp linux-3.0.3/drivers/acpi/processor_driver.c linux-3.0.3/drivers/acpi/processor_driver.c
21962--- linux-3.0.3/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21963+++ linux-3.0.3/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21964@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21965 return 0;
21966 #endif
21967
21968- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21969+ BUG_ON(pr->id >= nr_cpu_ids);
21970
21971 /*
21972 * Buggy BIOS check
21973diff -urNp linux-3.0.3/drivers/ata/libata-core.c linux-3.0.3/drivers/ata/libata-core.c
21974--- linux-3.0.3/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21975+++ linux-3.0.3/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21976@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21977 struct ata_port *ap;
21978 unsigned int tag;
21979
21980- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21981+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21982 ap = qc->ap;
21983
21984 qc->flags = 0;
21985@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21986 struct ata_port *ap;
21987 struct ata_link *link;
21988
21989- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21990+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21991 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21992 ap = qc->ap;
21993 link = qc->dev->link;
21994@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21995 return;
21996
21997 spin_lock(&lock);
21998+ pax_open_kernel();
21999
22000 for (cur = ops->inherits; cur; cur = cur->inherits) {
22001 void **inherit = (void **)cur;
22002@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22003 if (IS_ERR(*pp))
22004 *pp = NULL;
22005
22006- ops->inherits = NULL;
22007+ *(struct ata_port_operations **)&ops->inherits = NULL;
22008
22009+ pax_close_kernel();
22010 spin_unlock(&lock);
22011 }
22012
22013diff -urNp linux-3.0.3/drivers/ata/libata-eh.c linux-3.0.3/drivers/ata/libata-eh.c
22014--- linux-3.0.3/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22015+++ linux-3.0.3/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22016@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22017 {
22018 struct ata_link *link;
22019
22020+ pax_track_stack();
22021+
22022 ata_for_each_link(link, ap, HOST_FIRST)
22023 ata_eh_link_report(link);
22024 }
22025diff -urNp linux-3.0.3/drivers/ata/pata_arasan_cf.c linux-3.0.3/drivers/ata/pata_arasan_cf.c
22026--- linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22027+++ linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22028@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22029 /* Handle platform specific quirks */
22030 if (pdata->quirk) {
22031 if (pdata->quirk & CF_BROKEN_PIO) {
22032- ap->ops->set_piomode = NULL;
22033+ pax_open_kernel();
22034+ *(void **)&ap->ops->set_piomode = NULL;
22035+ pax_close_kernel();
22036 ap->pio_mask = 0;
22037 }
22038 if (pdata->quirk & CF_BROKEN_MWDMA)
22039diff -urNp linux-3.0.3/drivers/atm/adummy.c linux-3.0.3/drivers/atm/adummy.c
22040--- linux-3.0.3/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22041+++ linux-3.0.3/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22042@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22043 vcc->pop(vcc, skb);
22044 else
22045 dev_kfree_skb_any(skb);
22046- atomic_inc(&vcc->stats->tx);
22047+ atomic_inc_unchecked(&vcc->stats->tx);
22048
22049 return 0;
22050 }
22051diff -urNp linux-3.0.3/drivers/atm/ambassador.c linux-3.0.3/drivers/atm/ambassador.c
22052--- linux-3.0.3/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22053+++ linux-3.0.3/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22054@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22055 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22056
22057 // VC layer stats
22058- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22059+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22060
22061 // free the descriptor
22062 kfree (tx_descr);
22063@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22064 dump_skb ("<<<", vc, skb);
22065
22066 // VC layer stats
22067- atomic_inc(&atm_vcc->stats->rx);
22068+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22069 __net_timestamp(skb);
22070 // end of our responsibility
22071 atm_vcc->push (atm_vcc, skb);
22072@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22073 } else {
22074 PRINTK (KERN_INFO, "dropped over-size frame");
22075 // should we count this?
22076- atomic_inc(&atm_vcc->stats->rx_drop);
22077+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22078 }
22079
22080 } else {
22081@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22082 }
22083
22084 if (check_area (skb->data, skb->len)) {
22085- atomic_inc(&atm_vcc->stats->tx_err);
22086+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22087 return -ENOMEM; // ?
22088 }
22089
22090diff -urNp linux-3.0.3/drivers/atm/atmtcp.c linux-3.0.3/drivers/atm/atmtcp.c
22091--- linux-3.0.3/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22092+++ linux-3.0.3/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22093@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22094 if (vcc->pop) vcc->pop(vcc,skb);
22095 else dev_kfree_skb(skb);
22096 if (dev_data) return 0;
22097- atomic_inc(&vcc->stats->tx_err);
22098+ atomic_inc_unchecked(&vcc->stats->tx_err);
22099 return -ENOLINK;
22100 }
22101 size = skb->len+sizeof(struct atmtcp_hdr);
22102@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22103 if (!new_skb) {
22104 if (vcc->pop) vcc->pop(vcc,skb);
22105 else dev_kfree_skb(skb);
22106- atomic_inc(&vcc->stats->tx_err);
22107+ atomic_inc_unchecked(&vcc->stats->tx_err);
22108 return -ENOBUFS;
22109 }
22110 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22111@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22112 if (vcc->pop) vcc->pop(vcc,skb);
22113 else dev_kfree_skb(skb);
22114 out_vcc->push(out_vcc,new_skb);
22115- atomic_inc(&vcc->stats->tx);
22116- atomic_inc(&out_vcc->stats->rx);
22117+ atomic_inc_unchecked(&vcc->stats->tx);
22118+ atomic_inc_unchecked(&out_vcc->stats->rx);
22119 return 0;
22120 }
22121
22122@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22123 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22124 read_unlock(&vcc_sklist_lock);
22125 if (!out_vcc) {
22126- atomic_inc(&vcc->stats->tx_err);
22127+ atomic_inc_unchecked(&vcc->stats->tx_err);
22128 goto done;
22129 }
22130 skb_pull(skb,sizeof(struct atmtcp_hdr));
22131@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22132 __net_timestamp(new_skb);
22133 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22134 out_vcc->push(out_vcc,new_skb);
22135- atomic_inc(&vcc->stats->tx);
22136- atomic_inc(&out_vcc->stats->rx);
22137+ atomic_inc_unchecked(&vcc->stats->tx);
22138+ atomic_inc_unchecked(&out_vcc->stats->rx);
22139 done:
22140 if (vcc->pop) vcc->pop(vcc,skb);
22141 else dev_kfree_skb(skb);
22142diff -urNp linux-3.0.3/drivers/atm/eni.c linux-3.0.3/drivers/atm/eni.c
22143--- linux-3.0.3/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22144+++ linux-3.0.3/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22145@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22146 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22147 vcc->dev->number);
22148 length = 0;
22149- atomic_inc(&vcc->stats->rx_err);
22150+ atomic_inc_unchecked(&vcc->stats->rx_err);
22151 }
22152 else {
22153 length = ATM_CELL_SIZE-1; /* no HEC */
22154@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22155 size);
22156 }
22157 eff = length = 0;
22158- atomic_inc(&vcc->stats->rx_err);
22159+ atomic_inc_unchecked(&vcc->stats->rx_err);
22160 }
22161 else {
22162 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22163@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22164 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22165 vcc->dev->number,vcc->vci,length,size << 2,descr);
22166 length = eff = 0;
22167- atomic_inc(&vcc->stats->rx_err);
22168+ atomic_inc_unchecked(&vcc->stats->rx_err);
22169 }
22170 }
22171 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22172@@ -771,7 +771,7 @@ rx_dequeued++;
22173 vcc->push(vcc,skb);
22174 pushed++;
22175 }
22176- atomic_inc(&vcc->stats->rx);
22177+ atomic_inc_unchecked(&vcc->stats->rx);
22178 }
22179 wake_up(&eni_dev->rx_wait);
22180 }
22181@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22182 PCI_DMA_TODEVICE);
22183 if (vcc->pop) vcc->pop(vcc,skb);
22184 else dev_kfree_skb_irq(skb);
22185- atomic_inc(&vcc->stats->tx);
22186+ atomic_inc_unchecked(&vcc->stats->tx);
22187 wake_up(&eni_dev->tx_wait);
22188 dma_complete++;
22189 }
22190diff -urNp linux-3.0.3/drivers/atm/firestream.c linux-3.0.3/drivers/atm/firestream.c
22191--- linux-3.0.3/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22192+++ linux-3.0.3/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22193@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22194 }
22195 }
22196
22197- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22198+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22199
22200 fs_dprintk (FS_DEBUG_TXMEM, "i");
22201 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22202@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22203 #endif
22204 skb_put (skb, qe->p1 & 0xffff);
22205 ATM_SKB(skb)->vcc = atm_vcc;
22206- atomic_inc(&atm_vcc->stats->rx);
22207+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22208 __net_timestamp(skb);
22209 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22210 atm_vcc->push (atm_vcc, skb);
22211@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22212 kfree (pe);
22213 }
22214 if (atm_vcc)
22215- atomic_inc(&atm_vcc->stats->rx_drop);
22216+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22217 break;
22218 case 0x1f: /* Reassembly abort: no buffers. */
22219 /* Silently increment error counter. */
22220 if (atm_vcc)
22221- atomic_inc(&atm_vcc->stats->rx_drop);
22222+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22223 break;
22224 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22225 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22226diff -urNp linux-3.0.3/drivers/atm/fore200e.c linux-3.0.3/drivers/atm/fore200e.c
22227--- linux-3.0.3/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22228+++ linux-3.0.3/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22229@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22230 #endif
22231 /* check error condition */
22232 if (*entry->status & STATUS_ERROR)
22233- atomic_inc(&vcc->stats->tx_err);
22234+ atomic_inc_unchecked(&vcc->stats->tx_err);
22235 else
22236- atomic_inc(&vcc->stats->tx);
22237+ atomic_inc_unchecked(&vcc->stats->tx);
22238 }
22239 }
22240
22241@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22242 if (skb == NULL) {
22243 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22244
22245- atomic_inc(&vcc->stats->rx_drop);
22246+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22247 return -ENOMEM;
22248 }
22249
22250@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22251
22252 dev_kfree_skb_any(skb);
22253
22254- atomic_inc(&vcc->stats->rx_drop);
22255+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22256 return -ENOMEM;
22257 }
22258
22259 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22260
22261 vcc->push(vcc, skb);
22262- atomic_inc(&vcc->stats->rx);
22263+ atomic_inc_unchecked(&vcc->stats->rx);
22264
22265 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22266
22267@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22268 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22269 fore200e->atm_dev->number,
22270 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22271- atomic_inc(&vcc->stats->rx_err);
22272+ atomic_inc_unchecked(&vcc->stats->rx_err);
22273 }
22274 }
22275
22276@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22277 goto retry_here;
22278 }
22279
22280- atomic_inc(&vcc->stats->tx_err);
22281+ atomic_inc_unchecked(&vcc->stats->tx_err);
22282
22283 fore200e->tx_sat++;
22284 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22285diff -urNp linux-3.0.3/drivers/atm/he.c linux-3.0.3/drivers/atm/he.c
22286--- linux-3.0.3/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22287+++ linux-3.0.3/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22288@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22289
22290 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22291 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22292- atomic_inc(&vcc->stats->rx_drop);
22293+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22294 goto return_host_buffers;
22295 }
22296
22297@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22298 RBRQ_LEN_ERR(he_dev->rbrq_head)
22299 ? "LEN_ERR" : "",
22300 vcc->vpi, vcc->vci);
22301- atomic_inc(&vcc->stats->rx_err);
22302+ atomic_inc_unchecked(&vcc->stats->rx_err);
22303 goto return_host_buffers;
22304 }
22305
22306@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22307 vcc->push(vcc, skb);
22308 spin_lock(&he_dev->global_lock);
22309
22310- atomic_inc(&vcc->stats->rx);
22311+ atomic_inc_unchecked(&vcc->stats->rx);
22312
22313 return_host_buffers:
22314 ++pdus_assembled;
22315@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22316 tpd->vcc->pop(tpd->vcc, tpd->skb);
22317 else
22318 dev_kfree_skb_any(tpd->skb);
22319- atomic_inc(&tpd->vcc->stats->tx_err);
22320+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22321 }
22322 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22323 return;
22324@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22325 vcc->pop(vcc, skb);
22326 else
22327 dev_kfree_skb_any(skb);
22328- atomic_inc(&vcc->stats->tx_err);
22329+ atomic_inc_unchecked(&vcc->stats->tx_err);
22330 return -EINVAL;
22331 }
22332
22333@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22334 vcc->pop(vcc, skb);
22335 else
22336 dev_kfree_skb_any(skb);
22337- atomic_inc(&vcc->stats->tx_err);
22338+ atomic_inc_unchecked(&vcc->stats->tx_err);
22339 return -EINVAL;
22340 }
22341 #endif
22342@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22343 vcc->pop(vcc, skb);
22344 else
22345 dev_kfree_skb_any(skb);
22346- atomic_inc(&vcc->stats->tx_err);
22347+ atomic_inc_unchecked(&vcc->stats->tx_err);
22348 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22349 return -ENOMEM;
22350 }
22351@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22352 vcc->pop(vcc, skb);
22353 else
22354 dev_kfree_skb_any(skb);
22355- atomic_inc(&vcc->stats->tx_err);
22356+ atomic_inc_unchecked(&vcc->stats->tx_err);
22357 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22358 return -ENOMEM;
22359 }
22360@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22361 __enqueue_tpd(he_dev, tpd, cid);
22362 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22363
22364- atomic_inc(&vcc->stats->tx);
22365+ atomic_inc_unchecked(&vcc->stats->tx);
22366
22367 return 0;
22368 }
22369diff -urNp linux-3.0.3/drivers/atm/horizon.c linux-3.0.3/drivers/atm/horizon.c
22370--- linux-3.0.3/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22371+++ linux-3.0.3/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22372@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22373 {
22374 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22375 // VC layer stats
22376- atomic_inc(&vcc->stats->rx);
22377+ atomic_inc_unchecked(&vcc->stats->rx);
22378 __net_timestamp(skb);
22379 // end of our responsibility
22380 vcc->push (vcc, skb);
22381@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22382 dev->tx_iovec = NULL;
22383
22384 // VC layer stats
22385- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22386+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22387
22388 // free the skb
22389 hrz_kfree_skb (skb);
22390diff -urNp linux-3.0.3/drivers/atm/idt77252.c linux-3.0.3/drivers/atm/idt77252.c
22391--- linux-3.0.3/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22392+++ linux-3.0.3/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22393@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22394 else
22395 dev_kfree_skb(skb);
22396
22397- atomic_inc(&vcc->stats->tx);
22398+ atomic_inc_unchecked(&vcc->stats->tx);
22399 }
22400
22401 atomic_dec(&scq->used);
22402@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22403 if ((sb = dev_alloc_skb(64)) == NULL) {
22404 printk("%s: Can't allocate buffers for aal0.\n",
22405 card->name);
22406- atomic_add(i, &vcc->stats->rx_drop);
22407+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22408 break;
22409 }
22410 if (!atm_charge(vcc, sb->truesize)) {
22411 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22412 card->name);
22413- atomic_add(i - 1, &vcc->stats->rx_drop);
22414+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22415 dev_kfree_skb(sb);
22416 break;
22417 }
22418@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22419 ATM_SKB(sb)->vcc = vcc;
22420 __net_timestamp(sb);
22421 vcc->push(vcc, sb);
22422- atomic_inc(&vcc->stats->rx);
22423+ atomic_inc_unchecked(&vcc->stats->rx);
22424
22425 cell += ATM_CELL_PAYLOAD;
22426 }
22427@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22428 "(CDC: %08x)\n",
22429 card->name, len, rpp->len, readl(SAR_REG_CDC));
22430 recycle_rx_pool_skb(card, rpp);
22431- atomic_inc(&vcc->stats->rx_err);
22432+ atomic_inc_unchecked(&vcc->stats->rx_err);
22433 return;
22434 }
22435 if (stat & SAR_RSQE_CRC) {
22436 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22437 recycle_rx_pool_skb(card, rpp);
22438- atomic_inc(&vcc->stats->rx_err);
22439+ atomic_inc_unchecked(&vcc->stats->rx_err);
22440 return;
22441 }
22442 if (skb_queue_len(&rpp->queue) > 1) {
22443@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22444 RXPRINTK("%s: Can't alloc RX skb.\n",
22445 card->name);
22446 recycle_rx_pool_skb(card, rpp);
22447- atomic_inc(&vcc->stats->rx_err);
22448+ atomic_inc_unchecked(&vcc->stats->rx_err);
22449 return;
22450 }
22451 if (!atm_charge(vcc, skb->truesize)) {
22452@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22453 __net_timestamp(skb);
22454
22455 vcc->push(vcc, skb);
22456- atomic_inc(&vcc->stats->rx);
22457+ atomic_inc_unchecked(&vcc->stats->rx);
22458
22459 return;
22460 }
22461@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22462 __net_timestamp(skb);
22463
22464 vcc->push(vcc, skb);
22465- atomic_inc(&vcc->stats->rx);
22466+ atomic_inc_unchecked(&vcc->stats->rx);
22467
22468 if (skb->truesize > SAR_FB_SIZE_3)
22469 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22470@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22471 if (vcc->qos.aal != ATM_AAL0) {
22472 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22473 card->name, vpi, vci);
22474- atomic_inc(&vcc->stats->rx_drop);
22475+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22476 goto drop;
22477 }
22478
22479 if ((sb = dev_alloc_skb(64)) == NULL) {
22480 printk("%s: Can't allocate buffers for AAL0.\n",
22481 card->name);
22482- atomic_inc(&vcc->stats->rx_err);
22483+ atomic_inc_unchecked(&vcc->stats->rx_err);
22484 goto drop;
22485 }
22486
22487@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22488 ATM_SKB(sb)->vcc = vcc;
22489 __net_timestamp(sb);
22490 vcc->push(vcc, sb);
22491- atomic_inc(&vcc->stats->rx);
22492+ atomic_inc_unchecked(&vcc->stats->rx);
22493
22494 drop:
22495 skb_pull(queue, 64);
22496@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22497
22498 if (vc == NULL) {
22499 printk("%s: NULL connection in send().\n", card->name);
22500- atomic_inc(&vcc->stats->tx_err);
22501+ atomic_inc_unchecked(&vcc->stats->tx_err);
22502 dev_kfree_skb(skb);
22503 return -EINVAL;
22504 }
22505 if (!test_bit(VCF_TX, &vc->flags)) {
22506 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22507- atomic_inc(&vcc->stats->tx_err);
22508+ atomic_inc_unchecked(&vcc->stats->tx_err);
22509 dev_kfree_skb(skb);
22510 return -EINVAL;
22511 }
22512@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22513 break;
22514 default:
22515 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22516- atomic_inc(&vcc->stats->tx_err);
22517+ atomic_inc_unchecked(&vcc->stats->tx_err);
22518 dev_kfree_skb(skb);
22519 return -EINVAL;
22520 }
22521
22522 if (skb_shinfo(skb)->nr_frags != 0) {
22523 printk("%s: No scatter-gather yet.\n", card->name);
22524- atomic_inc(&vcc->stats->tx_err);
22525+ atomic_inc_unchecked(&vcc->stats->tx_err);
22526 dev_kfree_skb(skb);
22527 return -EINVAL;
22528 }
22529@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22530
22531 err = queue_skb(card, vc, skb, oam);
22532 if (err) {
22533- atomic_inc(&vcc->stats->tx_err);
22534+ atomic_inc_unchecked(&vcc->stats->tx_err);
22535 dev_kfree_skb(skb);
22536 return err;
22537 }
22538@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22539 skb = dev_alloc_skb(64);
22540 if (!skb) {
22541 printk("%s: Out of memory in send_oam().\n", card->name);
22542- atomic_inc(&vcc->stats->tx_err);
22543+ atomic_inc_unchecked(&vcc->stats->tx_err);
22544 return -ENOMEM;
22545 }
22546 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22547diff -urNp linux-3.0.3/drivers/atm/iphase.c linux-3.0.3/drivers/atm/iphase.c
22548--- linux-3.0.3/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22549+++ linux-3.0.3/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22550@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22551 status = (u_short) (buf_desc_ptr->desc_mode);
22552 if (status & (RX_CER | RX_PTE | RX_OFL))
22553 {
22554- atomic_inc(&vcc->stats->rx_err);
22555+ atomic_inc_unchecked(&vcc->stats->rx_err);
22556 IF_ERR(printk("IA: bad packet, dropping it");)
22557 if (status & RX_CER) {
22558 IF_ERR(printk(" cause: packet CRC error\n");)
22559@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22560 len = dma_addr - buf_addr;
22561 if (len > iadev->rx_buf_sz) {
22562 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22563- atomic_inc(&vcc->stats->rx_err);
22564+ atomic_inc_unchecked(&vcc->stats->rx_err);
22565 goto out_free_desc;
22566 }
22567
22568@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22569 ia_vcc = INPH_IA_VCC(vcc);
22570 if (ia_vcc == NULL)
22571 {
22572- atomic_inc(&vcc->stats->rx_err);
22573+ atomic_inc_unchecked(&vcc->stats->rx_err);
22574 dev_kfree_skb_any(skb);
22575 atm_return(vcc, atm_guess_pdu2truesize(len));
22576 goto INCR_DLE;
22577@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22578 if ((length > iadev->rx_buf_sz) || (length >
22579 (skb->len - sizeof(struct cpcs_trailer))))
22580 {
22581- atomic_inc(&vcc->stats->rx_err);
22582+ atomic_inc_unchecked(&vcc->stats->rx_err);
22583 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22584 length, skb->len);)
22585 dev_kfree_skb_any(skb);
22586@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22587
22588 IF_RX(printk("rx_dle_intr: skb push");)
22589 vcc->push(vcc,skb);
22590- atomic_inc(&vcc->stats->rx);
22591+ atomic_inc_unchecked(&vcc->stats->rx);
22592 iadev->rx_pkt_cnt++;
22593 }
22594 INCR_DLE:
22595@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22596 {
22597 struct k_sonet_stats *stats;
22598 stats = &PRIV(_ia_dev[board])->sonet_stats;
22599- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22600- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22601- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22602- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22603- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22604- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22605- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22606- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22607- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22608+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22609+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22610+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22611+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22612+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22613+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22614+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22615+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22616+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22617 }
22618 ia_cmds.status = 0;
22619 break;
22620@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22621 if ((desc == 0) || (desc > iadev->num_tx_desc))
22622 {
22623 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22624- atomic_inc(&vcc->stats->tx);
22625+ atomic_inc_unchecked(&vcc->stats->tx);
22626 if (vcc->pop)
22627 vcc->pop(vcc, skb);
22628 else
22629@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22630 ATM_DESC(skb) = vcc->vci;
22631 skb_queue_tail(&iadev->tx_dma_q, skb);
22632
22633- atomic_inc(&vcc->stats->tx);
22634+ atomic_inc_unchecked(&vcc->stats->tx);
22635 iadev->tx_pkt_cnt++;
22636 /* Increment transaction counter */
22637 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22638
22639 #if 0
22640 /* add flow control logic */
22641- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22642+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22643 if (iavcc->vc_desc_cnt > 10) {
22644 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22645 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22646diff -urNp linux-3.0.3/drivers/atm/lanai.c linux-3.0.3/drivers/atm/lanai.c
22647--- linux-3.0.3/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22648+++ linux-3.0.3/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22649@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22650 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22651 lanai_endtx(lanai, lvcc);
22652 lanai_free_skb(lvcc->tx.atmvcc, skb);
22653- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22654+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22655 }
22656
22657 /* Try to fill the buffer - don't call unless there is backlog */
22658@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22659 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22660 __net_timestamp(skb);
22661 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22662- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22663+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22664 out:
22665 lvcc->rx.buf.ptr = end;
22666 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22667@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22668 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22669 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22670 lanai->stats.service_rxnotaal5++;
22671- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22672+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22673 return 0;
22674 }
22675 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22676@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22677 int bytes;
22678 read_unlock(&vcc_sklist_lock);
22679 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22680- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22681+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22682 lvcc->stats.x.aal5.service_trash++;
22683 bytes = (SERVICE_GET_END(s) * 16) -
22684 (((unsigned long) lvcc->rx.buf.ptr) -
22685@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22686 }
22687 if (s & SERVICE_STREAM) {
22688 read_unlock(&vcc_sklist_lock);
22689- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22690+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22691 lvcc->stats.x.aal5.service_stream++;
22692 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22693 "PDU on VCI %d!\n", lanai->number, vci);
22694@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22695 return 0;
22696 }
22697 DPRINTK("got rx crc error on vci %d\n", vci);
22698- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22699+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22700 lvcc->stats.x.aal5.service_rxcrc++;
22701 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22702 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22703diff -urNp linux-3.0.3/drivers/atm/nicstar.c linux-3.0.3/drivers/atm/nicstar.c
22704--- linux-3.0.3/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22705+++ linux-3.0.3/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22706@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22707 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22708 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22709 card->index);
22710- atomic_inc(&vcc->stats->tx_err);
22711+ atomic_inc_unchecked(&vcc->stats->tx_err);
22712 dev_kfree_skb_any(skb);
22713 return -EINVAL;
22714 }
22715@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22716 if (!vc->tx) {
22717 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22718 card->index);
22719- atomic_inc(&vcc->stats->tx_err);
22720+ atomic_inc_unchecked(&vcc->stats->tx_err);
22721 dev_kfree_skb_any(skb);
22722 return -EINVAL;
22723 }
22724@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22725 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22726 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22727 card->index);
22728- atomic_inc(&vcc->stats->tx_err);
22729+ atomic_inc_unchecked(&vcc->stats->tx_err);
22730 dev_kfree_skb_any(skb);
22731 return -EINVAL;
22732 }
22733
22734 if (skb_shinfo(skb)->nr_frags != 0) {
22735 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22736- atomic_inc(&vcc->stats->tx_err);
22737+ atomic_inc_unchecked(&vcc->stats->tx_err);
22738 dev_kfree_skb_any(skb);
22739 return -EINVAL;
22740 }
22741@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22742 }
22743
22744 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22745- atomic_inc(&vcc->stats->tx_err);
22746+ atomic_inc_unchecked(&vcc->stats->tx_err);
22747 dev_kfree_skb_any(skb);
22748 return -EIO;
22749 }
22750- atomic_inc(&vcc->stats->tx);
22751+ atomic_inc_unchecked(&vcc->stats->tx);
22752
22753 return 0;
22754 }
22755@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22756 printk
22757 ("nicstar%d: Can't allocate buffers for aal0.\n",
22758 card->index);
22759- atomic_add(i, &vcc->stats->rx_drop);
22760+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
22761 break;
22762 }
22763 if (!atm_charge(vcc, sb->truesize)) {
22764 RXPRINTK
22765 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22766 card->index);
22767- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22768+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22769 dev_kfree_skb_any(sb);
22770 break;
22771 }
22772@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22773 ATM_SKB(sb)->vcc = vcc;
22774 __net_timestamp(sb);
22775 vcc->push(vcc, sb);
22776- atomic_inc(&vcc->stats->rx);
22777+ atomic_inc_unchecked(&vcc->stats->rx);
22778 cell += ATM_CELL_PAYLOAD;
22779 }
22780
22781@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22782 if (iovb == NULL) {
22783 printk("nicstar%d: Out of iovec buffers.\n",
22784 card->index);
22785- atomic_inc(&vcc->stats->rx_drop);
22786+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22787 recycle_rx_buf(card, skb);
22788 return;
22789 }
22790@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22791 small or large buffer itself. */
22792 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22793 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22794- atomic_inc(&vcc->stats->rx_err);
22795+ atomic_inc_unchecked(&vcc->stats->rx_err);
22796 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22797 NS_MAX_IOVECS);
22798 NS_PRV_IOVCNT(iovb) = 0;
22799@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22800 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22801 card->index);
22802 which_list(card, skb);
22803- atomic_inc(&vcc->stats->rx_err);
22804+ atomic_inc_unchecked(&vcc->stats->rx_err);
22805 recycle_rx_buf(card, skb);
22806 vc->rx_iov = NULL;
22807 recycle_iov_buf(card, iovb);
22808@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22809 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22810 card->index);
22811 which_list(card, skb);
22812- atomic_inc(&vcc->stats->rx_err);
22813+ atomic_inc_unchecked(&vcc->stats->rx_err);
22814 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22815 NS_PRV_IOVCNT(iovb));
22816 vc->rx_iov = NULL;
22817@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22818 printk(" - PDU size mismatch.\n");
22819 else
22820 printk(".\n");
22821- atomic_inc(&vcc->stats->rx_err);
22822+ atomic_inc_unchecked(&vcc->stats->rx_err);
22823 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22824 NS_PRV_IOVCNT(iovb));
22825 vc->rx_iov = NULL;
22826@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22827 /* skb points to a small buffer */
22828 if (!atm_charge(vcc, skb->truesize)) {
22829 push_rxbufs(card, skb);
22830- atomic_inc(&vcc->stats->rx_drop);
22831+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22832 } else {
22833 skb_put(skb, len);
22834 dequeue_sm_buf(card, skb);
22835@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22836 ATM_SKB(skb)->vcc = vcc;
22837 __net_timestamp(skb);
22838 vcc->push(vcc, skb);
22839- atomic_inc(&vcc->stats->rx);
22840+ atomic_inc_unchecked(&vcc->stats->rx);
22841 }
22842 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22843 struct sk_buff *sb;
22844@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22845 if (len <= NS_SMBUFSIZE) {
22846 if (!atm_charge(vcc, sb->truesize)) {
22847 push_rxbufs(card, sb);
22848- atomic_inc(&vcc->stats->rx_drop);
22849+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22850 } else {
22851 skb_put(sb, len);
22852 dequeue_sm_buf(card, sb);
22853@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22854 ATM_SKB(sb)->vcc = vcc;
22855 __net_timestamp(sb);
22856 vcc->push(vcc, sb);
22857- atomic_inc(&vcc->stats->rx);
22858+ atomic_inc_unchecked(&vcc->stats->rx);
22859 }
22860
22861 push_rxbufs(card, skb);
22862@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22863
22864 if (!atm_charge(vcc, skb->truesize)) {
22865 push_rxbufs(card, skb);
22866- atomic_inc(&vcc->stats->rx_drop);
22867+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22868 } else {
22869 dequeue_lg_buf(card, skb);
22870 #ifdef NS_USE_DESTRUCTORS
22871@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22872 ATM_SKB(skb)->vcc = vcc;
22873 __net_timestamp(skb);
22874 vcc->push(vcc, skb);
22875- atomic_inc(&vcc->stats->rx);
22876+ atomic_inc_unchecked(&vcc->stats->rx);
22877 }
22878
22879 push_rxbufs(card, sb);
22880@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22881 printk
22882 ("nicstar%d: Out of huge buffers.\n",
22883 card->index);
22884- atomic_inc(&vcc->stats->rx_drop);
22885+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22886 recycle_iovec_rx_bufs(card,
22887 (struct iovec *)
22888 iovb->data,
22889@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22890 card->hbpool.count++;
22891 } else
22892 dev_kfree_skb_any(hb);
22893- atomic_inc(&vcc->stats->rx_drop);
22894+ atomic_inc_unchecked(&vcc->stats->rx_drop);
22895 } else {
22896 /* Copy the small buffer to the huge buffer */
22897 sb = (struct sk_buff *)iov->iov_base;
22898@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22899 #endif /* NS_USE_DESTRUCTORS */
22900 __net_timestamp(hb);
22901 vcc->push(vcc, hb);
22902- atomic_inc(&vcc->stats->rx);
22903+ atomic_inc_unchecked(&vcc->stats->rx);
22904 }
22905 }
22906
22907diff -urNp linux-3.0.3/drivers/atm/solos-pci.c linux-3.0.3/drivers/atm/solos-pci.c
22908--- linux-3.0.3/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22909+++ linux-3.0.3/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22910@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22911 }
22912 atm_charge(vcc, skb->truesize);
22913 vcc->push(vcc, skb);
22914- atomic_inc(&vcc->stats->rx);
22915+ atomic_inc_unchecked(&vcc->stats->rx);
22916 break;
22917
22918 case PKT_STATUS:
22919@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22920 char msg[500];
22921 char item[10];
22922
22923+ pax_track_stack();
22924+
22925 len = buf->len;
22926 for (i = 0; i < len; i++){
22927 if(i % 8 == 0)
22928@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22929 vcc = SKB_CB(oldskb)->vcc;
22930
22931 if (vcc) {
22932- atomic_inc(&vcc->stats->tx);
22933+ atomic_inc_unchecked(&vcc->stats->tx);
22934 solos_pop(vcc, oldskb);
22935 } else
22936 dev_kfree_skb_irq(oldskb);
22937diff -urNp linux-3.0.3/drivers/atm/suni.c linux-3.0.3/drivers/atm/suni.c
22938--- linux-3.0.3/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22939+++ linux-3.0.3/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22940@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22941
22942
22943 #define ADD_LIMITED(s,v) \
22944- atomic_add((v),&stats->s); \
22945- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22946+ atomic_add_unchecked((v),&stats->s); \
22947+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22948
22949
22950 static void suni_hz(unsigned long from_timer)
22951diff -urNp linux-3.0.3/drivers/atm/uPD98402.c linux-3.0.3/drivers/atm/uPD98402.c
22952--- linux-3.0.3/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22953+++ linux-3.0.3/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22954@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22955 struct sonet_stats tmp;
22956 int error = 0;
22957
22958- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22959+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22960 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22961 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22962 if (zero && !error) {
22963@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22964
22965
22966 #define ADD_LIMITED(s,v) \
22967- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22968- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22969- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22970+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22971+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22972+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22973
22974
22975 static void stat_event(struct atm_dev *dev)
22976@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22977 if (reason & uPD98402_INT_PFM) stat_event(dev);
22978 if (reason & uPD98402_INT_PCO) {
22979 (void) GET(PCOCR); /* clear interrupt cause */
22980- atomic_add(GET(HECCT),
22981+ atomic_add_unchecked(GET(HECCT),
22982 &PRIV(dev)->sonet_stats.uncorr_hcs);
22983 }
22984 if ((reason & uPD98402_INT_RFO) &&
22985@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22986 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22987 uPD98402_INT_LOS),PIMR); /* enable them */
22988 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22989- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22990- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22991- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22992+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22993+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22994+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22995 return 0;
22996 }
22997
22998diff -urNp linux-3.0.3/drivers/atm/zatm.c linux-3.0.3/drivers/atm/zatm.c
22999--- linux-3.0.3/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23000+++ linux-3.0.3/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23001@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23002 }
23003 if (!size) {
23004 dev_kfree_skb_irq(skb);
23005- if (vcc) atomic_inc(&vcc->stats->rx_err);
23006+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23007 continue;
23008 }
23009 if (!atm_charge(vcc,skb->truesize)) {
23010@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23011 skb->len = size;
23012 ATM_SKB(skb)->vcc = vcc;
23013 vcc->push(vcc,skb);
23014- atomic_inc(&vcc->stats->rx);
23015+ atomic_inc_unchecked(&vcc->stats->rx);
23016 }
23017 zout(pos & 0xffff,MTA(mbx));
23018 #if 0 /* probably a stupid idea */
23019@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23020 skb_queue_head(&zatm_vcc->backlog,skb);
23021 break;
23022 }
23023- atomic_inc(&vcc->stats->tx);
23024+ atomic_inc_unchecked(&vcc->stats->tx);
23025 wake_up(&zatm_vcc->tx_wait);
23026 }
23027
23028diff -urNp linux-3.0.3/drivers/base/power/wakeup.c linux-3.0.3/drivers/base/power/wakeup.c
23029--- linux-3.0.3/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23030+++ linux-3.0.3/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23031@@ -29,14 +29,14 @@ bool events_check_enabled;
23032 * They need to be modified together atomically, so it's better to use one
23033 * atomic variable to hold them both.
23034 */
23035-static atomic_t combined_event_count = ATOMIC_INIT(0);
23036+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23037
23038 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23039 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23040
23041 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23042 {
23043- unsigned int comb = atomic_read(&combined_event_count);
23044+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23045
23046 *cnt = (comb >> IN_PROGRESS_BITS);
23047 *inpr = comb & MAX_IN_PROGRESS;
23048@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23049 ws->last_time = ktime_get();
23050
23051 /* Increment the counter of events in progress. */
23052- atomic_inc(&combined_event_count);
23053+ atomic_inc_unchecked(&combined_event_count);
23054 }
23055
23056 /**
23057@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23058 * Increment the counter of registered wakeup events and decrement the
23059 * couter of wakeup events in progress simultaneously.
23060 */
23061- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23062+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23063 }
23064
23065 /**
23066diff -urNp linux-3.0.3/drivers/block/cciss.c linux-3.0.3/drivers/block/cciss.c
23067--- linux-3.0.3/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23068+++ linux-3.0.3/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23069@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23070 int err;
23071 u32 cp;
23072
23073+ memset(&arg64, 0, sizeof(arg64));
23074+
23075 err = 0;
23076 err |=
23077 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23078@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23079 while (!list_empty(&h->reqQ)) {
23080 c = list_entry(h->reqQ.next, CommandList_struct, list);
23081 /* can't do anything if fifo is full */
23082- if ((h->access.fifo_full(h))) {
23083+ if ((h->access->fifo_full(h))) {
23084 dev_warn(&h->pdev->dev, "fifo full\n");
23085 break;
23086 }
23087@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23088 h->Qdepth--;
23089
23090 /* Tell the controller execute command */
23091- h->access.submit_command(h, c);
23092+ h->access->submit_command(h, c);
23093
23094 /* Put job onto the completed Q */
23095 addQ(&h->cmpQ, c);
23096@@ -3422,17 +3424,17 @@ startio:
23097
23098 static inline unsigned long get_next_completion(ctlr_info_t *h)
23099 {
23100- return h->access.command_completed(h);
23101+ return h->access->command_completed(h);
23102 }
23103
23104 static inline int interrupt_pending(ctlr_info_t *h)
23105 {
23106- return h->access.intr_pending(h);
23107+ return h->access->intr_pending(h);
23108 }
23109
23110 static inline long interrupt_not_for_us(ctlr_info_t *h)
23111 {
23112- return ((h->access.intr_pending(h) == 0) ||
23113+ return ((h->access->intr_pending(h) == 0) ||
23114 (h->interrupts_enabled == 0));
23115 }
23116
23117@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23118 u32 a;
23119
23120 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23121- return h->access.command_completed(h);
23122+ return h->access->command_completed(h);
23123
23124 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23125 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23126@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23127 trans_support & CFGTBL_Trans_use_short_tags);
23128
23129 /* Change the access methods to the performant access methods */
23130- h->access = SA5_performant_access;
23131+ h->access = &SA5_performant_access;
23132 h->transMethod = CFGTBL_Trans_Performant;
23133
23134 return;
23135@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23136 if (prod_index < 0)
23137 return -ENODEV;
23138 h->product_name = products[prod_index].product_name;
23139- h->access = *(products[prod_index].access);
23140+ h->access = products[prod_index].access;
23141
23142 if (cciss_board_disabled(h)) {
23143 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23144@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23145 }
23146
23147 /* make sure the board interrupts are off */
23148- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23149+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23150 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23151 if (rc)
23152 goto clean2;
23153@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23154 * fake ones to scoop up any residual completions.
23155 */
23156 spin_lock_irqsave(&h->lock, flags);
23157- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23158+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23159 spin_unlock_irqrestore(&h->lock, flags);
23160 free_irq(h->intr[PERF_MODE_INT], h);
23161 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23162@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23163 dev_info(&h->pdev->dev, "Board READY.\n");
23164 dev_info(&h->pdev->dev,
23165 "Waiting for stale completions to drain.\n");
23166- h->access.set_intr_mask(h, CCISS_INTR_ON);
23167+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23168 msleep(10000);
23169- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23170+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23171
23172 rc = controller_reset_failed(h->cfgtable);
23173 if (rc)
23174@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23175 cciss_scsi_setup(h);
23176
23177 /* Turn the interrupts on so we can service requests */
23178- h->access.set_intr_mask(h, CCISS_INTR_ON);
23179+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23180
23181 /* Get the firmware version */
23182 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23183@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23184 kfree(flush_buf);
23185 if (return_code != IO_OK)
23186 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23187- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23188+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23189 free_irq(h->intr[PERF_MODE_INT], h);
23190 }
23191
23192diff -urNp linux-3.0.3/drivers/block/cciss.h linux-3.0.3/drivers/block/cciss.h
23193--- linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23194+++ linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23195@@ -100,7 +100,7 @@ struct ctlr_info
23196 /* information about each logical volume */
23197 drive_info_struct *drv[CISS_MAX_LUN];
23198
23199- struct access_method access;
23200+ struct access_method *access;
23201
23202 /* queue and queue Info */
23203 struct list_head reqQ;
23204diff -urNp linux-3.0.3/drivers/block/cpqarray.c linux-3.0.3/drivers/block/cpqarray.c
23205--- linux-3.0.3/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23206+++ linux-3.0.3/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23207@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23208 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23209 goto Enomem4;
23210 }
23211- hba[i]->access.set_intr_mask(hba[i], 0);
23212+ hba[i]->access->set_intr_mask(hba[i], 0);
23213 if (request_irq(hba[i]->intr, do_ida_intr,
23214 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23215 {
23216@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23217 add_timer(&hba[i]->timer);
23218
23219 /* Enable IRQ now that spinlock and rate limit timer are set up */
23220- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23221+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23222
23223 for(j=0; j<NWD; j++) {
23224 struct gendisk *disk = ida_gendisk[i][j];
23225@@ -694,7 +694,7 @@ DBGINFO(
23226 for(i=0; i<NR_PRODUCTS; i++) {
23227 if (board_id == products[i].board_id) {
23228 c->product_name = products[i].product_name;
23229- c->access = *(products[i].access);
23230+ c->access = products[i].access;
23231 break;
23232 }
23233 }
23234@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23235 hba[ctlr]->intr = intr;
23236 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23237 hba[ctlr]->product_name = products[j].product_name;
23238- hba[ctlr]->access = *(products[j].access);
23239+ hba[ctlr]->access = products[j].access;
23240 hba[ctlr]->ctlr = ctlr;
23241 hba[ctlr]->board_id = board_id;
23242 hba[ctlr]->pci_dev = NULL; /* not PCI */
23243@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23244 struct scatterlist tmp_sg[SG_MAX];
23245 int i, dir, seg;
23246
23247+ pax_track_stack();
23248+
23249 queue_next:
23250 creq = blk_peek_request(q);
23251 if (!creq)
23252@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23253
23254 while((c = h->reqQ) != NULL) {
23255 /* Can't do anything if we're busy */
23256- if (h->access.fifo_full(h) == 0)
23257+ if (h->access->fifo_full(h) == 0)
23258 return;
23259
23260 /* Get the first entry from the request Q */
23261@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23262 h->Qdepth--;
23263
23264 /* Tell the controller to do our bidding */
23265- h->access.submit_command(h, c);
23266+ h->access->submit_command(h, c);
23267
23268 /* Get onto the completion Q */
23269 addQ(&h->cmpQ, c);
23270@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23271 unsigned long flags;
23272 __u32 a,a1;
23273
23274- istat = h->access.intr_pending(h);
23275+ istat = h->access->intr_pending(h);
23276 /* Is this interrupt for us? */
23277 if (istat == 0)
23278 return IRQ_NONE;
23279@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23280 */
23281 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23282 if (istat & FIFO_NOT_EMPTY) {
23283- while((a = h->access.command_completed(h))) {
23284+ while((a = h->access->command_completed(h))) {
23285 a1 = a; a &= ~3;
23286 if ((c = h->cmpQ) == NULL)
23287 {
23288@@ -1449,11 +1451,11 @@ static int sendcmd(
23289 /*
23290 * Disable interrupt
23291 */
23292- info_p->access.set_intr_mask(info_p, 0);
23293+ info_p->access->set_intr_mask(info_p, 0);
23294 /* Make sure there is room in the command FIFO */
23295 /* Actually it should be completely empty at this time. */
23296 for (i = 200000; i > 0; i--) {
23297- temp = info_p->access.fifo_full(info_p);
23298+ temp = info_p->access->fifo_full(info_p);
23299 if (temp != 0) {
23300 break;
23301 }
23302@@ -1466,7 +1468,7 @@ DBG(
23303 /*
23304 * Send the cmd
23305 */
23306- info_p->access.submit_command(info_p, c);
23307+ info_p->access->submit_command(info_p, c);
23308 complete = pollcomplete(ctlr);
23309
23310 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23311@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23312 * we check the new geometry. Then turn interrupts back on when
23313 * we're done.
23314 */
23315- host->access.set_intr_mask(host, 0);
23316+ host->access->set_intr_mask(host, 0);
23317 getgeometry(ctlr);
23318- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23319+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23320
23321 for(i=0; i<NWD; i++) {
23322 struct gendisk *disk = ida_gendisk[ctlr][i];
23323@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23324 /* Wait (up to 2 seconds) for a command to complete */
23325
23326 for (i = 200000; i > 0; i--) {
23327- done = hba[ctlr]->access.command_completed(hba[ctlr]);
23328+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
23329 if (done == 0) {
23330 udelay(10); /* a short fixed delay */
23331 } else
23332diff -urNp linux-3.0.3/drivers/block/cpqarray.h linux-3.0.3/drivers/block/cpqarray.h
23333--- linux-3.0.3/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23334+++ linux-3.0.3/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23335@@ -99,7 +99,7 @@ struct ctlr_info {
23336 drv_info_t drv[NWD];
23337 struct proc_dir_entry *proc;
23338
23339- struct access_method access;
23340+ struct access_method *access;
23341
23342 cmdlist_t *reqQ;
23343 cmdlist_t *cmpQ;
23344diff -urNp linux-3.0.3/drivers/block/DAC960.c linux-3.0.3/drivers/block/DAC960.c
23345--- linux-3.0.3/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23346+++ linux-3.0.3/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23347@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23348 unsigned long flags;
23349 int Channel, TargetID;
23350
23351+ pax_track_stack();
23352+
23353 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23354 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23355 sizeof(DAC960_SCSI_Inquiry_T) +
23356diff -urNp linux-3.0.3/drivers/block/drbd/drbd_int.h linux-3.0.3/drivers/block/drbd/drbd_int.h
23357--- linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23358+++ linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23359@@ -737,7 +737,7 @@ struct drbd_request;
23360 struct drbd_epoch {
23361 struct list_head list;
23362 unsigned int barrier_nr;
23363- atomic_t epoch_size; /* increased on every request added. */
23364+ atomic_unchecked_t epoch_size; /* increased on every request added. */
23365 atomic_t active; /* increased on every req. added, and dec on every finished. */
23366 unsigned long flags;
23367 };
23368@@ -1109,7 +1109,7 @@ struct drbd_conf {
23369 void *int_dig_in;
23370 void *int_dig_vv;
23371 wait_queue_head_t seq_wait;
23372- atomic_t packet_seq;
23373+ atomic_unchecked_t packet_seq;
23374 unsigned int peer_seq;
23375 spinlock_t peer_seq_lock;
23376 unsigned int minor;
23377diff -urNp linux-3.0.3/drivers/block/drbd/drbd_main.c linux-3.0.3/drivers/block/drbd/drbd_main.c
23378--- linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23379+++ linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23380@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23381 p.sector = sector;
23382 p.block_id = block_id;
23383 p.blksize = blksize;
23384- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23385+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23386
23387 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23388 return false;
23389@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23390 p.sector = cpu_to_be64(req->sector);
23391 p.block_id = (unsigned long)req;
23392 p.seq_num = cpu_to_be32(req->seq_num =
23393- atomic_add_return(1, &mdev->packet_seq));
23394+ atomic_add_return_unchecked(1, &mdev->packet_seq));
23395
23396 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23397
23398@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23399 atomic_set(&mdev->unacked_cnt, 0);
23400 atomic_set(&mdev->local_cnt, 0);
23401 atomic_set(&mdev->net_cnt, 0);
23402- atomic_set(&mdev->packet_seq, 0);
23403+ atomic_set_unchecked(&mdev->packet_seq, 0);
23404 atomic_set(&mdev->pp_in_use, 0);
23405 atomic_set(&mdev->pp_in_use_by_net, 0);
23406 atomic_set(&mdev->rs_sect_in, 0);
23407@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23408 mdev->receiver.t_state);
23409
23410 /* no need to lock it, I'm the only thread alive */
23411- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23412- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23413+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23414+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23415 mdev->al_writ_cnt =
23416 mdev->bm_writ_cnt =
23417 mdev->read_cnt =
23418diff -urNp linux-3.0.3/drivers/block/drbd/drbd_nl.c linux-3.0.3/drivers/block/drbd/drbd_nl.c
23419--- linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23420+++ linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23421@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23422 module_put(THIS_MODULE);
23423 }
23424
23425-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23426+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23427
23428 static unsigned short *
23429 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23430@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23431 cn_reply->id.idx = CN_IDX_DRBD;
23432 cn_reply->id.val = CN_VAL_DRBD;
23433
23434- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23435+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23436 cn_reply->ack = 0; /* not used here. */
23437 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23438 (int)((char *)tl - (char *)reply->tag_list);
23439@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23440 cn_reply->id.idx = CN_IDX_DRBD;
23441 cn_reply->id.val = CN_VAL_DRBD;
23442
23443- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23444+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23445 cn_reply->ack = 0; /* not used here. */
23446 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23447 (int)((char *)tl - (char *)reply->tag_list);
23448@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23449 cn_reply->id.idx = CN_IDX_DRBD;
23450 cn_reply->id.val = CN_VAL_DRBD;
23451
23452- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23453+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23454 cn_reply->ack = 0; // not used here.
23455 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23456 (int)((char*)tl - (char*)reply->tag_list);
23457@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23458 cn_reply->id.idx = CN_IDX_DRBD;
23459 cn_reply->id.val = CN_VAL_DRBD;
23460
23461- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23462+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23463 cn_reply->ack = 0; /* not used here. */
23464 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23465 (int)((char *)tl - (char *)reply->tag_list);
23466diff -urNp linux-3.0.3/drivers/block/drbd/drbd_receiver.c linux-3.0.3/drivers/block/drbd/drbd_receiver.c
23467--- linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23468+++ linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23469@@ -894,7 +894,7 @@ retry:
23470 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23471 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23472
23473- atomic_set(&mdev->packet_seq, 0);
23474+ atomic_set_unchecked(&mdev->packet_seq, 0);
23475 mdev->peer_seq = 0;
23476
23477 drbd_thread_start(&mdev->asender);
23478@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23479 do {
23480 next_epoch = NULL;
23481
23482- epoch_size = atomic_read(&epoch->epoch_size);
23483+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23484
23485 switch (ev & ~EV_CLEANUP) {
23486 case EV_PUT:
23487@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23488 rv = FE_DESTROYED;
23489 } else {
23490 epoch->flags = 0;
23491- atomic_set(&epoch->epoch_size, 0);
23492+ atomic_set_unchecked(&epoch->epoch_size, 0);
23493 /* atomic_set(&epoch->active, 0); is already zero */
23494 if (rv == FE_STILL_LIVE)
23495 rv = FE_RECYCLED;
23496@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23497 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23498 drbd_flush(mdev);
23499
23500- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23501+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23502 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23503 if (epoch)
23504 break;
23505 }
23506
23507 epoch = mdev->current_epoch;
23508- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23509+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23510
23511 D_ASSERT(atomic_read(&epoch->active) == 0);
23512 D_ASSERT(epoch->flags == 0);
23513@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23514 }
23515
23516 epoch->flags = 0;
23517- atomic_set(&epoch->epoch_size, 0);
23518+ atomic_set_unchecked(&epoch->epoch_size, 0);
23519 atomic_set(&epoch->active, 0);
23520
23521 spin_lock(&mdev->epoch_lock);
23522- if (atomic_read(&mdev->current_epoch->epoch_size)) {
23523+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23524 list_add(&epoch->list, &mdev->current_epoch->list);
23525 mdev->current_epoch = epoch;
23526 mdev->epochs++;
23527@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23528 spin_unlock(&mdev->peer_seq_lock);
23529
23530 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23531- atomic_inc(&mdev->current_epoch->epoch_size);
23532+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23533 return drbd_drain_block(mdev, data_size);
23534 }
23535
23536@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23537
23538 spin_lock(&mdev->epoch_lock);
23539 e->epoch = mdev->current_epoch;
23540- atomic_inc(&e->epoch->epoch_size);
23541+ atomic_inc_unchecked(&e->epoch->epoch_size);
23542 atomic_inc(&e->epoch->active);
23543 spin_unlock(&mdev->epoch_lock);
23544
23545@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23546 D_ASSERT(list_empty(&mdev->done_ee));
23547
23548 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23549- atomic_set(&mdev->current_epoch->epoch_size, 0);
23550+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23551 D_ASSERT(list_empty(&mdev->current_epoch->list));
23552 }
23553
23554diff -urNp linux-3.0.3/drivers/block/nbd.c linux-3.0.3/drivers/block/nbd.c
23555--- linux-3.0.3/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23556+++ linux-3.0.3/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23557@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23558 struct kvec iov;
23559 sigset_t blocked, oldset;
23560
23561+ pax_track_stack();
23562+
23563 if (unlikely(!sock)) {
23564 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23565 lo->disk->disk_name, (send ? "send" : "recv"));
23566@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23567 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23568 unsigned int cmd, unsigned long arg)
23569 {
23570+ pax_track_stack();
23571+
23572 switch (cmd) {
23573 case NBD_DISCONNECT: {
23574 struct request sreq;
23575diff -urNp linux-3.0.3/drivers/char/agp/frontend.c linux-3.0.3/drivers/char/agp/frontend.c
23576--- linux-3.0.3/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23577+++ linux-3.0.3/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23578@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23579 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23580 return -EFAULT;
23581
23582- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23583+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23584 return -EFAULT;
23585
23586 client = agp_find_client_by_pid(reserve.pid);
23587diff -urNp linux-3.0.3/drivers/char/briq_panel.c linux-3.0.3/drivers/char/briq_panel.c
23588--- linux-3.0.3/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23589+++ linux-3.0.3/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23590@@ -9,6 +9,7 @@
23591 #include <linux/types.h>
23592 #include <linux/errno.h>
23593 #include <linux/tty.h>
23594+#include <linux/mutex.h>
23595 #include <linux/timer.h>
23596 #include <linux/kernel.h>
23597 #include <linux/wait.h>
23598@@ -34,6 +35,7 @@ static int vfd_is_open;
23599 static unsigned char vfd[40];
23600 static int vfd_cursor;
23601 static unsigned char ledpb, led;
23602+static DEFINE_MUTEX(vfd_mutex);
23603
23604 static void update_vfd(void)
23605 {
23606@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23607 if (!vfd_is_open)
23608 return -EBUSY;
23609
23610+ mutex_lock(&vfd_mutex);
23611 for (;;) {
23612 char c;
23613 if (!indx)
23614 break;
23615- if (get_user(c, buf))
23616+ if (get_user(c, buf)) {
23617+ mutex_unlock(&vfd_mutex);
23618 return -EFAULT;
23619+ }
23620 if (esc) {
23621 set_led(c);
23622 esc = 0;
23623@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23624 buf++;
23625 }
23626 update_vfd();
23627+ mutex_unlock(&vfd_mutex);
23628
23629 return len;
23630 }
23631diff -urNp linux-3.0.3/drivers/char/genrtc.c linux-3.0.3/drivers/char/genrtc.c
23632--- linux-3.0.3/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23633+++ linux-3.0.3/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23634@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23635 switch (cmd) {
23636
23637 case RTC_PLL_GET:
23638+ memset(&pll, 0, sizeof(pll));
23639 if (get_rtc_pll(&pll))
23640 return -EINVAL;
23641 else
23642diff -urNp linux-3.0.3/drivers/char/hpet.c linux-3.0.3/drivers/char/hpet.c
23643--- linux-3.0.3/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23644+++ linux-3.0.3/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23645@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23646 }
23647
23648 static int
23649-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23650+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23651 struct hpet_info *info)
23652 {
23653 struct hpet_timer __iomem *timer;
23654diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c
23655--- linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23656+++ linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23657@@ -415,7 +415,7 @@ struct ipmi_smi {
23658 struct proc_dir_entry *proc_dir;
23659 char proc_dir_name[10];
23660
23661- atomic_t stats[IPMI_NUM_STATS];
23662+ atomic_unchecked_t stats[IPMI_NUM_STATS];
23663
23664 /*
23665 * run_to_completion duplicate of smb_info, smi_info
23666@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23667
23668
23669 #define ipmi_inc_stat(intf, stat) \
23670- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23671+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23672 #define ipmi_get_stat(intf, stat) \
23673- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23674+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23675
23676 static int is_lan_addr(struct ipmi_addr *addr)
23677 {
23678@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23679 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23680 init_waitqueue_head(&intf->waitq);
23681 for (i = 0; i < IPMI_NUM_STATS; i++)
23682- atomic_set(&intf->stats[i], 0);
23683+ atomic_set_unchecked(&intf->stats[i], 0);
23684
23685 intf->proc_dir = NULL;
23686
23687@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23688 struct ipmi_smi_msg smi_msg;
23689 struct ipmi_recv_msg recv_msg;
23690
23691+ pax_track_stack();
23692+
23693 si = (struct ipmi_system_interface_addr *) &addr;
23694 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23695 si->channel = IPMI_BMC_CHANNEL;
23696diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c
23697--- linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23698+++ linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23699@@ -277,7 +277,7 @@ struct smi_info {
23700 unsigned char slave_addr;
23701
23702 /* Counters and things for the proc filesystem. */
23703- atomic_t stats[SI_NUM_STATS];
23704+ atomic_unchecked_t stats[SI_NUM_STATS];
23705
23706 struct task_struct *thread;
23707
23708@@ -286,9 +286,9 @@ struct smi_info {
23709 };
23710
23711 #define smi_inc_stat(smi, stat) \
23712- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23713+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23714 #define smi_get_stat(smi, stat) \
23715- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23716+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23717
23718 #define SI_MAX_PARMS 4
23719
23720@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23721 atomic_set(&new_smi->req_events, 0);
23722 new_smi->run_to_completion = 0;
23723 for (i = 0; i < SI_NUM_STATS; i++)
23724- atomic_set(&new_smi->stats[i], 0);
23725+ atomic_set_unchecked(&new_smi->stats[i], 0);
23726
23727 new_smi->interrupt_disabled = 1;
23728 atomic_set(&new_smi->stop_operation, 0);
23729diff -urNp linux-3.0.3/drivers/char/Kconfig linux-3.0.3/drivers/char/Kconfig
23730--- linux-3.0.3/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23731+++ linux-3.0.3/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23732@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23733
23734 config DEVKMEM
23735 bool "/dev/kmem virtual device support"
23736- default y
23737+ default n
23738+ depends on !GRKERNSEC_KMEM
23739 help
23740 Say Y here if you want to support the /dev/kmem device. The
23741 /dev/kmem device is rarely used, but can be used for certain
23742@@ -596,6 +597,7 @@ config DEVPORT
23743 bool
23744 depends on !M68K
23745 depends on ISA || PCI
23746+ depends on !GRKERNSEC_KMEM
23747 default y
23748
23749 source "drivers/s390/char/Kconfig"
23750diff -urNp linux-3.0.3/drivers/char/mem.c linux-3.0.3/drivers/char/mem.c
23751--- linux-3.0.3/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23752+++ linux-3.0.3/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23753@@ -18,6 +18,7 @@
23754 #include <linux/raw.h>
23755 #include <linux/tty.h>
23756 #include <linux/capability.h>
23757+#include <linux/security.h>
23758 #include <linux/ptrace.h>
23759 #include <linux/device.h>
23760 #include <linux/highmem.h>
23761@@ -34,6 +35,10 @@
23762 # include <linux/efi.h>
23763 #endif
23764
23765+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23766+extern struct file_operations grsec_fops;
23767+#endif
23768+
23769 static inline unsigned long size_inside_page(unsigned long start,
23770 unsigned long size)
23771 {
23772@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23773
23774 while (cursor < to) {
23775 if (!devmem_is_allowed(pfn)) {
23776+#ifdef CONFIG_GRKERNSEC_KMEM
23777+ gr_handle_mem_readwrite(from, to);
23778+#else
23779 printk(KERN_INFO
23780 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23781 current->comm, from, to);
23782+#endif
23783 return 0;
23784 }
23785 cursor += PAGE_SIZE;
23786@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23787 }
23788 return 1;
23789 }
23790+#elif defined(CONFIG_GRKERNSEC_KMEM)
23791+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23792+{
23793+ return 0;
23794+}
23795 #else
23796 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23797 {
23798@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23799
23800 while (count > 0) {
23801 unsigned long remaining;
23802+ char *temp;
23803
23804 sz = size_inside_page(p, count);
23805
23806@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23807 if (!ptr)
23808 return -EFAULT;
23809
23810- remaining = copy_to_user(buf, ptr, sz);
23811+#ifdef CONFIG_PAX_USERCOPY
23812+ temp = kmalloc(sz, GFP_KERNEL);
23813+ if (!temp) {
23814+ unxlate_dev_mem_ptr(p, ptr);
23815+ return -ENOMEM;
23816+ }
23817+ memcpy(temp, ptr, sz);
23818+#else
23819+ temp = ptr;
23820+#endif
23821+
23822+ remaining = copy_to_user(buf, temp, sz);
23823+
23824+#ifdef CONFIG_PAX_USERCOPY
23825+ kfree(temp);
23826+#endif
23827+
23828 unxlate_dev_mem_ptr(p, ptr);
23829 if (remaining)
23830 return -EFAULT;
23831@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23832 size_t count, loff_t *ppos)
23833 {
23834 unsigned long p = *ppos;
23835- ssize_t low_count, read, sz;
23836+ ssize_t low_count, read, sz, err = 0;
23837 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23838- int err = 0;
23839
23840 read = 0;
23841 if (p < (unsigned long) high_memory) {
23842@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23843 }
23844 #endif
23845 while (low_count > 0) {
23846+ char *temp;
23847+
23848 sz = size_inside_page(p, low_count);
23849
23850 /*
23851@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23852 */
23853 kbuf = xlate_dev_kmem_ptr((char *)p);
23854
23855- if (copy_to_user(buf, kbuf, sz))
23856+#ifdef CONFIG_PAX_USERCOPY
23857+ temp = kmalloc(sz, GFP_KERNEL);
23858+ if (!temp)
23859+ return -ENOMEM;
23860+ memcpy(temp, kbuf, sz);
23861+#else
23862+ temp = kbuf;
23863+#endif
23864+
23865+ err = copy_to_user(buf, temp, sz);
23866+
23867+#ifdef CONFIG_PAX_USERCOPY
23868+ kfree(temp);
23869+#endif
23870+
23871+ if (err)
23872 return -EFAULT;
23873 buf += sz;
23874 p += sz;
23875@@ -866,6 +913,9 @@ static const struct memdev {
23876 #ifdef CONFIG_CRASH_DUMP
23877 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23878 #endif
23879+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23880+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23881+#endif
23882 };
23883
23884 static int memory_open(struct inode *inode, struct file *filp)
23885diff -urNp linux-3.0.3/drivers/char/nvram.c linux-3.0.3/drivers/char/nvram.c
23886--- linux-3.0.3/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23887+++ linux-3.0.3/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23888@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23889
23890 spin_unlock_irq(&rtc_lock);
23891
23892- if (copy_to_user(buf, contents, tmp - contents))
23893+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23894 return -EFAULT;
23895
23896 *ppos = i;
23897diff -urNp linux-3.0.3/drivers/char/random.c linux-3.0.3/drivers/char/random.c
23898--- linux-3.0.3/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23899+++ linux-3.0.3/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23900@@ -261,8 +261,13 @@
23901 /*
23902 * Configuration information
23903 */
23904+#ifdef CONFIG_GRKERNSEC_RANDNET
23905+#define INPUT_POOL_WORDS 512
23906+#define OUTPUT_POOL_WORDS 128
23907+#else
23908 #define INPUT_POOL_WORDS 128
23909 #define OUTPUT_POOL_WORDS 32
23910+#endif
23911 #define SEC_XFER_SIZE 512
23912 #define EXTRACT_SIZE 10
23913
23914@@ -300,10 +305,17 @@ static struct poolinfo {
23915 int poolwords;
23916 int tap1, tap2, tap3, tap4, tap5;
23917 } poolinfo_table[] = {
23918+#ifdef CONFIG_GRKERNSEC_RANDNET
23919+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23920+ { 512, 411, 308, 208, 104, 1 },
23921+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23922+ { 128, 103, 76, 51, 25, 1 },
23923+#else
23924 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23925 { 128, 103, 76, 51, 25, 1 },
23926 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23927 { 32, 26, 20, 14, 7, 1 },
23928+#endif
23929 #if 0
23930 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23931 { 2048, 1638, 1231, 819, 411, 1 },
23932@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23933
23934 extract_buf(r, tmp);
23935 i = min_t(int, nbytes, EXTRACT_SIZE);
23936- if (copy_to_user(buf, tmp, i)) {
23937+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23938 ret = -EFAULT;
23939 break;
23940 }
23941@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23942 #include <linux/sysctl.h>
23943
23944 static int min_read_thresh = 8, min_write_thresh;
23945-static int max_read_thresh = INPUT_POOL_WORDS * 32;
23946+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23947 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23948 static char sysctl_bootid[16];
23949
23950diff -urNp linux-3.0.3/drivers/char/sonypi.c linux-3.0.3/drivers/char/sonypi.c
23951--- linux-3.0.3/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23952+++ linux-3.0.3/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23953@@ -55,6 +55,7 @@
23954 #include <asm/uaccess.h>
23955 #include <asm/io.h>
23956 #include <asm/system.h>
23957+#include <asm/local.h>
23958
23959 #include <linux/sonypi.h>
23960
23961@@ -491,7 +492,7 @@ static struct sonypi_device {
23962 spinlock_t fifo_lock;
23963 wait_queue_head_t fifo_proc_list;
23964 struct fasync_struct *fifo_async;
23965- int open_count;
23966+ local_t open_count;
23967 int model;
23968 struct input_dev *input_jog_dev;
23969 struct input_dev *input_key_dev;
23970@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23971 static int sonypi_misc_release(struct inode *inode, struct file *file)
23972 {
23973 mutex_lock(&sonypi_device.lock);
23974- sonypi_device.open_count--;
23975+ local_dec(&sonypi_device.open_count);
23976 mutex_unlock(&sonypi_device.lock);
23977 return 0;
23978 }
23979@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23980 {
23981 mutex_lock(&sonypi_device.lock);
23982 /* Flush input queue on first open */
23983- if (!sonypi_device.open_count)
23984+ if (!local_read(&sonypi_device.open_count))
23985 kfifo_reset(&sonypi_device.fifo);
23986- sonypi_device.open_count++;
23987+ local_inc(&sonypi_device.open_count);
23988 mutex_unlock(&sonypi_device.lock);
23989
23990 return 0;
23991diff -urNp linux-3.0.3/drivers/char/tpm/tpm_bios.c linux-3.0.3/drivers/char/tpm/tpm_bios.c
23992--- linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23993+++ linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23994@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23995 event = addr;
23996
23997 if ((event->event_type == 0 && event->event_size == 0) ||
23998- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23999+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24000 return NULL;
24001
24002 return addr;
24003@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24004 return NULL;
24005
24006 if ((event->event_type == 0 && event->event_size == 0) ||
24007- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24008+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24009 return NULL;
24010
24011 (*pos)++;
24012@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24013 int i;
24014
24015 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24016- seq_putc(m, data[i]);
24017+ if (!seq_putc(m, data[i]))
24018+ return -EFAULT;
24019
24020 return 0;
24021 }
24022@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24023 log->bios_event_log_end = log->bios_event_log + len;
24024
24025 virt = acpi_os_map_memory(start, len);
24026+ if (!virt) {
24027+ kfree(log->bios_event_log);
24028+ log->bios_event_log = NULL;
24029+ return -EFAULT;
24030+ }
24031
24032 memcpy(log->bios_event_log, virt, len);
24033
24034diff -urNp linux-3.0.3/drivers/char/tpm/tpm.c linux-3.0.3/drivers/char/tpm/tpm.c
24035--- linux-3.0.3/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24036+++ linux-3.0.3/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24037@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24038 chip->vendor.req_complete_val)
24039 goto out_recv;
24040
24041- if ((status == chip->vendor.req_canceled)) {
24042+ if (status == chip->vendor.req_canceled) {
24043 dev_err(chip->dev, "Operation Canceled\n");
24044 rc = -ECANCELED;
24045 goto out;
24046@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24047
24048 struct tpm_chip *chip = dev_get_drvdata(dev);
24049
24050+ pax_track_stack();
24051+
24052 tpm_cmd.header.in = tpm_readpubek_header;
24053 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24054 "attempting to read the PUBEK");
24055diff -urNp linux-3.0.3/drivers/crypto/hifn_795x.c linux-3.0.3/drivers/crypto/hifn_795x.c
24056--- linux-3.0.3/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24057+++ linux-3.0.3/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24058@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24059 0xCA, 0x34, 0x2B, 0x2E};
24060 struct scatterlist sg;
24061
24062+ pax_track_stack();
24063+
24064 memset(src, 0, sizeof(src));
24065 memset(ctx.key, 0, sizeof(ctx.key));
24066
24067diff -urNp linux-3.0.3/drivers/crypto/padlock-aes.c linux-3.0.3/drivers/crypto/padlock-aes.c
24068--- linux-3.0.3/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24069+++ linux-3.0.3/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24070@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24071 struct crypto_aes_ctx gen_aes;
24072 int cpu;
24073
24074+ pax_track_stack();
24075+
24076 if (key_len % 8) {
24077 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24078 return -EINVAL;
24079diff -urNp linux-3.0.3/drivers/edac/edac_pci_sysfs.c linux-3.0.3/drivers/edac/edac_pci_sysfs.c
24080--- linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24081+++ linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24082@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24083 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24084 static int edac_pci_poll_msec = 1000; /* one second workq period */
24085
24086-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24087-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24088+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24089+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24090
24091 static struct kobject *edac_pci_top_main_kobj;
24092 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24093@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24094 edac_printk(KERN_CRIT, EDAC_PCI,
24095 "Signaled System Error on %s\n",
24096 pci_name(dev));
24097- atomic_inc(&pci_nonparity_count);
24098+ atomic_inc_unchecked(&pci_nonparity_count);
24099 }
24100
24101 if (status & (PCI_STATUS_PARITY)) {
24102@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24103 "Master Data Parity Error on %s\n",
24104 pci_name(dev));
24105
24106- atomic_inc(&pci_parity_count);
24107+ atomic_inc_unchecked(&pci_parity_count);
24108 }
24109
24110 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24111@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24112 "Detected Parity Error on %s\n",
24113 pci_name(dev));
24114
24115- atomic_inc(&pci_parity_count);
24116+ atomic_inc_unchecked(&pci_parity_count);
24117 }
24118 }
24119
24120@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24121 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24122 "Signaled System Error on %s\n",
24123 pci_name(dev));
24124- atomic_inc(&pci_nonparity_count);
24125+ atomic_inc_unchecked(&pci_nonparity_count);
24126 }
24127
24128 if (status & (PCI_STATUS_PARITY)) {
24129@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24130 "Master Data Parity Error on "
24131 "%s\n", pci_name(dev));
24132
24133- atomic_inc(&pci_parity_count);
24134+ atomic_inc_unchecked(&pci_parity_count);
24135 }
24136
24137 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24138@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24139 "Detected Parity Error on %s\n",
24140 pci_name(dev));
24141
24142- atomic_inc(&pci_parity_count);
24143+ atomic_inc_unchecked(&pci_parity_count);
24144 }
24145 }
24146 }
24147@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24148 if (!check_pci_errors)
24149 return;
24150
24151- before_count = atomic_read(&pci_parity_count);
24152+ before_count = atomic_read_unchecked(&pci_parity_count);
24153
24154 /* scan all PCI devices looking for a Parity Error on devices and
24155 * bridges.
24156@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24157 /* Only if operator has selected panic on PCI Error */
24158 if (edac_pci_get_panic_on_pe()) {
24159 /* If the count is different 'after' from 'before' */
24160- if (before_count != atomic_read(&pci_parity_count))
24161+ if (before_count != atomic_read_unchecked(&pci_parity_count))
24162 panic("EDAC: PCI Parity Error");
24163 }
24164 }
24165diff -urNp linux-3.0.3/drivers/edac/i7core_edac.c linux-3.0.3/drivers/edac/i7core_edac.c
24166--- linux-3.0.3/drivers/edac/i7core_edac.c 2011-07-21 22:17:23.000000000 -0400
24167+++ linux-3.0.3/drivers/edac/i7core_edac.c 2011-08-23 21:47:55.000000000 -0400
24168@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24169 char *type, *optype, *err, *msg;
24170 unsigned long error = m->status & 0x1ff0000l;
24171 u32 optypenum = (m->status >> 4) & 0x07;
24172- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24173+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24174 u32 dimm = (m->misc >> 16) & 0x3;
24175 u32 channel = (m->misc >> 18) & 0x3;
24176 u32 syndrome = m->misc >> 32;
24177diff -urNp linux-3.0.3/drivers/edac/mce_amd.h linux-3.0.3/drivers/edac/mce_amd.h
24178--- linux-3.0.3/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24179+++ linux-3.0.3/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24180@@ -83,7 +83,7 @@ struct amd_decoder_ops {
24181 bool (*dc_mce)(u16, u8);
24182 bool (*ic_mce)(u16, u8);
24183 bool (*nb_mce)(u16, u8);
24184-};
24185+} __no_const;
24186
24187 void amd_report_gart_errors(bool);
24188 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24189diff -urNp linux-3.0.3/drivers/firewire/core-card.c linux-3.0.3/drivers/firewire/core-card.c
24190--- linux-3.0.3/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24191+++ linux-3.0.3/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24192@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24193
24194 void fw_core_remove_card(struct fw_card *card)
24195 {
24196- struct fw_card_driver dummy_driver = dummy_driver_template;
24197+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
24198
24199 card->driver->update_phy_reg(card, 4,
24200 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24201diff -urNp linux-3.0.3/drivers/firewire/core-cdev.c linux-3.0.3/drivers/firewire/core-cdev.c
24202--- linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24203+++ linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24204@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24205 int ret;
24206
24207 if ((request->channels == 0 && request->bandwidth == 0) ||
24208- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24209- request->bandwidth < 0)
24210+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24211 return -EINVAL;
24212
24213 r = kmalloc(sizeof(*r), GFP_KERNEL);
24214diff -urNp linux-3.0.3/drivers/firewire/core.h linux-3.0.3/drivers/firewire/core.h
24215--- linux-3.0.3/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24216+++ linux-3.0.3/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24217@@ -101,6 +101,7 @@ struct fw_card_driver {
24218
24219 int (*stop_iso)(struct fw_iso_context *ctx);
24220 };
24221+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24222
24223 void fw_card_initialize(struct fw_card *card,
24224 const struct fw_card_driver *driver, struct device *device);
24225diff -urNp linux-3.0.3/drivers/firewire/core-transaction.c linux-3.0.3/drivers/firewire/core-transaction.c
24226--- linux-3.0.3/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24227+++ linux-3.0.3/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24228@@ -37,6 +37,7 @@
24229 #include <linux/timer.h>
24230 #include <linux/types.h>
24231 #include <linux/workqueue.h>
24232+#include <linux/sched.h>
24233
24234 #include <asm/byteorder.h>
24235
24236@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24237 struct transaction_callback_data d;
24238 struct fw_transaction t;
24239
24240+ pax_track_stack();
24241+
24242 init_timer_on_stack(&t.split_timeout_timer);
24243 init_completion(&d.done);
24244 d.payload = payload;
24245diff -urNp linux-3.0.3/drivers/firmware/dmi_scan.c linux-3.0.3/drivers/firmware/dmi_scan.c
24246--- linux-3.0.3/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24247+++ linux-3.0.3/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24248@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24249 }
24250 }
24251 else {
24252- /*
24253- * no iounmap() for that ioremap(); it would be a no-op, but
24254- * it's so early in setup that sucker gets confused into doing
24255- * what it shouldn't if we actually call it.
24256- */
24257 p = dmi_ioremap(0xF0000, 0x10000);
24258 if (p == NULL)
24259 goto error;
24260diff -urNp linux-3.0.3/drivers/gpio/vr41xx_giu.c linux-3.0.3/drivers/gpio/vr41xx_giu.c
24261--- linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24262+++ linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24263@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24264 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24265 maskl, pendl, maskh, pendh);
24266
24267- atomic_inc(&irq_err_count);
24268+ atomic_inc_unchecked(&irq_err_count);
24269
24270 return -EINVAL;
24271 }
24272diff -urNp linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c
24273--- linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24274+++ linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24275@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24276 struct drm_crtc *tmp;
24277 int crtc_mask = 1;
24278
24279- WARN(!crtc, "checking null crtc?\n");
24280+ BUG_ON(!crtc);
24281
24282 dev = crtc->dev;
24283
24284@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24285 struct drm_encoder *encoder;
24286 bool ret = true;
24287
24288+ pax_track_stack();
24289+
24290 crtc->enabled = drm_helper_crtc_in_use(crtc);
24291 if (!crtc->enabled)
24292 return true;
24293diff -urNp linux-3.0.3/drivers/gpu/drm/drm_drv.c linux-3.0.3/drivers/gpu/drm/drm_drv.c
24294--- linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24295+++ linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24296@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24297
24298 dev = file_priv->minor->dev;
24299 atomic_inc(&dev->ioctl_count);
24300- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24301+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24302 ++file_priv->ioctl_count;
24303
24304 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24305diff -urNp linux-3.0.3/drivers/gpu/drm/drm_fops.c linux-3.0.3/drivers/gpu/drm/drm_fops.c
24306--- linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24307+++ linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24308@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24309 }
24310
24311 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24312- atomic_set(&dev->counts[i], 0);
24313+ atomic_set_unchecked(&dev->counts[i], 0);
24314
24315 dev->sigdata.lock = NULL;
24316
24317@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24318
24319 retcode = drm_open_helper(inode, filp, dev);
24320 if (!retcode) {
24321- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24322- if (!dev->open_count++)
24323+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24324+ if (local_inc_return(&dev->open_count) == 1)
24325 retcode = drm_setup(dev);
24326 }
24327 if (!retcode) {
24328@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24329
24330 mutex_lock(&drm_global_mutex);
24331
24332- DRM_DEBUG("open_count = %d\n", dev->open_count);
24333+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24334
24335 if (dev->driver->preclose)
24336 dev->driver->preclose(dev, file_priv);
24337@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24338 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24339 task_pid_nr(current),
24340 (long)old_encode_dev(file_priv->minor->device),
24341- dev->open_count);
24342+ local_read(&dev->open_count));
24343
24344 /* if the master has gone away we can't do anything with the lock */
24345 if (file_priv->minor->master)
24346@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24347 * End inline drm_release
24348 */
24349
24350- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24351- if (!--dev->open_count) {
24352+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24353+ if (local_dec_and_test(&dev->open_count)) {
24354 if (atomic_read(&dev->ioctl_count)) {
24355 DRM_ERROR("Device busy: %d\n",
24356 atomic_read(&dev->ioctl_count));
24357diff -urNp linux-3.0.3/drivers/gpu/drm/drm_global.c linux-3.0.3/drivers/gpu/drm/drm_global.c
24358--- linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24359+++ linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24360@@ -36,7 +36,7 @@
24361 struct drm_global_item {
24362 struct mutex mutex;
24363 void *object;
24364- int refcount;
24365+ atomic_t refcount;
24366 };
24367
24368 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24369@@ -49,7 +49,7 @@ void drm_global_init(void)
24370 struct drm_global_item *item = &glob[i];
24371 mutex_init(&item->mutex);
24372 item->object = NULL;
24373- item->refcount = 0;
24374+ atomic_set(&item->refcount, 0);
24375 }
24376 }
24377
24378@@ -59,7 +59,7 @@ void drm_global_release(void)
24379 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24380 struct drm_global_item *item = &glob[i];
24381 BUG_ON(item->object != NULL);
24382- BUG_ON(item->refcount != 0);
24383+ BUG_ON(atomic_read(&item->refcount) != 0);
24384 }
24385 }
24386
24387@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24388 void *object;
24389
24390 mutex_lock(&item->mutex);
24391- if (item->refcount == 0) {
24392+ if (atomic_read(&item->refcount) == 0) {
24393 item->object = kzalloc(ref->size, GFP_KERNEL);
24394 if (unlikely(item->object == NULL)) {
24395 ret = -ENOMEM;
24396@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24397 goto out_err;
24398
24399 }
24400- ++item->refcount;
24401+ atomic_inc(&item->refcount);
24402 ref->object = item->object;
24403 object = item->object;
24404 mutex_unlock(&item->mutex);
24405@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24406 struct drm_global_item *item = &glob[ref->global_type];
24407
24408 mutex_lock(&item->mutex);
24409- BUG_ON(item->refcount == 0);
24410+ BUG_ON(atomic_read(&item->refcount) == 0);
24411 BUG_ON(ref->object != item->object);
24412- if (--item->refcount == 0) {
24413+ if (atomic_dec_and_test(&item->refcount)) {
24414 ref->release(ref);
24415 item->object = NULL;
24416 }
24417diff -urNp linux-3.0.3/drivers/gpu/drm/drm_info.c linux-3.0.3/drivers/gpu/drm/drm_info.c
24418--- linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24419+++ linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24420@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24421 struct drm_local_map *map;
24422 struct drm_map_list *r_list;
24423
24424- /* Hardcoded from _DRM_FRAME_BUFFER,
24425- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24426- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24427- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24428+ static const char * const types[] = {
24429+ [_DRM_FRAME_BUFFER] = "FB",
24430+ [_DRM_REGISTERS] = "REG",
24431+ [_DRM_SHM] = "SHM",
24432+ [_DRM_AGP] = "AGP",
24433+ [_DRM_SCATTER_GATHER] = "SG",
24434+ [_DRM_CONSISTENT] = "PCI",
24435+ [_DRM_GEM] = "GEM" };
24436 const char *type;
24437 int i;
24438
24439@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24440 map = r_list->map;
24441 if (!map)
24442 continue;
24443- if (map->type < 0 || map->type > 5)
24444+ if (map->type >= ARRAY_SIZE(types))
24445 type = "??";
24446 else
24447 type = types[map->type];
24448@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24449 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24450 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24451 vma->vm_flags & VM_IO ? 'i' : '-',
24452+#ifdef CONFIG_GRKERNSEC_HIDESYM
24453+ 0);
24454+#else
24455 vma->vm_pgoff);
24456+#endif
24457
24458 #if defined(__i386__)
24459 pgprot = pgprot_val(vma->vm_page_prot);
24460diff -urNp linux-3.0.3/drivers/gpu/drm/drm_ioctl.c linux-3.0.3/drivers/gpu/drm/drm_ioctl.c
24461--- linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24462+++ linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24463@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24464 stats->data[i].value =
24465 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24466 else
24467- stats->data[i].value = atomic_read(&dev->counts[i]);
24468+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24469 stats->data[i].type = dev->types[i];
24470 }
24471
24472diff -urNp linux-3.0.3/drivers/gpu/drm/drm_lock.c linux-3.0.3/drivers/gpu/drm/drm_lock.c
24473--- linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24474+++ linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24475@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24476 if (drm_lock_take(&master->lock, lock->context)) {
24477 master->lock.file_priv = file_priv;
24478 master->lock.lock_time = jiffies;
24479- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24480+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24481 break; /* Got lock */
24482 }
24483
24484@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24485 return -EINVAL;
24486 }
24487
24488- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24489+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24490
24491 if (drm_lock_free(&master->lock, lock->context)) {
24492 /* FIXME: Should really bail out here. */
24493diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c
24494--- linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24495+++ linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24496@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24497 dma->buflist[vertex->idx],
24498 vertex->discard, vertex->used);
24499
24500- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24501- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24502+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24503+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24504 sarea_priv->last_enqueue = dev_priv->counter - 1;
24505 sarea_priv->last_dispatch = (int)hw_status[5];
24506
24507@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24508 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24509 mc->last_render);
24510
24511- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24512- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24513+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24514+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24515 sarea_priv->last_enqueue = dev_priv->counter - 1;
24516 sarea_priv->last_dispatch = (int)hw_status[5];
24517
24518diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h
24519--- linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24520+++ linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24521@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24522 int page_flipping;
24523
24524 wait_queue_head_t irq_queue;
24525- atomic_t irq_received;
24526- atomic_t irq_emitted;
24527+ atomic_unchecked_t irq_received;
24528+ atomic_unchecked_t irq_emitted;
24529
24530 int front_offset;
24531 } drm_i810_private_t;
24532diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c
24533--- linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24534+++ linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24535@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24536 I915_READ(GTIMR));
24537 }
24538 seq_printf(m, "Interrupts received: %d\n",
24539- atomic_read(&dev_priv->irq_received));
24540+ atomic_read_unchecked(&dev_priv->irq_received));
24541 for (i = 0; i < I915_NUM_RINGS; i++) {
24542 if (IS_GEN6(dev)) {
24543 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24544diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c
24545--- linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24546+++ linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24547@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24548 bool can_switch;
24549
24550 spin_lock(&dev->count_lock);
24551- can_switch = (dev->open_count == 0);
24552+ can_switch = (local_read(&dev->open_count) == 0);
24553 spin_unlock(&dev->count_lock);
24554 return can_switch;
24555 }
24556diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h
24557--- linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24558+++ linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24559@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24560 /* render clock increase/decrease */
24561 /* display clock increase/decrease */
24562 /* pll clock increase/decrease */
24563-};
24564+} __no_const;
24565
24566 struct intel_device_info {
24567 u8 gen;
24568@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24569 int current_page;
24570 int page_flipping;
24571
24572- atomic_t irq_received;
24573+ atomic_unchecked_t irq_received;
24574
24575 /* protects the irq masks */
24576 spinlock_t irq_lock;
24577@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24578 * will be page flipped away on the next vblank. When it
24579 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24580 */
24581- atomic_t pending_flip;
24582+ atomic_unchecked_t pending_flip;
24583 };
24584
24585 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24586@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24587 extern void intel_teardown_gmbus(struct drm_device *dev);
24588 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24589 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24590-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24591+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24592 {
24593 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24594 }
24595diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24596--- linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24597+++ linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24598@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24599 i915_gem_clflush_object(obj);
24600
24601 if (obj->base.pending_write_domain)
24602- cd->flips |= atomic_read(&obj->pending_flip);
24603+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24604
24605 /* The actual obj->write_domain will be updated with
24606 * pending_write_domain after we emit the accumulated flush for all
24607diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c
24608--- linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24609+++ linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24610@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24611 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24612 struct drm_i915_master_private *master_priv;
24613
24614- atomic_inc(&dev_priv->irq_received);
24615+ atomic_inc_unchecked(&dev_priv->irq_received);
24616
24617 /* disable master interrupt before clearing iir */
24618 de_ier = I915_READ(DEIER);
24619@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24620 struct drm_i915_master_private *master_priv;
24621 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24622
24623- atomic_inc(&dev_priv->irq_received);
24624+ atomic_inc_unchecked(&dev_priv->irq_received);
24625
24626 if (IS_GEN6(dev))
24627 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24628@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24629 int ret = IRQ_NONE, pipe;
24630 bool blc_event = false;
24631
24632- atomic_inc(&dev_priv->irq_received);
24633+ atomic_inc_unchecked(&dev_priv->irq_received);
24634
24635 iir = I915_READ(IIR);
24636
24637@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24638 {
24639 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24640
24641- atomic_set(&dev_priv->irq_received, 0);
24642+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24643
24644 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24645 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24646@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24648 int pipe;
24649
24650- atomic_set(&dev_priv->irq_received, 0);
24651+ atomic_set_unchecked(&dev_priv->irq_received, 0);
24652
24653 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24654 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24655diff -urNp linux-3.0.3/drivers/gpu/drm/i915/intel_display.c linux-3.0.3/drivers/gpu/drm/i915/intel_display.c
24656--- linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24657+++ linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24658@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24659
24660 wait_event(dev_priv->pending_flip_queue,
24661 atomic_read(&dev_priv->mm.wedged) ||
24662- atomic_read(&obj->pending_flip) == 0);
24663+ atomic_read_unchecked(&obj->pending_flip) == 0);
24664
24665 /* Big Hammer, we also need to ensure that any pending
24666 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24667@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24668 obj = to_intel_framebuffer(crtc->fb)->obj;
24669 dev_priv = crtc->dev->dev_private;
24670 wait_event(dev_priv->pending_flip_queue,
24671- atomic_read(&obj->pending_flip) == 0);
24672+ atomic_read_unchecked(&obj->pending_flip) == 0);
24673 }
24674
24675 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24676@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24677
24678 atomic_clear_mask(1 << intel_crtc->plane,
24679 &obj->pending_flip.counter);
24680- if (atomic_read(&obj->pending_flip) == 0)
24681+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
24682 wake_up(&dev_priv->pending_flip_queue);
24683
24684 schedule_work(&work->work);
24685@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24686 /* Block clients from rendering to the new back buffer until
24687 * the flip occurs and the object is no longer visible.
24688 */
24689- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24690+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24691
24692 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24693 if (ret)
24694@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24695 return 0;
24696
24697 cleanup_pending:
24698- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24699+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24700 cleanup_objs:
24701 drm_gem_object_unreference(&work->old_fb_obj->base);
24702 drm_gem_object_unreference(&obj->base);
24703diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h
24704--- linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24705+++ linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24706@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24707 u32 clear_cmd;
24708 u32 maccess;
24709
24710- atomic_t vbl_received; /**< Number of vblanks received. */
24711+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24712 wait_queue_head_t fence_queue;
24713- atomic_t last_fence_retired;
24714+ atomic_unchecked_t last_fence_retired;
24715 u32 next_fence_to_post;
24716
24717 unsigned int fb_cpp;
24718diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c
24719--- linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24720+++ linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24721@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24722 if (crtc != 0)
24723 return 0;
24724
24725- return atomic_read(&dev_priv->vbl_received);
24726+ return atomic_read_unchecked(&dev_priv->vbl_received);
24727 }
24728
24729
24730@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24731 /* VBLANK interrupt */
24732 if (status & MGA_VLINEPEN) {
24733 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24734- atomic_inc(&dev_priv->vbl_received);
24735+ atomic_inc_unchecked(&dev_priv->vbl_received);
24736 drm_handle_vblank(dev, 0);
24737 handled = 1;
24738 }
24739@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24740 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24741 MGA_WRITE(MGA_PRIMEND, prim_end);
24742
24743- atomic_inc(&dev_priv->last_fence_retired);
24744+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
24745 DRM_WAKEUP(&dev_priv->fence_queue);
24746 handled = 1;
24747 }
24748@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24749 * using fences.
24750 */
24751 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24752- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24753+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24754 - *sequence) <= (1 << 23)));
24755
24756 *sequence = cur_fence;
24757diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c
24758--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24759+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24760@@ -200,7 +200,7 @@ struct methods {
24761 const char desc[8];
24762 void (*loadbios)(struct drm_device *, uint8_t *);
24763 const bool rw;
24764-};
24765+} __do_const;
24766
24767 static struct methods shadow_methods[] = {
24768 { "PRAMIN", load_vbios_pramin, true },
24769@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24770 struct bit_table {
24771 const char id;
24772 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24773-};
24774+} __no_const;
24775
24776 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24777
24778diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h
24779--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24780+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24781@@ -227,7 +227,7 @@ struct nouveau_channel {
24782 struct list_head pending;
24783 uint32_t sequence;
24784 uint32_t sequence_ack;
24785- atomic_t last_sequence_irq;
24786+ atomic_unchecked_t last_sequence_irq;
24787 } fence;
24788
24789 /* DMA push buffer */
24790@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24791 u32 handle, u16 class);
24792 void (*set_tile_region)(struct drm_device *dev, int i);
24793 void (*tlb_flush)(struct drm_device *, int engine);
24794-};
24795+} __no_const;
24796
24797 struct nouveau_instmem_engine {
24798 void *priv;
24799@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24800 struct nouveau_mc_engine {
24801 int (*init)(struct drm_device *dev);
24802 void (*takedown)(struct drm_device *dev);
24803-};
24804+} __no_const;
24805
24806 struct nouveau_timer_engine {
24807 int (*init)(struct drm_device *dev);
24808 void (*takedown)(struct drm_device *dev);
24809 uint64_t (*read)(struct drm_device *dev);
24810-};
24811+} __no_const;
24812
24813 struct nouveau_fb_engine {
24814 int num_tiles;
24815@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24816 void (*put)(struct drm_device *, struct nouveau_mem **);
24817
24818 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24819-};
24820+} __no_const;
24821
24822 struct nouveau_engine {
24823 struct nouveau_instmem_engine instmem;
24824@@ -640,7 +640,7 @@ struct drm_nouveau_private {
24825 struct drm_global_reference mem_global_ref;
24826 struct ttm_bo_global_ref bo_global_ref;
24827 struct ttm_bo_device bdev;
24828- atomic_t validate_sequence;
24829+ atomic_unchecked_t validate_sequence;
24830 } ttm;
24831
24832 struct {
24833diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c
24834--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24835+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24836@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24837 if (USE_REFCNT(dev))
24838 sequence = nvchan_rd32(chan, 0x48);
24839 else
24840- sequence = atomic_read(&chan->fence.last_sequence_irq);
24841+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24842
24843 if (chan->fence.sequence_ack == sequence)
24844 goto out;
24845@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24846
24847 INIT_LIST_HEAD(&chan->fence.pending);
24848 spin_lock_init(&chan->fence.lock);
24849- atomic_set(&chan->fence.last_sequence_irq, 0);
24850+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24851 return 0;
24852 }
24853
24854diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c
24855--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24856+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24857@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24858 int trycnt = 0;
24859 int ret, i;
24860
24861- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24862+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24863 retry:
24864 if (++trycnt > 100000) {
24865 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24866diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c
24867--- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24868+++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24869@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24870 bool can_switch;
24871
24872 spin_lock(&dev->count_lock);
24873- can_switch = (dev->open_count == 0);
24874+ can_switch = (local_read(&dev->open_count) == 0);
24875 spin_unlock(&dev->count_lock);
24876 return can_switch;
24877 }
24878diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c
24879--- linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24880+++ linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24881@@ -560,7 +560,7 @@ static int
24882 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24883 u32 class, u32 mthd, u32 data)
24884 {
24885- atomic_set(&chan->fence.last_sequence_irq, data);
24886+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24887 return 0;
24888 }
24889
24890diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c
24891--- linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24892+++ linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24893@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24894
24895 /* GH: Simple idle check.
24896 */
24897- atomic_set(&dev_priv->idle_count, 0);
24898+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24899
24900 /* We don't support anything other than bus-mastering ring mode,
24901 * but the ring can be in either AGP or PCI space for the ring
24902diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h
24903--- linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24904+++ linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24905@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24906 int is_pci;
24907 unsigned long cce_buffers_offset;
24908
24909- atomic_t idle_count;
24910+ atomic_unchecked_t idle_count;
24911
24912 int page_flipping;
24913 int current_page;
24914 u32 crtc_offset;
24915 u32 crtc_offset_cntl;
24916
24917- atomic_t vbl_received;
24918+ atomic_unchecked_t vbl_received;
24919
24920 u32 color_fmt;
24921 unsigned int front_offset;
24922diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c
24923--- linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24924+++ linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24925@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24926 if (crtc != 0)
24927 return 0;
24928
24929- return atomic_read(&dev_priv->vbl_received);
24930+ return atomic_read_unchecked(&dev_priv->vbl_received);
24931 }
24932
24933 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24934@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24935 /* VBLANK interrupt */
24936 if (status & R128_CRTC_VBLANK_INT) {
24937 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24938- atomic_inc(&dev_priv->vbl_received);
24939+ atomic_inc_unchecked(&dev_priv->vbl_received);
24940 drm_handle_vblank(dev, 0);
24941 return IRQ_HANDLED;
24942 }
24943diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_state.c linux-3.0.3/drivers/gpu/drm/r128/r128_state.c
24944--- linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24945+++ linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24946@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24947
24948 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24949 {
24950- if (atomic_read(&dev_priv->idle_count) == 0)
24951+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24952 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24953 else
24954- atomic_set(&dev_priv->idle_count, 0);
24955+ atomic_set_unchecked(&dev_priv->idle_count, 0);
24956 }
24957
24958 #endif
24959diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/atom.c linux-3.0.3/drivers/gpu/drm/radeon/atom.c
24960--- linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24961+++ linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24962@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24963 char name[512];
24964 int i;
24965
24966+ pax_track_stack();
24967+
24968 ctx->card = card;
24969 ctx->bios = bios;
24970
24971diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c
24972--- linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24973+++ linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24974@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24975 regex_t mask_rex;
24976 regmatch_t match[4];
24977 char buf[1024];
24978- size_t end;
24979+ long end;
24980 int len;
24981 int done = 0;
24982 int r;
24983 unsigned o;
24984 struct offset *offset;
24985 char last_reg_s[10];
24986- int last_reg;
24987+ unsigned long last_reg;
24988
24989 if (regcomp
24990 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24991diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c
24992--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24993+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24994@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24995 struct radeon_gpio_rec gpio;
24996 struct radeon_hpd hpd;
24997
24998+ pax_track_stack();
24999+
25000 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25001 return false;
25002
25003diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c
25004--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
25005+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
25006@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
25007 bool can_switch;
25008
25009 spin_lock(&dev->count_lock);
25010- can_switch = (dev->open_count == 0);
25011+ can_switch = (local_read(&dev->open_count) == 0);
25012 spin_unlock(&dev->count_lock);
25013 return can_switch;
25014 }
25015diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c
25016--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25017+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25018@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25019 uint32_t post_div;
25020 u32 pll_out_min, pll_out_max;
25021
25022+ pax_track_stack();
25023+
25024 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25025 freq = freq * 1000;
25026
25027diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h
25028--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25029+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25030@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25031
25032 /* SW interrupt */
25033 wait_queue_head_t swi_queue;
25034- atomic_t swi_emitted;
25035+ atomic_unchecked_t swi_emitted;
25036 int vblank_crtc;
25037 uint32_t irq_enable_reg;
25038 uint32_t r500_disp_irq_reg;
25039diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c
25040--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25041+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25042@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25043 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25044 return 0;
25045 }
25046- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25047+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25048 if (!rdev->cp.ready)
25049 /* FIXME: cp is not running assume everythings is done right
25050 * away
25051@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25052 return r;
25053 }
25054 radeon_fence_write(rdev, 0);
25055- atomic_set(&rdev->fence_drv.seq, 0);
25056+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25057 INIT_LIST_HEAD(&rdev->fence_drv.created);
25058 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25059 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25060diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon.h linux-3.0.3/drivers/gpu/drm/radeon/radeon.h
25061--- linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25062+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25063@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25064 */
25065 struct radeon_fence_driver {
25066 uint32_t scratch_reg;
25067- atomic_t seq;
25068+ atomic_unchecked_t seq;
25069 uint32_t last_seq;
25070 unsigned long last_jiffies;
25071 unsigned long last_timeout;
25072@@ -960,7 +960,7 @@ struct radeon_asic {
25073 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25074 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25075 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25076-};
25077+} __no_const;
25078
25079 /*
25080 * Asic structures
25081diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c
25082--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25083+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25084@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25085 request = compat_alloc_user_space(sizeof(*request));
25086 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25087 || __put_user(req32.param, &request->param)
25088- || __put_user((void __user *)(unsigned long)req32.value,
25089+ || __put_user((unsigned long)req32.value,
25090 &request->value))
25091 return -EFAULT;
25092
25093diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c
25094--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25095+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25096@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25097 unsigned int ret;
25098 RING_LOCALS;
25099
25100- atomic_inc(&dev_priv->swi_emitted);
25101- ret = atomic_read(&dev_priv->swi_emitted);
25102+ atomic_inc_unchecked(&dev_priv->swi_emitted);
25103+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25104
25105 BEGIN_RING(4);
25106 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25107@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25108 drm_radeon_private_t *dev_priv =
25109 (drm_radeon_private_t *) dev->dev_private;
25110
25111- atomic_set(&dev_priv->swi_emitted, 0);
25112+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25113 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25114
25115 dev->max_vblank_count = 0x001fffff;
25116diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c
25117--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25118+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25119@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25120 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25121 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25122
25123- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25124+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25125 sarea_priv->nbox * sizeof(depth_boxes[0])))
25126 return -EFAULT;
25127
25128@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25129 {
25130 drm_radeon_private_t *dev_priv = dev->dev_private;
25131 drm_radeon_getparam_t *param = data;
25132- int value;
25133+ int value = 0;
25134
25135 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25136
25137diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c
25138--- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25139+++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25140@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25141 }
25142 if (unlikely(ttm_vm_ops == NULL)) {
25143 ttm_vm_ops = vma->vm_ops;
25144- radeon_ttm_vm_ops = *ttm_vm_ops;
25145- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25146+ pax_open_kernel();
25147+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25148+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25149+ pax_close_kernel();
25150 }
25151 vma->vm_ops = &radeon_ttm_vm_ops;
25152 return 0;
25153diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/rs690.c linux-3.0.3/drivers/gpu/drm/radeon/rs690.c
25154--- linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25155+++ linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25156@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25157 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25158 rdev->pm.sideport_bandwidth.full)
25159 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25160- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25161+ read_delay_latency.full = dfixed_const(800 * 1000);
25162 read_delay_latency.full = dfixed_div(read_delay_latency,
25163 rdev->pm.igp_sideport_mclk);
25164+ a.full = dfixed_const(370);
25165+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25166 } else {
25167 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25168 rdev->pm.k8_bandwidth.full)
25169diff -urNp linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
25170--- linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25171+++ linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25172@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25173 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25174 struct shrink_control *sc)
25175 {
25176- static atomic_t start_pool = ATOMIC_INIT(0);
25177+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25178 unsigned i;
25179- unsigned pool_offset = atomic_add_return(1, &start_pool);
25180+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25181 struct ttm_page_pool *pool;
25182 int shrink_pages = sc->nr_to_scan;
25183
25184diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_drv.h linux-3.0.3/drivers/gpu/drm/via/via_drv.h
25185--- linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25186+++ linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25187@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25188 typedef uint32_t maskarray_t[5];
25189
25190 typedef struct drm_via_irq {
25191- atomic_t irq_received;
25192+ atomic_unchecked_t irq_received;
25193 uint32_t pending_mask;
25194 uint32_t enable_mask;
25195 wait_queue_head_t irq_queue;
25196@@ -75,7 +75,7 @@ typedef struct drm_via_private {
25197 struct timeval last_vblank;
25198 int last_vblank_valid;
25199 unsigned usec_per_vblank;
25200- atomic_t vbl_received;
25201+ atomic_unchecked_t vbl_received;
25202 drm_via_state_t hc_state;
25203 char pci_buf[VIA_PCI_BUF_SIZE];
25204 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25205diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_irq.c linux-3.0.3/drivers/gpu/drm/via/via_irq.c
25206--- linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25207+++ linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25208@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25209 if (crtc != 0)
25210 return 0;
25211
25212- return atomic_read(&dev_priv->vbl_received);
25213+ return atomic_read_unchecked(&dev_priv->vbl_received);
25214 }
25215
25216 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25217@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25218
25219 status = VIA_READ(VIA_REG_INTERRUPT);
25220 if (status & VIA_IRQ_VBLANK_PENDING) {
25221- atomic_inc(&dev_priv->vbl_received);
25222- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25223+ atomic_inc_unchecked(&dev_priv->vbl_received);
25224+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25225 do_gettimeofday(&cur_vblank);
25226 if (dev_priv->last_vblank_valid) {
25227 dev_priv->usec_per_vblank =
25228@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25229 dev_priv->last_vblank = cur_vblank;
25230 dev_priv->last_vblank_valid = 1;
25231 }
25232- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25233+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25234 DRM_DEBUG("US per vblank is: %u\n",
25235 dev_priv->usec_per_vblank);
25236 }
25237@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25238
25239 for (i = 0; i < dev_priv->num_irqs; ++i) {
25240 if (status & cur_irq->pending_mask) {
25241- atomic_inc(&cur_irq->irq_received);
25242+ atomic_inc_unchecked(&cur_irq->irq_received);
25243 DRM_WAKEUP(&cur_irq->irq_queue);
25244 handled = 1;
25245 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25246@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25247 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25248 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25249 masks[irq][4]));
25250- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25251+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25252 } else {
25253 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25254 (((cur_irq_sequence =
25255- atomic_read(&cur_irq->irq_received)) -
25256+ atomic_read_unchecked(&cur_irq->irq_received)) -
25257 *sequence) <= (1 << 23)));
25258 }
25259 *sequence = cur_irq_sequence;
25260@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25261 }
25262
25263 for (i = 0; i < dev_priv->num_irqs; ++i) {
25264- atomic_set(&cur_irq->irq_received, 0);
25265+ atomic_set_unchecked(&cur_irq->irq_received, 0);
25266 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25267 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25268 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25269@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25270 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25271 case VIA_IRQ_RELATIVE:
25272 irqwait->request.sequence +=
25273- atomic_read(&cur_irq->irq_received);
25274+ atomic_read_unchecked(&cur_irq->irq_received);
25275 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25276 case VIA_IRQ_ABSOLUTE:
25277 break;
25278diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25279--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25280+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25281@@ -240,7 +240,7 @@ struct vmw_private {
25282 * Fencing and IRQs.
25283 */
25284
25285- atomic_t fence_seq;
25286+ atomic_unchecked_t fence_seq;
25287 wait_queue_head_t fence_queue;
25288 wait_queue_head_t fifo_queue;
25289 atomic_t fence_queue_waiters;
25290diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25291--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25292+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25293@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25294 while (!vmw_lag_lt(queue, us)) {
25295 spin_lock(&queue->lock);
25296 if (list_empty(&queue->head))
25297- sequence = atomic_read(&dev_priv->fence_seq);
25298+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25299 else {
25300 fence = list_first_entry(&queue->head,
25301 struct vmw_fence, head);
25302diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25303--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25304+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25305@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25306 (unsigned int) min,
25307 (unsigned int) fifo->capabilities);
25308
25309- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25310+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25311 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25312 vmw_fence_queue_init(&fifo->fence_queue);
25313 return vmw_fifo_send_fence(dev_priv, &dummy);
25314@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25315
25316 fm = vmw_fifo_reserve(dev_priv, bytes);
25317 if (unlikely(fm == NULL)) {
25318- *sequence = atomic_read(&dev_priv->fence_seq);
25319+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25320 ret = -ENOMEM;
25321 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25322 false, 3*HZ);
25323@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25324 }
25325
25326 do {
25327- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25328+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25329 } while (*sequence == 0);
25330
25331 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25332diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25333--- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25334+++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25335@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25336 * emitted. Then the fence is stale and signaled.
25337 */
25338
25339- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25340+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25341 > VMW_FENCE_WRAP);
25342
25343 return ret;
25344@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25345
25346 if (fifo_idle)
25347 down_read(&fifo_state->rwsem);
25348- signal_seq = atomic_read(&dev_priv->fence_seq);
25349+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25350 ret = 0;
25351
25352 for (;;) {
25353diff -urNp linux-3.0.3/drivers/hid/hid-core.c linux-3.0.3/drivers/hid/hid-core.c
25354--- linux-3.0.3/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25355+++ linux-3.0.3/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25356@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25357
25358 int hid_add_device(struct hid_device *hdev)
25359 {
25360- static atomic_t id = ATOMIC_INIT(0);
25361+ static atomic_unchecked_t id = ATOMIC_INIT(0);
25362 int ret;
25363
25364 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25365@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25366 /* XXX hack, any other cleaner solution after the driver core
25367 * is converted to allow more than 20 bytes as the device name? */
25368 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25369- hdev->vendor, hdev->product, atomic_inc_return(&id));
25370+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25371
25372 hid_debug_register(hdev, dev_name(&hdev->dev));
25373 ret = device_add(&hdev->dev);
25374diff -urNp linux-3.0.3/drivers/hid/usbhid/hiddev.c linux-3.0.3/drivers/hid/usbhid/hiddev.c
25375--- linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25376+++ linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25377@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25378 break;
25379
25380 case HIDIOCAPPLICATION:
25381- if (arg < 0 || arg >= hid->maxapplication)
25382+ if (arg >= hid->maxapplication)
25383 break;
25384
25385 for (i = 0; i < hid->maxcollection; i++)
25386diff -urNp linux-3.0.3/drivers/hwmon/acpi_power_meter.c linux-3.0.3/drivers/hwmon/acpi_power_meter.c
25387--- linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25388+++ linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25389@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25390 return res;
25391
25392 temp /= 1000;
25393- if (temp < 0)
25394- return -EINVAL;
25395
25396 mutex_lock(&resource->lock);
25397 resource->trip[attr->index - 7] = temp;
25398diff -urNp linux-3.0.3/drivers/hwmon/sht15.c linux-3.0.3/drivers/hwmon/sht15.c
25399--- linux-3.0.3/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25400+++ linux-3.0.3/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25401@@ -166,7 +166,7 @@ struct sht15_data {
25402 int supply_uV;
25403 bool supply_uV_valid;
25404 struct work_struct update_supply_work;
25405- atomic_t interrupt_handled;
25406+ atomic_unchecked_t interrupt_handled;
25407 };
25408
25409 /**
25410@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25411 return ret;
25412
25413 gpio_direction_input(data->pdata->gpio_data);
25414- atomic_set(&data->interrupt_handled, 0);
25415+ atomic_set_unchecked(&data->interrupt_handled, 0);
25416
25417 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25418 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25419 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25420 /* Only relevant if the interrupt hasn't occurred. */
25421- if (!atomic_read(&data->interrupt_handled))
25422+ if (!atomic_read_unchecked(&data->interrupt_handled))
25423 schedule_work(&data->read_work);
25424 }
25425 ret = wait_event_timeout(data->wait_queue,
25426@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25427
25428 /* First disable the interrupt */
25429 disable_irq_nosync(irq);
25430- atomic_inc(&data->interrupt_handled);
25431+ atomic_inc_unchecked(&data->interrupt_handled);
25432 /* Then schedule a reading work struct */
25433 if (data->state != SHT15_READING_NOTHING)
25434 schedule_work(&data->read_work);
25435@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25436 * If not, then start the interrupt again - care here as could
25437 * have gone low in meantime so verify it hasn't!
25438 */
25439- atomic_set(&data->interrupt_handled, 0);
25440+ atomic_set_unchecked(&data->interrupt_handled, 0);
25441 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25442 /* If still not occurred or another handler has been scheduled */
25443 if (gpio_get_value(data->pdata->gpio_data)
25444- || atomic_read(&data->interrupt_handled))
25445+ || atomic_read_unchecked(&data->interrupt_handled))
25446 return;
25447 }
25448
25449diff -urNp linux-3.0.3/drivers/hwmon/w83791d.c linux-3.0.3/drivers/hwmon/w83791d.c
25450--- linux-3.0.3/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25451+++ linux-3.0.3/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25452@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25453 struct i2c_board_info *info);
25454 static int w83791d_remove(struct i2c_client *client);
25455
25456-static int w83791d_read(struct i2c_client *client, u8 register);
25457-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25458+static int w83791d_read(struct i2c_client *client, u8 reg);
25459+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25460 static struct w83791d_data *w83791d_update_device(struct device *dev);
25461
25462 #ifdef DEBUG
25463diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c
25464--- linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25465+++ linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25466@@ -43,7 +43,7 @@
25467 extern struct i2c_adapter amd756_smbus;
25468
25469 static struct i2c_adapter *s4882_adapter;
25470-static struct i2c_algorithm *s4882_algo;
25471+static i2c_algorithm_no_const *s4882_algo;
25472
25473 /* Wrapper access functions for multiplexed SMBus */
25474 static DEFINE_MUTEX(amd756_lock);
25475diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c
25476--- linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25477+++ linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25478@@ -41,7 +41,7 @@
25479 extern struct i2c_adapter *nforce2_smbus;
25480
25481 static struct i2c_adapter *s4985_adapter;
25482-static struct i2c_algorithm *s4985_algo;
25483+static i2c_algorithm_no_const *s4985_algo;
25484
25485 /* Wrapper access functions for multiplexed SMBus */
25486 static DEFINE_MUTEX(nforce2_lock);
25487diff -urNp linux-3.0.3/drivers/i2c/i2c-mux.c linux-3.0.3/drivers/i2c/i2c-mux.c
25488--- linux-3.0.3/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25489+++ linux-3.0.3/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25490@@ -28,7 +28,7 @@
25491 /* multiplexer per channel data */
25492 struct i2c_mux_priv {
25493 struct i2c_adapter adap;
25494- struct i2c_algorithm algo;
25495+ i2c_algorithm_no_const algo;
25496
25497 struct i2c_adapter *parent;
25498 void *mux_dev; /* the mux chip/device */
25499diff -urNp linux-3.0.3/drivers/ide/ide-cd.c linux-3.0.3/drivers/ide/ide-cd.c
25500--- linux-3.0.3/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25501+++ linux-3.0.3/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25502@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25503 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25504 if ((unsigned long)buf & alignment
25505 || blk_rq_bytes(rq) & q->dma_pad_mask
25506- || object_is_on_stack(buf))
25507+ || object_starts_on_stack(buf))
25508 drive->dma = 0;
25509 }
25510 }
25511diff -urNp linux-3.0.3/drivers/ide/ide-floppy.c linux-3.0.3/drivers/ide/ide-floppy.c
25512--- linux-3.0.3/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25513+++ linux-3.0.3/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25514@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25515 u8 pc_buf[256], header_len, desc_cnt;
25516 int i, rc = 1, blocks, length;
25517
25518+ pax_track_stack();
25519+
25520 ide_debug_log(IDE_DBG_FUNC, "enter");
25521
25522 drive->bios_cyl = 0;
25523diff -urNp linux-3.0.3/drivers/ide/setup-pci.c linux-3.0.3/drivers/ide/setup-pci.c
25524--- linux-3.0.3/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25525+++ linux-3.0.3/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25526@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25527 int ret, i, n_ports = dev2 ? 4 : 2;
25528 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25529
25530+ pax_track_stack();
25531+
25532 for (i = 0; i < n_ports / 2; i++) {
25533 ret = ide_setup_pci_controller(pdev[i], d, !i);
25534 if (ret < 0)
25535diff -urNp linux-3.0.3/drivers/infiniband/core/cm.c linux-3.0.3/drivers/infiniband/core/cm.c
25536--- linux-3.0.3/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25537+++ linux-3.0.3/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25538@@ -113,7 +113,7 @@ static char const counter_group_names[CM
25539
25540 struct cm_counter_group {
25541 struct kobject obj;
25542- atomic_long_t counter[CM_ATTR_COUNT];
25543+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25544 };
25545
25546 struct cm_counter_attribute {
25547@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25548 struct ib_mad_send_buf *msg = NULL;
25549 int ret;
25550
25551- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25552+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25553 counter[CM_REQ_COUNTER]);
25554
25555 /* Quick state check to discard duplicate REQs. */
25556@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25557 if (!cm_id_priv)
25558 return;
25559
25560- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25561+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25562 counter[CM_REP_COUNTER]);
25563 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25564 if (ret)
25565@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25566 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25567 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25568 spin_unlock_irq(&cm_id_priv->lock);
25569- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25570+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25571 counter[CM_RTU_COUNTER]);
25572 goto out;
25573 }
25574@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25575 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25576 dreq_msg->local_comm_id);
25577 if (!cm_id_priv) {
25578- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25579+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25580 counter[CM_DREQ_COUNTER]);
25581 cm_issue_drep(work->port, work->mad_recv_wc);
25582 return -EINVAL;
25583@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25584 case IB_CM_MRA_REP_RCVD:
25585 break;
25586 case IB_CM_TIMEWAIT:
25587- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25588+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25589 counter[CM_DREQ_COUNTER]);
25590 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25591 goto unlock;
25592@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25593 cm_free_msg(msg);
25594 goto deref;
25595 case IB_CM_DREQ_RCVD:
25596- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25597+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25598 counter[CM_DREQ_COUNTER]);
25599 goto unlock;
25600 default:
25601@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25602 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25603 cm_id_priv->msg, timeout)) {
25604 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25605- atomic_long_inc(&work->port->
25606+ atomic_long_inc_unchecked(&work->port->
25607 counter_group[CM_RECV_DUPLICATES].
25608 counter[CM_MRA_COUNTER]);
25609 goto out;
25610@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25611 break;
25612 case IB_CM_MRA_REQ_RCVD:
25613 case IB_CM_MRA_REP_RCVD:
25614- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25615+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25616 counter[CM_MRA_COUNTER]);
25617 /* fall through */
25618 default:
25619@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25620 case IB_CM_LAP_IDLE:
25621 break;
25622 case IB_CM_MRA_LAP_SENT:
25623- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625 counter[CM_LAP_COUNTER]);
25626 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25627 goto unlock;
25628@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25629 cm_free_msg(msg);
25630 goto deref;
25631 case IB_CM_LAP_RCVD:
25632- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25633+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25634 counter[CM_LAP_COUNTER]);
25635 goto unlock;
25636 default:
25637@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25638 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25639 if (cur_cm_id_priv) {
25640 spin_unlock_irq(&cm.lock);
25641- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25642+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25643 counter[CM_SIDR_REQ_COUNTER]);
25644 goto out; /* Duplicate message. */
25645 }
25646@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25647 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25648 msg->retries = 1;
25649
25650- atomic_long_add(1 + msg->retries,
25651+ atomic_long_add_unchecked(1 + msg->retries,
25652 &port->counter_group[CM_XMIT].counter[attr_index]);
25653 if (msg->retries)
25654- atomic_long_add(msg->retries,
25655+ atomic_long_add_unchecked(msg->retries,
25656 &port->counter_group[CM_XMIT_RETRIES].
25657 counter[attr_index]);
25658
25659@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25660 }
25661
25662 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25663- atomic_long_inc(&port->counter_group[CM_RECV].
25664+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25665 counter[attr_id - CM_ATTR_ID_OFFSET]);
25666
25667 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25668@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25669 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25670
25671 return sprintf(buf, "%ld\n",
25672- atomic_long_read(&group->counter[cm_attr->index]));
25673+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25674 }
25675
25676 static const struct sysfs_ops cm_counter_ops = {
25677diff -urNp linux-3.0.3/drivers/infiniband/core/fmr_pool.c linux-3.0.3/drivers/infiniband/core/fmr_pool.c
25678--- linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25679+++ linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25680@@ -97,8 +97,8 @@ struct ib_fmr_pool {
25681
25682 struct task_struct *thread;
25683
25684- atomic_t req_ser;
25685- atomic_t flush_ser;
25686+ atomic_unchecked_t req_ser;
25687+ atomic_unchecked_t flush_ser;
25688
25689 wait_queue_head_t force_wait;
25690 };
25691@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25692 struct ib_fmr_pool *pool = pool_ptr;
25693
25694 do {
25695- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25696+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25697 ib_fmr_batch_release(pool);
25698
25699- atomic_inc(&pool->flush_ser);
25700+ atomic_inc_unchecked(&pool->flush_ser);
25701 wake_up_interruptible(&pool->force_wait);
25702
25703 if (pool->flush_function)
25704@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25705 }
25706
25707 set_current_state(TASK_INTERRUPTIBLE);
25708- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25709+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25710 !kthread_should_stop())
25711 schedule();
25712 __set_current_state(TASK_RUNNING);
25713@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25714 pool->dirty_watermark = params->dirty_watermark;
25715 pool->dirty_len = 0;
25716 spin_lock_init(&pool->pool_lock);
25717- atomic_set(&pool->req_ser, 0);
25718- atomic_set(&pool->flush_ser, 0);
25719+ atomic_set_unchecked(&pool->req_ser, 0);
25720+ atomic_set_unchecked(&pool->flush_ser, 0);
25721 init_waitqueue_head(&pool->force_wait);
25722
25723 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25724@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25725 }
25726 spin_unlock_irq(&pool->pool_lock);
25727
25728- serial = atomic_inc_return(&pool->req_ser);
25729+ serial = atomic_inc_return_unchecked(&pool->req_ser);
25730 wake_up_process(pool->thread);
25731
25732 if (wait_event_interruptible(pool->force_wait,
25733- atomic_read(&pool->flush_ser) - serial >= 0))
25734+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25735 return -EINTR;
25736
25737 return 0;
25738@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25739 } else {
25740 list_add_tail(&fmr->list, &pool->dirty_list);
25741 if (++pool->dirty_len >= pool->dirty_watermark) {
25742- atomic_inc(&pool->req_ser);
25743+ atomic_inc_unchecked(&pool->req_ser);
25744 wake_up_process(pool->thread);
25745 }
25746 }
25747diff -urNp linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c
25748--- linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25749+++ linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25750@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25751 int err;
25752 struct fw_ri_tpte tpt;
25753 u32 stag_idx;
25754- static atomic_t key;
25755+ static atomic_unchecked_t key;
25756
25757 if (c4iw_fatal_error(rdev))
25758 return -EIO;
25759@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25760 &rdev->resource.tpt_fifo_lock);
25761 if (!stag_idx)
25762 return -ENOMEM;
25763- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25764+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25765 }
25766 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25767 __func__, stag_state, type, pdid, stag_idx);
25768diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c
25769--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25770+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25771@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25772 struct infinipath_counters counters;
25773 struct ipath_devdata *dd;
25774
25775+ pax_track_stack();
25776+
25777 dd = file->f_path.dentry->d_inode->i_private;
25778 dd->ipath_f_read_counters(dd, &counters);
25779
25780diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c
25781--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25782+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25783@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25784 struct ib_atomic_eth *ateth;
25785 struct ipath_ack_entry *e;
25786 u64 vaddr;
25787- atomic64_t *maddr;
25788+ atomic64_unchecked_t *maddr;
25789 u64 sdata;
25790 u32 rkey;
25791 u8 next;
25792@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25793 IB_ACCESS_REMOTE_ATOMIC)))
25794 goto nack_acc_unlck;
25795 /* Perform atomic OP and save result. */
25796- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25797+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25798 sdata = be64_to_cpu(ateth->swap_data);
25799 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25800 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25801- (u64) atomic64_add_return(sdata, maddr) - sdata :
25802+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25803 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25804 be64_to_cpu(ateth->compare_data),
25805 sdata);
25806diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c
25807--- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25808+++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25809@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25810 unsigned long flags;
25811 struct ib_wc wc;
25812 u64 sdata;
25813- atomic64_t *maddr;
25814+ atomic64_unchecked_t *maddr;
25815 enum ib_wc_status send_status;
25816
25817 /*
25818@@ -382,11 +382,11 @@ again:
25819 IB_ACCESS_REMOTE_ATOMIC)))
25820 goto acc_err;
25821 /* Perform atomic OP and save result. */
25822- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25823+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25824 sdata = wqe->wr.wr.atomic.compare_add;
25825 *(u64 *) sqp->s_sge.sge.vaddr =
25826 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25827- (u64) atomic64_add_return(sdata, maddr) - sdata :
25828+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25829 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25830 sdata, wqe->wr.wr.atomic.swap);
25831 goto send_comp;
25832diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.c linux-3.0.3/drivers/infiniband/hw/nes/nes.c
25833--- linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25834+++ linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25835@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25836 LIST_HEAD(nes_adapter_list);
25837 static LIST_HEAD(nes_dev_list);
25838
25839-atomic_t qps_destroyed;
25840+atomic_unchecked_t qps_destroyed;
25841
25842 static unsigned int ee_flsh_adapter;
25843 static unsigned int sysfs_nonidx_addr;
25844@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25845 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25846 struct nes_adapter *nesadapter = nesdev->nesadapter;
25847
25848- atomic_inc(&qps_destroyed);
25849+ atomic_inc_unchecked(&qps_destroyed);
25850
25851 /* Free the control structures */
25852
25853diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c
25854--- linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25855+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25856@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25857 u32 cm_packets_retrans;
25858 u32 cm_packets_created;
25859 u32 cm_packets_received;
25860-atomic_t cm_listens_created;
25861-atomic_t cm_listens_destroyed;
25862+atomic_unchecked_t cm_listens_created;
25863+atomic_unchecked_t cm_listens_destroyed;
25864 u32 cm_backlog_drops;
25865-atomic_t cm_loopbacks;
25866-atomic_t cm_nodes_created;
25867-atomic_t cm_nodes_destroyed;
25868-atomic_t cm_accel_dropped_pkts;
25869-atomic_t cm_resets_recvd;
25870+atomic_unchecked_t cm_loopbacks;
25871+atomic_unchecked_t cm_nodes_created;
25872+atomic_unchecked_t cm_nodes_destroyed;
25873+atomic_unchecked_t cm_accel_dropped_pkts;
25874+atomic_unchecked_t cm_resets_recvd;
25875
25876 static inline int mini_cm_accelerated(struct nes_cm_core *,
25877 struct nes_cm_node *);
25878@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25879
25880 static struct nes_cm_core *g_cm_core;
25881
25882-atomic_t cm_connects;
25883-atomic_t cm_accepts;
25884-atomic_t cm_disconnects;
25885-atomic_t cm_closes;
25886-atomic_t cm_connecteds;
25887-atomic_t cm_connect_reqs;
25888-atomic_t cm_rejects;
25889+atomic_unchecked_t cm_connects;
25890+atomic_unchecked_t cm_accepts;
25891+atomic_unchecked_t cm_disconnects;
25892+atomic_unchecked_t cm_closes;
25893+atomic_unchecked_t cm_connecteds;
25894+atomic_unchecked_t cm_connect_reqs;
25895+atomic_unchecked_t cm_rejects;
25896
25897
25898 /**
25899@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25900 kfree(listener);
25901 listener = NULL;
25902 ret = 0;
25903- atomic_inc(&cm_listens_destroyed);
25904+ atomic_inc_unchecked(&cm_listens_destroyed);
25905 } else {
25906 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25907 }
25908@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25909 cm_node->rem_mac);
25910
25911 add_hte_node(cm_core, cm_node);
25912- atomic_inc(&cm_nodes_created);
25913+ atomic_inc_unchecked(&cm_nodes_created);
25914
25915 return cm_node;
25916 }
25917@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25918 }
25919
25920 atomic_dec(&cm_core->node_cnt);
25921- atomic_inc(&cm_nodes_destroyed);
25922+ atomic_inc_unchecked(&cm_nodes_destroyed);
25923 nesqp = cm_node->nesqp;
25924 if (nesqp) {
25925 nesqp->cm_node = NULL;
25926@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25927
25928 static void drop_packet(struct sk_buff *skb)
25929 {
25930- atomic_inc(&cm_accel_dropped_pkts);
25931+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25932 dev_kfree_skb_any(skb);
25933 }
25934
25935@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25936 {
25937
25938 int reset = 0; /* whether to send reset in case of err.. */
25939- atomic_inc(&cm_resets_recvd);
25940+ atomic_inc_unchecked(&cm_resets_recvd);
25941 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25942 " refcnt=%d\n", cm_node, cm_node->state,
25943 atomic_read(&cm_node->ref_count));
25944@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25945 rem_ref_cm_node(cm_node->cm_core, cm_node);
25946 return NULL;
25947 }
25948- atomic_inc(&cm_loopbacks);
25949+ atomic_inc_unchecked(&cm_loopbacks);
25950 loopbackremotenode->loopbackpartner = cm_node;
25951 loopbackremotenode->tcp_cntxt.rcv_wscale =
25952 NES_CM_DEFAULT_RCV_WND_SCALE;
25953@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25954 add_ref_cm_node(cm_node);
25955 } else if (cm_node->state == NES_CM_STATE_TSA) {
25956 rem_ref_cm_node(cm_core, cm_node);
25957- atomic_inc(&cm_accel_dropped_pkts);
25958+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
25959 dev_kfree_skb_any(skb);
25960 break;
25961 }
25962@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25963
25964 if ((cm_id) && (cm_id->event_handler)) {
25965 if (issue_disconn) {
25966- atomic_inc(&cm_disconnects);
25967+ atomic_inc_unchecked(&cm_disconnects);
25968 cm_event.event = IW_CM_EVENT_DISCONNECT;
25969 cm_event.status = disconn_status;
25970 cm_event.local_addr = cm_id->local_addr;
25971@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25972 }
25973
25974 if (issue_close) {
25975- atomic_inc(&cm_closes);
25976+ atomic_inc_unchecked(&cm_closes);
25977 nes_disconnect(nesqp, 1);
25978
25979 cm_id->provider_data = nesqp;
25980@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25981
25982 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25983 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25984- atomic_inc(&cm_accepts);
25985+ atomic_inc_unchecked(&cm_accepts);
25986
25987 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25988 netdev_refcnt_read(nesvnic->netdev));
25989@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25990
25991 struct nes_cm_core *cm_core;
25992
25993- atomic_inc(&cm_rejects);
25994+ atomic_inc_unchecked(&cm_rejects);
25995 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25996 loopback = cm_node->loopbackpartner;
25997 cm_core = cm_node->cm_core;
25998@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25999 ntohl(cm_id->local_addr.sin_addr.s_addr),
26000 ntohs(cm_id->local_addr.sin_port));
26001
26002- atomic_inc(&cm_connects);
26003+ atomic_inc_unchecked(&cm_connects);
26004 nesqp->active_conn = 1;
26005
26006 /* cache the cm_id in the qp */
26007@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26008 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26009 return err;
26010 }
26011- atomic_inc(&cm_listens_created);
26012+ atomic_inc_unchecked(&cm_listens_created);
26013 }
26014
26015 cm_id->add_ref(cm_id);
26016@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26017 if (nesqp->destroyed) {
26018 return;
26019 }
26020- atomic_inc(&cm_connecteds);
26021+ atomic_inc_unchecked(&cm_connecteds);
26022 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26023 " local port 0x%04X. jiffies = %lu.\n",
26024 nesqp->hwqp.qp_id,
26025@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26026
26027 cm_id->add_ref(cm_id);
26028 ret = cm_id->event_handler(cm_id, &cm_event);
26029- atomic_inc(&cm_closes);
26030+ atomic_inc_unchecked(&cm_closes);
26031 cm_event.event = IW_CM_EVENT_CLOSE;
26032 cm_event.status = 0;
26033 cm_event.provider_data = cm_id->provider_data;
26034@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26035 return;
26036 cm_id = cm_node->cm_id;
26037
26038- atomic_inc(&cm_connect_reqs);
26039+ atomic_inc_unchecked(&cm_connect_reqs);
26040 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26041 cm_node, cm_id, jiffies);
26042
26043@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26044 return;
26045 cm_id = cm_node->cm_id;
26046
26047- atomic_inc(&cm_connect_reqs);
26048+ atomic_inc_unchecked(&cm_connect_reqs);
26049 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26050 cm_node, cm_id, jiffies);
26051
26052diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.h linux-3.0.3/drivers/infiniband/hw/nes/nes.h
26053--- linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26054+++ linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26055@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26056 extern unsigned int wqm_quanta;
26057 extern struct list_head nes_adapter_list;
26058
26059-extern atomic_t cm_connects;
26060-extern atomic_t cm_accepts;
26061-extern atomic_t cm_disconnects;
26062-extern atomic_t cm_closes;
26063-extern atomic_t cm_connecteds;
26064-extern atomic_t cm_connect_reqs;
26065-extern atomic_t cm_rejects;
26066-extern atomic_t mod_qp_timouts;
26067-extern atomic_t qps_created;
26068-extern atomic_t qps_destroyed;
26069-extern atomic_t sw_qps_destroyed;
26070+extern atomic_unchecked_t cm_connects;
26071+extern atomic_unchecked_t cm_accepts;
26072+extern atomic_unchecked_t cm_disconnects;
26073+extern atomic_unchecked_t cm_closes;
26074+extern atomic_unchecked_t cm_connecteds;
26075+extern atomic_unchecked_t cm_connect_reqs;
26076+extern atomic_unchecked_t cm_rejects;
26077+extern atomic_unchecked_t mod_qp_timouts;
26078+extern atomic_unchecked_t qps_created;
26079+extern atomic_unchecked_t qps_destroyed;
26080+extern atomic_unchecked_t sw_qps_destroyed;
26081 extern u32 mh_detected;
26082 extern u32 mh_pauses_sent;
26083 extern u32 cm_packets_sent;
26084@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26085 extern u32 cm_packets_received;
26086 extern u32 cm_packets_dropped;
26087 extern u32 cm_packets_retrans;
26088-extern atomic_t cm_listens_created;
26089-extern atomic_t cm_listens_destroyed;
26090+extern atomic_unchecked_t cm_listens_created;
26091+extern atomic_unchecked_t cm_listens_destroyed;
26092 extern u32 cm_backlog_drops;
26093-extern atomic_t cm_loopbacks;
26094-extern atomic_t cm_nodes_created;
26095-extern atomic_t cm_nodes_destroyed;
26096-extern atomic_t cm_accel_dropped_pkts;
26097-extern atomic_t cm_resets_recvd;
26098+extern atomic_unchecked_t cm_loopbacks;
26099+extern atomic_unchecked_t cm_nodes_created;
26100+extern atomic_unchecked_t cm_nodes_destroyed;
26101+extern atomic_unchecked_t cm_accel_dropped_pkts;
26102+extern atomic_unchecked_t cm_resets_recvd;
26103
26104 extern u32 int_mod_timer_init;
26105 extern u32 int_mod_cq_depth_256;
26106diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c
26107--- linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26108+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26109@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26110 target_stat_values[++index] = mh_detected;
26111 target_stat_values[++index] = mh_pauses_sent;
26112 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26113- target_stat_values[++index] = atomic_read(&cm_connects);
26114- target_stat_values[++index] = atomic_read(&cm_accepts);
26115- target_stat_values[++index] = atomic_read(&cm_disconnects);
26116- target_stat_values[++index] = atomic_read(&cm_connecteds);
26117- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26118- target_stat_values[++index] = atomic_read(&cm_rejects);
26119- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26120- target_stat_values[++index] = atomic_read(&qps_created);
26121- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26122- target_stat_values[++index] = atomic_read(&qps_destroyed);
26123- target_stat_values[++index] = atomic_read(&cm_closes);
26124+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26125+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26126+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26127+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26128+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26129+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26130+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26131+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26132+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26133+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26134+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26135 target_stat_values[++index] = cm_packets_sent;
26136 target_stat_values[++index] = cm_packets_bounced;
26137 target_stat_values[++index] = cm_packets_created;
26138 target_stat_values[++index] = cm_packets_received;
26139 target_stat_values[++index] = cm_packets_dropped;
26140 target_stat_values[++index] = cm_packets_retrans;
26141- target_stat_values[++index] = atomic_read(&cm_listens_created);
26142- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26143+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26144+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26145 target_stat_values[++index] = cm_backlog_drops;
26146- target_stat_values[++index] = atomic_read(&cm_loopbacks);
26147- target_stat_values[++index] = atomic_read(&cm_nodes_created);
26148- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26149- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26150- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26151+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26152+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26153+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26154+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26155+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26156 target_stat_values[++index] = nesadapter->free_4kpbl;
26157 target_stat_values[++index] = nesadapter->free_256pbl;
26158 target_stat_values[++index] = int_mod_timer_init;
26159diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c
26160--- linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26161+++ linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26162@@ -46,9 +46,9 @@
26163
26164 #include <rdma/ib_umem.h>
26165
26166-atomic_t mod_qp_timouts;
26167-atomic_t qps_created;
26168-atomic_t sw_qps_destroyed;
26169+atomic_unchecked_t mod_qp_timouts;
26170+atomic_unchecked_t qps_created;
26171+atomic_unchecked_t sw_qps_destroyed;
26172
26173 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26174
26175@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26176 if (init_attr->create_flags)
26177 return ERR_PTR(-EINVAL);
26178
26179- atomic_inc(&qps_created);
26180+ atomic_inc_unchecked(&qps_created);
26181 switch (init_attr->qp_type) {
26182 case IB_QPT_RC:
26183 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26184@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26185 struct iw_cm_event cm_event;
26186 int ret;
26187
26188- atomic_inc(&sw_qps_destroyed);
26189+ atomic_inc_unchecked(&sw_qps_destroyed);
26190 nesqp->destroyed = 1;
26191
26192 /* Blow away the connection if it exists. */
26193diff -urNp linux-3.0.3/drivers/infiniband/hw/qib/qib.h linux-3.0.3/drivers/infiniband/hw/qib/qib.h
26194--- linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26195+++ linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26196@@ -51,6 +51,7 @@
26197 #include <linux/completion.h>
26198 #include <linux/kref.h>
26199 #include <linux/sched.h>
26200+#include <linux/slab.h>
26201
26202 #include "qib_common.h"
26203 #include "qib_verbs.h"
26204diff -urNp linux-3.0.3/drivers/input/gameport/gameport.c linux-3.0.3/drivers/input/gameport/gameport.c
26205--- linux-3.0.3/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26206+++ linux-3.0.3/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26207@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26208 */
26209 static void gameport_init_port(struct gameport *gameport)
26210 {
26211- static atomic_t gameport_no = ATOMIC_INIT(0);
26212+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26213
26214 __module_get(THIS_MODULE);
26215
26216 mutex_init(&gameport->drv_mutex);
26217 device_initialize(&gameport->dev);
26218 dev_set_name(&gameport->dev, "gameport%lu",
26219- (unsigned long)atomic_inc_return(&gameport_no) - 1);
26220+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26221 gameport->dev.bus = &gameport_bus;
26222 gameport->dev.release = gameport_release_port;
26223 if (gameport->parent)
26224diff -urNp linux-3.0.3/drivers/input/input.c linux-3.0.3/drivers/input/input.c
26225--- linux-3.0.3/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26226+++ linux-3.0.3/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26227@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26228 */
26229 int input_register_device(struct input_dev *dev)
26230 {
26231- static atomic_t input_no = ATOMIC_INIT(0);
26232+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26233 struct input_handler *handler;
26234 const char *path;
26235 int error;
26236@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26237 dev->setkeycode = input_default_setkeycode;
26238
26239 dev_set_name(&dev->dev, "input%ld",
26240- (unsigned long) atomic_inc_return(&input_no) - 1);
26241+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26242
26243 error = device_add(&dev->dev);
26244 if (error)
26245diff -urNp linux-3.0.3/drivers/input/joystick/sidewinder.c linux-3.0.3/drivers/input/joystick/sidewinder.c
26246--- linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26247+++ linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26248@@ -30,6 +30,7 @@
26249 #include <linux/kernel.h>
26250 #include <linux/module.h>
26251 #include <linux/slab.h>
26252+#include <linux/sched.h>
26253 #include <linux/init.h>
26254 #include <linux/input.h>
26255 #include <linux/gameport.h>
26256@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26257 unsigned char buf[SW_LENGTH];
26258 int i;
26259
26260+ pax_track_stack();
26261+
26262 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26263
26264 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26265diff -urNp linux-3.0.3/drivers/input/joystick/xpad.c linux-3.0.3/drivers/input/joystick/xpad.c
26266--- linux-3.0.3/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26267+++ linux-3.0.3/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26268@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26269
26270 static int xpad_led_probe(struct usb_xpad *xpad)
26271 {
26272- static atomic_t led_seq = ATOMIC_INIT(0);
26273+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26274 long led_no;
26275 struct xpad_led *led;
26276 struct led_classdev *led_cdev;
26277@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26278 if (!led)
26279 return -ENOMEM;
26280
26281- led_no = (long)atomic_inc_return(&led_seq) - 1;
26282+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26283
26284 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26285 led->xpad = xpad;
26286diff -urNp linux-3.0.3/drivers/input/mousedev.c linux-3.0.3/drivers/input/mousedev.c
26287--- linux-3.0.3/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26288+++ linux-3.0.3/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26289@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26290
26291 spin_unlock_irq(&client->packet_lock);
26292
26293- if (copy_to_user(buffer, data, count))
26294+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
26295 return -EFAULT;
26296
26297 return count;
26298diff -urNp linux-3.0.3/drivers/input/serio/serio.c linux-3.0.3/drivers/input/serio/serio.c
26299--- linux-3.0.3/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26300+++ linux-3.0.3/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26301@@ -497,7 +497,7 @@ static void serio_release_port(struct de
26302 */
26303 static void serio_init_port(struct serio *serio)
26304 {
26305- static atomic_t serio_no = ATOMIC_INIT(0);
26306+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26307
26308 __module_get(THIS_MODULE);
26309
26310@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26311 mutex_init(&serio->drv_mutex);
26312 device_initialize(&serio->dev);
26313 dev_set_name(&serio->dev, "serio%ld",
26314- (long)atomic_inc_return(&serio_no) - 1);
26315+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
26316 serio->dev.bus = &serio_bus;
26317 serio->dev.release = serio_release_port;
26318 serio->dev.groups = serio_device_attr_groups;
26319diff -urNp linux-3.0.3/drivers/isdn/capi/capi.c linux-3.0.3/drivers/isdn/capi/capi.c
26320--- linux-3.0.3/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26321+++ linux-3.0.3/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26322@@ -83,8 +83,8 @@ struct capiminor {
26323
26324 struct capi20_appl *ap;
26325 u32 ncci;
26326- atomic_t datahandle;
26327- atomic_t msgid;
26328+ atomic_unchecked_t datahandle;
26329+ atomic_unchecked_t msgid;
26330
26331 struct tty_port port;
26332 int ttyinstop;
26333@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26334 capimsg_setu16(s, 2, mp->ap->applid);
26335 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26336 capimsg_setu8 (s, 5, CAPI_RESP);
26337- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26338+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26339 capimsg_setu32(s, 8, mp->ncci);
26340 capimsg_setu16(s, 12, datahandle);
26341 }
26342@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26343 mp->outbytes -= len;
26344 spin_unlock_bh(&mp->outlock);
26345
26346- datahandle = atomic_inc_return(&mp->datahandle);
26347+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26348 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26349 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26350 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26351 capimsg_setu16(skb->data, 2, mp->ap->applid);
26352 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26353 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26354- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26355+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26356 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26357 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26358 capimsg_setu16(skb->data, 16, len); /* Data length */
26359diff -urNp linux-3.0.3/drivers/isdn/gigaset/common.c linux-3.0.3/drivers/isdn/gigaset/common.c
26360--- linux-3.0.3/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26361+++ linux-3.0.3/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26362@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26363 cs->commands_pending = 0;
26364 cs->cur_at_seq = 0;
26365 cs->gotfwver = -1;
26366- cs->open_count = 0;
26367+ local_set(&cs->open_count, 0);
26368 cs->dev = NULL;
26369 cs->tty = NULL;
26370 cs->tty_dev = NULL;
26371diff -urNp linux-3.0.3/drivers/isdn/gigaset/gigaset.h linux-3.0.3/drivers/isdn/gigaset/gigaset.h
26372--- linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26373+++ linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26374@@ -35,6 +35,7 @@
26375 #include <linux/tty_driver.h>
26376 #include <linux/list.h>
26377 #include <asm/atomic.h>
26378+#include <asm/local.h>
26379
26380 #define GIG_VERSION {0, 5, 0, 0}
26381 #define GIG_COMPAT {0, 4, 0, 0}
26382@@ -433,7 +434,7 @@ struct cardstate {
26383 spinlock_t cmdlock;
26384 unsigned curlen, cmdbytes;
26385
26386- unsigned open_count;
26387+ local_t open_count;
26388 struct tty_struct *tty;
26389 struct tasklet_struct if_wake_tasklet;
26390 unsigned control_state;
26391diff -urNp linux-3.0.3/drivers/isdn/gigaset/interface.c linux-3.0.3/drivers/isdn/gigaset/interface.c
26392--- linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26393+++ linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26394@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26395 }
26396 tty->driver_data = cs;
26397
26398- ++cs->open_count;
26399-
26400- if (cs->open_count == 1) {
26401+ if (local_inc_return(&cs->open_count) == 1) {
26402 spin_lock_irqsave(&cs->lock, flags);
26403 cs->tty = tty;
26404 spin_unlock_irqrestore(&cs->lock, flags);
26405@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26406
26407 if (!cs->connected)
26408 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26409- else if (!cs->open_count)
26410+ else if (!local_read(&cs->open_count))
26411 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26412 else {
26413- if (!--cs->open_count) {
26414+ if (!local_dec_return(&cs->open_count)) {
26415 spin_lock_irqsave(&cs->lock, flags);
26416 cs->tty = NULL;
26417 spin_unlock_irqrestore(&cs->lock, flags);
26418@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26419 if (!cs->connected) {
26420 gig_dbg(DEBUG_IF, "not connected");
26421 retval = -ENODEV;
26422- } else if (!cs->open_count)
26423+ } else if (!local_read(&cs->open_count))
26424 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26425 else {
26426 retval = 0;
26427@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26428 retval = -ENODEV;
26429 goto done;
26430 }
26431- if (!cs->open_count) {
26432+ if (!local_read(&cs->open_count)) {
26433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26434 retval = -ENODEV;
26435 goto done;
26436@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26437 if (!cs->connected) {
26438 gig_dbg(DEBUG_IF, "not connected");
26439 retval = -ENODEV;
26440- } else if (!cs->open_count)
26441+ } else if (!local_read(&cs->open_count))
26442 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26443 else if (cs->mstate != MS_LOCKED) {
26444 dev_warn(cs->dev, "can't write to unlocked device\n");
26445@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26446
26447 if (!cs->connected)
26448 gig_dbg(DEBUG_IF, "not connected");
26449- else if (!cs->open_count)
26450+ else if (!local_read(&cs->open_count))
26451 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26452 else if (cs->mstate != MS_LOCKED)
26453 dev_warn(cs->dev, "can't write to unlocked device\n");
26454@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26455
26456 if (!cs->connected)
26457 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26458- else if (!cs->open_count)
26459+ else if (!local_read(&cs->open_count))
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461 else
26462 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26463@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26464
26465 if (!cs->connected)
26466 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26467- else if (!cs->open_count)
26468+ else if (!local_read(&cs->open_count))
26469 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26470 else
26471 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26472@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26473 goto out;
26474 }
26475
26476- if (!cs->open_count) {
26477+ if (!local_read(&cs->open_count)) {
26478 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26479 goto out;
26480 }
26481diff -urNp linux-3.0.3/drivers/isdn/hardware/avm/b1.c linux-3.0.3/drivers/isdn/hardware/avm/b1.c
26482--- linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26483+++ linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26484@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26485 }
26486 if (left) {
26487 if (t4file->user) {
26488- if (copy_from_user(buf, dp, left))
26489+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26490 return -EFAULT;
26491 } else {
26492 memcpy(buf, dp, left);
26493@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26494 }
26495 if (left) {
26496 if (config->user) {
26497- if (copy_from_user(buf, dp, left))
26498+ if (left > sizeof buf || copy_from_user(buf, dp, left))
26499 return -EFAULT;
26500 } else {
26501 memcpy(buf, dp, left);
26502diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c
26503--- linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26504+++ linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26505@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26506 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26507 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26508
26509+ pax_track_stack();
26510
26511 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26512 {
26513diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c
26514--- linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26515+++ linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26516@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26517 IDI_SYNC_REQ req;
26518 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26519
26520+ pax_track_stack();
26521+
26522 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26523
26524 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26525diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c
26526--- linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26527+++ linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26528@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26529 IDI_SYNC_REQ req;
26530 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26531
26532+ pax_track_stack();
26533+
26534 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26535
26536 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26537diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c
26538--- linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26539+++ linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26540@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26541 IDI_SYNC_REQ req;
26542 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26543
26544+ pax_track_stack();
26545+
26546 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26547
26548 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26549diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h
26550--- linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26551+++ linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26552@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26553 } diva_didd_add_adapter_t;
26554 typedef struct _diva_didd_remove_adapter {
26555 IDI_CALL p_request;
26556-} diva_didd_remove_adapter_t;
26557+} __no_const diva_didd_remove_adapter_t;
26558 typedef struct _diva_didd_read_adapter_array {
26559 void * buffer;
26560 dword length;
26561diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c
26562--- linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26563+++ linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26564@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26565 IDI_SYNC_REQ req;
26566 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26567
26568+ pax_track_stack();
26569+
26570 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26571
26572 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26573diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/message.c linux-3.0.3/drivers/isdn/hardware/eicon/message.c
26574--- linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26575+++ linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26576@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26577 dword d;
26578 word w;
26579
26580+ pax_track_stack();
26581+
26582 a = plci->adapter;
26583 Id = ((word)plci->Id<<8)|a->Id;
26584 PUT_WORD(&SS_Ind[4],0x0000);
26585@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26586 word j, n, w;
26587 dword d;
26588
26589+ pax_track_stack();
26590+
26591
26592 for(i=0;i<8;i++) bp_parms[i].length = 0;
26593 for(i=0;i<2;i++) global_config[i].length = 0;
26594@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26595 const byte llc3[] = {4,3,2,2,6,6,0};
26596 const byte header[] = {0,2,3,3,0,0,0};
26597
26598+ pax_track_stack();
26599+
26600 for(i=0;i<8;i++) bp_parms[i].length = 0;
26601 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26602 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26603@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26604 word appl_number_group_type[MAX_APPL];
26605 PLCI *auxplci;
26606
26607+ pax_track_stack();
26608+
26609 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26610
26611 if(!a->group_optimization_enabled)
26612diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c
26613--- linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26614+++ linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26615@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26616 IDI_SYNC_REQ req;
26617 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26618
26619+ pax_track_stack();
26620+
26621 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26622
26623 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26624diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h
26625--- linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26626+++ linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26627@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26628 typedef struct _diva_os_idi_adapter_interface {
26629 diva_init_card_proc_t cleanup_adapter_proc;
26630 diva_cmd_card_proc_t cmd_proc;
26631-} diva_os_idi_adapter_interface_t;
26632+} __no_const diva_os_idi_adapter_interface_t;
26633
26634 typedef struct _diva_os_xdi_adapter {
26635 struct list_head link;
26636diff -urNp linux-3.0.3/drivers/isdn/i4l/isdn_common.c linux-3.0.3/drivers/isdn/i4l/isdn_common.c
26637--- linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26638+++ linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26639@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26640 } iocpar;
26641 void __user *argp = (void __user *)arg;
26642
26643+ pax_track_stack();
26644+
26645 #define name iocpar.name
26646 #define bname iocpar.bname
26647 #define iocts iocpar.iocts
26648diff -urNp linux-3.0.3/drivers/isdn/icn/icn.c linux-3.0.3/drivers/isdn/icn/icn.c
26649--- linux-3.0.3/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26650+++ linux-3.0.3/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26651@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26652 if (count > len)
26653 count = len;
26654 if (user) {
26655- if (copy_from_user(msg, buf, count))
26656+ if (count > sizeof msg || copy_from_user(msg, buf, count))
26657 return -EFAULT;
26658 } else
26659 memcpy(msg, buf, count);
26660diff -urNp linux-3.0.3/drivers/lguest/core.c linux-3.0.3/drivers/lguest/core.c
26661--- linux-3.0.3/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26662+++ linux-3.0.3/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26663@@ -92,9 +92,17 @@ static __init int map_switcher(void)
26664 * it's worked so far. The end address needs +1 because __get_vm_area
26665 * allocates an extra guard page, so we need space for that.
26666 */
26667+
26668+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26669+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26670+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26671+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26672+#else
26673 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26674 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26675 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26676+#endif
26677+
26678 if (!switcher_vma) {
26679 err = -ENOMEM;
26680 printk("lguest: could not map switcher pages high\n");
26681@@ -119,7 +127,7 @@ static __init int map_switcher(void)
26682 * Now the Switcher is mapped at the right address, we can't fail!
26683 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26684 */
26685- memcpy(switcher_vma->addr, start_switcher_text,
26686+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26687 end_switcher_text - start_switcher_text);
26688
26689 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26690diff -urNp linux-3.0.3/drivers/lguest/x86/core.c linux-3.0.3/drivers/lguest/x86/core.c
26691--- linux-3.0.3/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26692+++ linux-3.0.3/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26693@@ -59,7 +59,7 @@ static struct {
26694 /* Offset from where switcher.S was compiled to where we've copied it */
26695 static unsigned long switcher_offset(void)
26696 {
26697- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26698+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26699 }
26700
26701 /* This cpu's struct lguest_pages. */
26702@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26703 * These copies are pretty cheap, so we do them unconditionally: */
26704 /* Save the current Host top-level page directory.
26705 */
26706+
26707+#ifdef CONFIG_PAX_PER_CPU_PGD
26708+ pages->state.host_cr3 = read_cr3();
26709+#else
26710 pages->state.host_cr3 = __pa(current->mm->pgd);
26711+#endif
26712+
26713 /*
26714 * Set up the Guest's page tables to see this CPU's pages (and no
26715 * other CPU's pages).
26716@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26717 * compiled-in switcher code and the high-mapped copy we just made.
26718 */
26719 for (i = 0; i < IDT_ENTRIES; i++)
26720- default_idt_entries[i] += switcher_offset();
26721+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26722
26723 /*
26724 * Set up the Switcher's per-cpu areas.
26725@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26726 * it will be undisturbed when we switch. To change %cs and jump we
26727 * need this structure to feed to Intel's "lcall" instruction.
26728 */
26729- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26730+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26731 lguest_entry.segment = LGUEST_CS;
26732
26733 /*
26734diff -urNp linux-3.0.3/drivers/lguest/x86/switcher_32.S linux-3.0.3/drivers/lguest/x86/switcher_32.S
26735--- linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26736+++ linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26737@@ -87,6 +87,7 @@
26738 #include <asm/page.h>
26739 #include <asm/segment.h>
26740 #include <asm/lguest.h>
26741+#include <asm/processor-flags.h>
26742
26743 // We mark the start of the code to copy
26744 // It's placed in .text tho it's never run here
26745@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26746 // Changes type when we load it: damn Intel!
26747 // For after we switch over our page tables
26748 // That entry will be read-only: we'd crash.
26749+
26750+#ifdef CONFIG_PAX_KERNEXEC
26751+ mov %cr0, %edx
26752+ xor $X86_CR0_WP, %edx
26753+ mov %edx, %cr0
26754+#endif
26755+
26756 movl $(GDT_ENTRY_TSS*8), %edx
26757 ltr %dx
26758
26759@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26760 // Let's clear it again for our return.
26761 // The GDT descriptor of the Host
26762 // Points to the table after two "size" bytes
26763- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26764+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26765 // Clear "used" from type field (byte 5, bit 2)
26766- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26767+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26768+
26769+#ifdef CONFIG_PAX_KERNEXEC
26770+ mov %cr0, %eax
26771+ xor $X86_CR0_WP, %eax
26772+ mov %eax, %cr0
26773+#endif
26774
26775 // Once our page table's switched, the Guest is live!
26776 // The Host fades as we run this final step.
26777@@ -295,13 +309,12 @@ deliver_to_host:
26778 // I consulted gcc, and it gave
26779 // These instructions, which I gladly credit:
26780 leal (%edx,%ebx,8), %eax
26781- movzwl (%eax),%edx
26782- movl 4(%eax), %eax
26783- xorw %ax, %ax
26784- orl %eax, %edx
26785+ movl 4(%eax), %edx
26786+ movw (%eax), %dx
26787 // Now the address of the handler's in %edx
26788 // We call it now: its "iret" drops us home.
26789- jmp *%edx
26790+ ljmp $__KERNEL_CS, $1f
26791+1: jmp *%edx
26792
26793 // Every interrupt can come to us here
26794 // But we must truly tell each apart.
26795diff -urNp linux-3.0.3/drivers/md/dm.c linux-3.0.3/drivers/md/dm.c
26796--- linux-3.0.3/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26797+++ linux-3.0.3/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26798@@ -164,9 +164,9 @@ struct mapped_device {
26799 /*
26800 * Event handling.
26801 */
26802- atomic_t event_nr;
26803+ atomic_unchecked_t event_nr;
26804 wait_queue_head_t eventq;
26805- atomic_t uevent_seq;
26806+ atomic_unchecked_t uevent_seq;
26807 struct list_head uevent_list;
26808 spinlock_t uevent_lock; /* Protect access to uevent_list */
26809
26810@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26811 rwlock_init(&md->map_lock);
26812 atomic_set(&md->holders, 1);
26813 atomic_set(&md->open_count, 0);
26814- atomic_set(&md->event_nr, 0);
26815- atomic_set(&md->uevent_seq, 0);
26816+ atomic_set_unchecked(&md->event_nr, 0);
26817+ atomic_set_unchecked(&md->uevent_seq, 0);
26818 INIT_LIST_HEAD(&md->uevent_list);
26819 spin_lock_init(&md->uevent_lock);
26820
26821@@ -1977,7 +1977,7 @@ static void event_callback(void *context
26822
26823 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26824
26825- atomic_inc(&md->event_nr);
26826+ atomic_inc_unchecked(&md->event_nr);
26827 wake_up(&md->eventq);
26828 }
26829
26830@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26831
26832 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26833 {
26834- return atomic_add_return(1, &md->uevent_seq);
26835+ return atomic_add_return_unchecked(1, &md->uevent_seq);
26836 }
26837
26838 uint32_t dm_get_event_nr(struct mapped_device *md)
26839 {
26840- return atomic_read(&md->event_nr);
26841+ return atomic_read_unchecked(&md->event_nr);
26842 }
26843
26844 int dm_wait_event(struct mapped_device *md, int event_nr)
26845 {
26846 return wait_event_interruptible(md->eventq,
26847- (event_nr != atomic_read(&md->event_nr)));
26848+ (event_nr != atomic_read_unchecked(&md->event_nr)));
26849 }
26850
26851 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26852diff -urNp linux-3.0.3/drivers/md/dm-ioctl.c linux-3.0.3/drivers/md/dm-ioctl.c
26853--- linux-3.0.3/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26854+++ linux-3.0.3/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26855@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26856 cmd == DM_LIST_VERSIONS_CMD)
26857 return 0;
26858
26859- if ((cmd == DM_DEV_CREATE_CMD)) {
26860+ if (cmd == DM_DEV_CREATE_CMD) {
26861 if (!*param->name) {
26862 DMWARN("name not supplied when creating device");
26863 return -EINVAL;
26864diff -urNp linux-3.0.3/drivers/md/dm-raid1.c linux-3.0.3/drivers/md/dm-raid1.c
26865--- linux-3.0.3/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26866+++ linux-3.0.3/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26867@@ -40,7 +40,7 @@ enum dm_raid1_error {
26868
26869 struct mirror {
26870 struct mirror_set *ms;
26871- atomic_t error_count;
26872+ atomic_unchecked_t error_count;
26873 unsigned long error_type;
26874 struct dm_dev *dev;
26875 sector_t offset;
26876@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26877 struct mirror *m;
26878
26879 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26880- if (!atomic_read(&m->error_count))
26881+ if (!atomic_read_unchecked(&m->error_count))
26882 return m;
26883
26884 return NULL;
26885@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26886 * simple way to tell if a device has encountered
26887 * errors.
26888 */
26889- atomic_inc(&m->error_count);
26890+ atomic_inc_unchecked(&m->error_count);
26891
26892 if (test_and_set_bit(error_type, &m->error_type))
26893 return;
26894@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26895 struct mirror *m = get_default_mirror(ms);
26896
26897 do {
26898- if (likely(!atomic_read(&m->error_count)))
26899+ if (likely(!atomic_read_unchecked(&m->error_count)))
26900 return m;
26901
26902 if (m-- == ms->mirror)
26903@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26904 {
26905 struct mirror *default_mirror = get_default_mirror(m->ms);
26906
26907- return !atomic_read(&default_mirror->error_count);
26908+ return !atomic_read_unchecked(&default_mirror->error_count);
26909 }
26910
26911 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26912@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26913 */
26914 if (likely(region_in_sync(ms, region, 1)))
26915 m = choose_mirror(ms, bio->bi_sector);
26916- else if (m && atomic_read(&m->error_count))
26917+ else if (m && atomic_read_unchecked(&m->error_count))
26918 m = NULL;
26919
26920 if (likely(m))
26921@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26922 }
26923
26924 ms->mirror[mirror].ms = ms;
26925- atomic_set(&(ms->mirror[mirror].error_count), 0);
26926+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26927 ms->mirror[mirror].error_type = 0;
26928 ms->mirror[mirror].offset = offset;
26929
26930@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26931 */
26932 static char device_status_char(struct mirror *m)
26933 {
26934- if (!atomic_read(&(m->error_count)))
26935+ if (!atomic_read_unchecked(&(m->error_count)))
26936 return 'A';
26937
26938 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26939diff -urNp linux-3.0.3/drivers/md/dm-stripe.c linux-3.0.3/drivers/md/dm-stripe.c
26940--- linux-3.0.3/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26941+++ linux-3.0.3/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26942@@ -20,7 +20,7 @@ struct stripe {
26943 struct dm_dev *dev;
26944 sector_t physical_start;
26945
26946- atomic_t error_count;
26947+ atomic_unchecked_t error_count;
26948 };
26949
26950 struct stripe_c {
26951@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26952 kfree(sc);
26953 return r;
26954 }
26955- atomic_set(&(sc->stripe[i].error_count), 0);
26956+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26957 }
26958
26959 ti->private = sc;
26960@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26961 DMEMIT("%d ", sc->stripes);
26962 for (i = 0; i < sc->stripes; i++) {
26963 DMEMIT("%s ", sc->stripe[i].dev->name);
26964- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26965+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26966 'D' : 'A';
26967 }
26968 buffer[i] = '\0';
26969@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26970 */
26971 for (i = 0; i < sc->stripes; i++)
26972 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26973- atomic_inc(&(sc->stripe[i].error_count));
26974- if (atomic_read(&(sc->stripe[i].error_count)) <
26975+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
26976+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26977 DM_IO_ERROR_THRESHOLD)
26978 schedule_work(&sc->trigger_event);
26979 }
26980diff -urNp linux-3.0.3/drivers/md/dm-table.c linux-3.0.3/drivers/md/dm-table.c
26981--- linux-3.0.3/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26982+++ linux-3.0.3/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26983@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26984 if (!dev_size)
26985 return 0;
26986
26987- if ((start >= dev_size) || (start + len > dev_size)) {
26988+ if ((start >= dev_size) || (len > dev_size - start)) {
26989 DMWARN("%s: %s too small for target: "
26990 "start=%llu, len=%llu, dev_size=%llu",
26991 dm_device_name(ti->table->md), bdevname(bdev, b),
26992diff -urNp linux-3.0.3/drivers/md/md.c linux-3.0.3/drivers/md/md.c
26993--- linux-3.0.3/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26994+++ linux-3.0.3/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26995@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26996 * start build, activate spare
26997 */
26998 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26999-static atomic_t md_event_count;
27000+static atomic_unchecked_t md_event_count;
27001 void md_new_event(mddev_t *mddev)
27002 {
27003- atomic_inc(&md_event_count);
27004+ atomic_inc_unchecked(&md_event_count);
27005 wake_up(&md_event_waiters);
27006 }
27007 EXPORT_SYMBOL_GPL(md_new_event);
27008@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27009 */
27010 static void md_new_event_inintr(mddev_t *mddev)
27011 {
27012- atomic_inc(&md_event_count);
27013+ atomic_inc_unchecked(&md_event_count);
27014 wake_up(&md_event_waiters);
27015 }
27016
27017@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27018
27019 rdev->preferred_minor = 0xffff;
27020 rdev->data_offset = le64_to_cpu(sb->data_offset);
27021- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27022+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27023
27024 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27025 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27026@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27027 else
27028 sb->resync_offset = cpu_to_le64(0);
27029
27030- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27031+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27032
27033 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27034 sb->size = cpu_to_le64(mddev->dev_sectors);
27035@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27036 static ssize_t
27037 errors_show(mdk_rdev_t *rdev, char *page)
27038 {
27039- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27040+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27041 }
27042
27043 static ssize_t
27044@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27045 char *e;
27046 unsigned long n = simple_strtoul(buf, &e, 10);
27047 if (*buf && (*e == 0 || *e == '\n')) {
27048- atomic_set(&rdev->corrected_errors, n);
27049+ atomic_set_unchecked(&rdev->corrected_errors, n);
27050 return len;
27051 }
27052 return -EINVAL;
27053@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27054 rdev->last_read_error.tv_sec = 0;
27055 rdev->last_read_error.tv_nsec = 0;
27056 atomic_set(&rdev->nr_pending, 0);
27057- atomic_set(&rdev->read_errors, 0);
27058- atomic_set(&rdev->corrected_errors, 0);
27059+ atomic_set_unchecked(&rdev->read_errors, 0);
27060+ atomic_set_unchecked(&rdev->corrected_errors, 0);
27061
27062 INIT_LIST_HEAD(&rdev->same_set);
27063 init_waitqueue_head(&rdev->blocked_wait);
27064@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27065
27066 spin_unlock(&pers_lock);
27067 seq_printf(seq, "\n");
27068- mi->event = atomic_read(&md_event_count);
27069+ mi->event = atomic_read_unchecked(&md_event_count);
27070 return 0;
27071 }
27072 if (v == (void*)2) {
27073@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27074 chunk_kb ? "KB" : "B");
27075 if (bitmap->file) {
27076 seq_printf(seq, ", file: ");
27077- seq_path(seq, &bitmap->file->f_path, " \t\n");
27078+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27079 }
27080
27081 seq_printf(seq, "\n");
27082@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27083 else {
27084 struct seq_file *p = file->private_data;
27085 p->private = mi;
27086- mi->event = atomic_read(&md_event_count);
27087+ mi->event = atomic_read_unchecked(&md_event_count);
27088 }
27089 return error;
27090 }
27091@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27092 /* always allow read */
27093 mask = POLLIN | POLLRDNORM;
27094
27095- if (mi->event != atomic_read(&md_event_count))
27096+ if (mi->event != atomic_read_unchecked(&md_event_count))
27097 mask |= POLLERR | POLLPRI;
27098 return mask;
27099 }
27100@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27101 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27102 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27103 (int)part_stat_read(&disk->part0, sectors[1]) -
27104- atomic_read(&disk->sync_io);
27105+ atomic_read_unchecked(&disk->sync_io);
27106 /* sync IO will cause sync_io to increase before the disk_stats
27107 * as sync_io is counted when a request starts, and
27108 * disk_stats is counted when it completes.
27109diff -urNp linux-3.0.3/drivers/md/md.h linux-3.0.3/drivers/md/md.h
27110--- linux-3.0.3/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27111+++ linux-3.0.3/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27112@@ -97,13 +97,13 @@ struct mdk_rdev_s
27113 * only maintained for arrays that
27114 * support hot removal
27115 */
27116- atomic_t read_errors; /* number of consecutive read errors that
27117+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
27118 * we have tried to ignore.
27119 */
27120 struct timespec last_read_error; /* monotonic time since our
27121 * last read error
27122 */
27123- atomic_t corrected_errors; /* number of corrected read errors,
27124+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27125 * for reporting to userspace and storing
27126 * in superblock.
27127 */
27128@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27129
27130 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27131 {
27132- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27133+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27134 }
27135
27136 struct mdk_personality
27137diff -urNp linux-3.0.3/drivers/md/raid10.c linux-3.0.3/drivers/md/raid10.c
27138--- linux-3.0.3/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27139+++ linux-3.0.3/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27140@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27141 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27142 set_bit(R10BIO_Uptodate, &r10_bio->state);
27143 else {
27144- atomic_add(r10_bio->sectors,
27145+ atomic_add_unchecked(r10_bio->sectors,
27146 &conf->mirrors[d].rdev->corrected_errors);
27147 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27148 md_error(r10_bio->mddev,
27149@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27150 {
27151 struct timespec cur_time_mon;
27152 unsigned long hours_since_last;
27153- unsigned int read_errors = atomic_read(&rdev->read_errors);
27154+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27155
27156 ktime_get_ts(&cur_time_mon);
27157
27158@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27159 * overflowing the shift of read_errors by hours_since_last.
27160 */
27161 if (hours_since_last >= 8 * sizeof(read_errors))
27162- atomic_set(&rdev->read_errors, 0);
27163+ atomic_set_unchecked(&rdev->read_errors, 0);
27164 else
27165- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27166+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27167 }
27168
27169 /*
27170@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27171 return;
27172
27173 check_decay_read_errors(mddev, rdev);
27174- atomic_inc(&rdev->read_errors);
27175- if (atomic_read(&rdev->read_errors) > max_read_errors) {
27176+ atomic_inc_unchecked(&rdev->read_errors);
27177+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27178 char b[BDEVNAME_SIZE];
27179 bdevname(rdev->bdev, b);
27180
27181@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27182 "md/raid10:%s: %s: Raid device exceeded "
27183 "read_error threshold [cur %d:max %d]\n",
27184 mdname(mddev), b,
27185- atomic_read(&rdev->read_errors), max_read_errors);
27186+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27187 printk(KERN_NOTICE
27188 "md/raid10:%s: %s: Failing raid device\n",
27189 mdname(mddev), b);
27190@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27191 test_bit(In_sync, &rdev->flags)) {
27192 atomic_inc(&rdev->nr_pending);
27193 rcu_read_unlock();
27194- atomic_add(s, &rdev->corrected_errors);
27195+ atomic_add_unchecked(s, &rdev->corrected_errors);
27196 if (sync_page_io(rdev,
27197 r10_bio->devs[sl].addr +
27198 sect,
27199diff -urNp linux-3.0.3/drivers/md/raid1.c linux-3.0.3/drivers/md/raid1.c
27200--- linux-3.0.3/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27201+++ linux-3.0.3/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27202@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27203 rdev_dec_pending(rdev, mddev);
27204 md_error(mddev, rdev);
27205 } else
27206- atomic_add(s, &rdev->corrected_errors);
27207+ atomic_add_unchecked(s, &rdev->corrected_errors);
27208 }
27209 d = start;
27210 while (d != r1_bio->read_disk) {
27211@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27212 /* Well, this device is dead */
27213 md_error(mddev, rdev);
27214 else {
27215- atomic_add(s, &rdev->corrected_errors);
27216+ atomic_add_unchecked(s, &rdev->corrected_errors);
27217 printk(KERN_INFO
27218 "md/raid1:%s: read error corrected "
27219 "(%d sectors at %llu on %s)\n",
27220diff -urNp linux-3.0.3/drivers/md/raid5.c linux-3.0.3/drivers/md/raid5.c
27221--- linux-3.0.3/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27222+++ linux-3.0.3/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27223@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27224 bi->bi_next = NULL;
27225 if ((rw & WRITE) &&
27226 test_bit(R5_ReWrite, &sh->dev[i].flags))
27227- atomic_add(STRIPE_SECTORS,
27228+ atomic_add_unchecked(STRIPE_SECTORS,
27229 &rdev->corrected_errors);
27230 generic_make_request(bi);
27231 } else {
27232@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27233 clear_bit(R5_ReadError, &sh->dev[i].flags);
27234 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27235 }
27236- if (atomic_read(&conf->disks[i].rdev->read_errors))
27237- atomic_set(&conf->disks[i].rdev->read_errors, 0);
27238+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27239+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27240 } else {
27241 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27242 int retry = 0;
27243 rdev = conf->disks[i].rdev;
27244
27245 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27246- atomic_inc(&rdev->read_errors);
27247+ atomic_inc_unchecked(&rdev->read_errors);
27248 if (conf->mddev->degraded >= conf->max_degraded)
27249 printk_rl(KERN_WARNING
27250 "md/raid:%s: read error not correctable "
27251@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27252 (unsigned long long)(sh->sector
27253 + rdev->data_offset),
27254 bdn);
27255- else if (atomic_read(&rdev->read_errors)
27256+ else if (atomic_read_unchecked(&rdev->read_errors)
27257 > conf->max_nr_stripes)
27258 printk(KERN_WARNING
27259 "md/raid:%s: Too many read errors, failing device %s.\n",
27260@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27261 sector_t r_sector;
27262 struct stripe_head sh2;
27263
27264+ pax_track_stack();
27265
27266 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27267 stripe = new_sector;
27268diff -urNp linux-3.0.3/drivers/media/common/saa7146_hlp.c linux-3.0.3/drivers/media/common/saa7146_hlp.c
27269--- linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27270+++ linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27271@@ -353,6 +353,8 @@ static void calculate_clipping_registers
27272
27273 int x[32], y[32], w[32], h[32];
27274
27275+ pax_track_stack();
27276+
27277 /* clear out memory */
27278 memset(&line_list[0], 0x00, sizeof(u32)*32);
27279 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27280diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27281--- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27282+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27283@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27284 u8 buf[HOST_LINK_BUF_SIZE];
27285 int i;
27286
27287+ pax_track_stack();
27288+
27289 dprintk("%s\n", __func__);
27290
27291 /* check if we have space for a link buf in the rx_buffer */
27292@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27293 unsigned long timeout;
27294 int written;
27295
27296+ pax_track_stack();
27297+
27298 dprintk("%s\n", __func__);
27299
27300 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27301diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h
27302--- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27303+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27304@@ -68,12 +68,12 @@ struct dvb_demux_feed {
27305 union {
27306 struct dmx_ts_feed ts;
27307 struct dmx_section_feed sec;
27308- } feed;
27309+ } __no_const feed;
27310
27311 union {
27312 dmx_ts_cb ts;
27313 dmx_section_cb sec;
27314- } cb;
27315+ } __no_const cb;
27316
27317 struct dvb_demux *demux;
27318 void *priv;
27319diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c
27320--- linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27321+++ linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27322@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27323 const struct dvb_device *template, void *priv, int type)
27324 {
27325 struct dvb_device *dvbdev;
27326- struct file_operations *dvbdevfops;
27327+ file_operations_no_const *dvbdevfops;
27328 struct device *clsdev;
27329 int minor;
27330 int id;
27331diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c
27332--- linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27333+++ linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27334@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27335 struct dib0700_adapter_state {
27336 int (*set_param_save) (struct dvb_frontend *,
27337 struct dvb_frontend_parameters *);
27338-};
27339+} __no_const;
27340
27341 static int dib7070_set_param_override(struct dvb_frontend *fe,
27342 struct dvb_frontend_parameters *fep)
27343diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c
27344--- linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27345+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27346@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27347 if (!buf)
27348 return -ENOMEM;
27349
27350+ pax_track_stack();
27351+
27352 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27353 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27354 hx.addr, hx.len, hx.chk);
27355diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h
27356--- linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27357+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27358@@ -97,7 +97,7 @@
27359 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27360
27361 struct dibusb_state {
27362- struct dib_fe_xfer_ops ops;
27363+ dib_fe_xfer_ops_no_const ops;
27364 int mt2060_present;
27365 u8 tuner_addr;
27366 };
27367diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c
27368--- linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27369+++ linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27370@@ -95,7 +95,7 @@ struct su3000_state {
27371
27372 struct s6x0_state {
27373 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27374-};
27375+} __no_const;
27376
27377 /* debug */
27378 static int dvb_usb_dw2102_debug;
27379diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c
27380--- linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27381+++ linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27382@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27383 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27384 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27385
27386+ pax_track_stack();
27387
27388 data[0] = 0x8a;
27389 len_in = 1;
27390@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27391 int ret = 0, len_in;
27392 u8 data[512] = {0};
27393
27394+ pax_track_stack();
27395+
27396 data[0] = 0x0a;
27397 len_in = 1;
27398 info("FRM Firmware Cold Reset");
27399diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000.h linux-3.0.3/drivers/media/dvb/frontends/dib3000.h
27400--- linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27401+++ linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27402@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27403 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27404 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27405 };
27406+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27407
27408 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27409 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27410- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27411+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27412 #else
27413 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27414 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27415diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c
27416--- linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27417+++ linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27418@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27419 static struct dvb_frontend_ops dib3000mb_ops;
27420
27421 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27422- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27423+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27424 {
27425 struct dib3000_state* state = NULL;
27426
27427diff -urNp linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c
27428--- linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27429+++ linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27430@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27431 int ret = -1;
27432 int sync;
27433
27434+ pax_track_stack();
27435+
27436 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27437
27438 fcp = 3000;
27439diff -urNp linux-3.0.3/drivers/media/dvb/frontends/or51211.c linux-3.0.3/drivers/media/dvb/frontends/or51211.c
27440--- linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27441+++ linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27442@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27443 u8 tudata[585];
27444 int i;
27445
27446+ pax_track_stack();
27447+
27448 dprintk("Firmware is %zd bytes\n",fw->size);
27449
27450 /* Get eprom data */
27451diff -urNp linux-3.0.3/drivers/media/video/cx18/cx18-driver.c linux-3.0.3/drivers/media/video/cx18/cx18-driver.c
27452--- linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27453+++ linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27454@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27455 struct i2c_client c;
27456 u8 eedata[256];
27457
27458+ pax_track_stack();
27459+
27460 memset(&c, 0, sizeof(c));
27461 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27462 c.adapter = &cx->i2c_adap[0];
27463diff -urNp linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c
27464--- linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27465+++ linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27466@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27467 bool handle = false;
27468 struct ir_raw_event ir_core_event[64];
27469
27470+ pax_track_stack();
27471+
27472 do {
27473 num = 0;
27474 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27475diff -urNp linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27476--- linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27477+++ linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27478@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27479 u8 *eeprom;
27480 struct tveeprom tvdata;
27481
27482+ pax_track_stack();
27483+
27484 memset(&tvdata,0,sizeof(tvdata));
27485
27486 eeprom = pvr2_eeprom_fetch(hdw);
27487diff -urNp linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c
27488--- linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27489+++ linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27490@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27491 unsigned char localPAT[256];
27492 unsigned char localPMT[256];
27493
27494+ pax_track_stack();
27495+
27496 /* Set video format - must be done first as it resets other settings */
27497 set_reg8(client, 0x41, h->video_format);
27498
27499diff -urNp linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c
27500--- linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27501+++ linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27502@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27503 u8 tmp[512];
27504 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27505
27506+ pax_track_stack();
27507+
27508 /* While any outstand message on the bus exists... */
27509 do {
27510
27511@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27512 u8 tmp[512];
27513 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27514
27515+ pax_track_stack();
27516+
27517 while (loop) {
27518
27519 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27520diff -urNp linux-3.0.3/drivers/media/video/timblogiw.c linux-3.0.3/drivers/media/video/timblogiw.c
27521--- linux-3.0.3/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27522+++ linux-3.0.3/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27523@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27524
27525 /* Platform device functions */
27526
27527-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27528+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27529 .vidioc_querycap = timblogiw_querycap,
27530 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27531 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27532diff -urNp linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c
27533--- linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27534+++ linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27535@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27536 unsigned char rv, gv, bv;
27537 static unsigned char *Y, *U, *V;
27538
27539+ pax_track_stack();
27540+
27541 frame = usbvision->cur_frame;
27542 image_size = frame->frmwidth * frame->frmheight;
27543 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27544diff -urNp linux-3.0.3/drivers/media/video/videobuf-dma-sg.c linux-3.0.3/drivers/media/video/videobuf-dma-sg.c
27545--- linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27546+++ linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27547@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27548 {
27549 struct videobuf_queue q;
27550
27551+ pax_track_stack();
27552+
27553 /* Required to make generic handler to call __videobuf_alloc */
27554 q.int_ops = &sg_ops;
27555
27556diff -urNp linux-3.0.3/drivers/message/fusion/mptbase.c linux-3.0.3/drivers/message/fusion/mptbase.c
27557--- linux-3.0.3/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27558+++ linux-3.0.3/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27559@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27560 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27561 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27562
27563+#ifdef CONFIG_GRKERNSEC_HIDESYM
27564+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27565+#else
27566 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27567 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27568+#endif
27569+
27570 /*
27571 * Rounding UP to nearest 4-kB boundary here...
27572 */
27573diff -urNp linux-3.0.3/drivers/message/fusion/mptsas.c linux-3.0.3/drivers/message/fusion/mptsas.c
27574--- linux-3.0.3/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27575+++ linux-3.0.3/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27576@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27577 return 0;
27578 }
27579
27580+static inline void
27581+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27582+{
27583+ if (phy_info->port_details) {
27584+ phy_info->port_details->rphy = rphy;
27585+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27586+ ioc->name, rphy));
27587+ }
27588+
27589+ if (rphy) {
27590+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27591+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27592+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27593+ ioc->name, rphy, rphy->dev.release));
27594+ }
27595+}
27596+
27597 /* no mutex */
27598 static void
27599 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27600@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27601 return NULL;
27602 }
27603
27604-static inline void
27605-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27606-{
27607- if (phy_info->port_details) {
27608- phy_info->port_details->rphy = rphy;
27609- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27610- ioc->name, rphy));
27611- }
27612-
27613- if (rphy) {
27614- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27615- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27616- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27617- ioc->name, rphy, rphy->dev.release));
27618- }
27619-}
27620-
27621 static inline struct sas_port *
27622 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27623 {
27624diff -urNp linux-3.0.3/drivers/message/fusion/mptscsih.c linux-3.0.3/drivers/message/fusion/mptscsih.c
27625--- linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27626+++ linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27627@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27628
27629 h = shost_priv(SChost);
27630
27631- if (h) {
27632- if (h->info_kbuf == NULL)
27633- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27634- return h->info_kbuf;
27635- h->info_kbuf[0] = '\0';
27636+ if (!h)
27637+ return NULL;
27638
27639- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27640- h->info_kbuf[size-1] = '\0';
27641- }
27642+ if (h->info_kbuf == NULL)
27643+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27644+ return h->info_kbuf;
27645+ h->info_kbuf[0] = '\0';
27646+
27647+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27648+ h->info_kbuf[size-1] = '\0';
27649
27650 return h->info_kbuf;
27651 }
27652diff -urNp linux-3.0.3/drivers/message/i2o/i2o_config.c linux-3.0.3/drivers/message/i2o/i2o_config.c
27653--- linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27654+++ linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27655@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27656 struct i2o_message *msg;
27657 unsigned int iop;
27658
27659+ pax_track_stack();
27660+
27661 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27662 return -EFAULT;
27663
27664diff -urNp linux-3.0.3/drivers/message/i2o/i2o_proc.c linux-3.0.3/drivers/message/i2o/i2o_proc.c
27665--- linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27666+++ linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27667@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27668 "Array Controller Device"
27669 };
27670
27671-static char *chtostr(u8 * chars, int n)
27672-{
27673- char tmp[256];
27674- tmp[0] = 0;
27675- return strncat(tmp, (char *)chars, n);
27676-}
27677-
27678 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27679 char *group)
27680 {
27681@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27682
27683 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27684 seq_printf(seq, "%-#8x", ddm_table.module_id);
27685- seq_printf(seq, "%-29s",
27686- chtostr(ddm_table.module_name_version, 28));
27687+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27688 seq_printf(seq, "%9d ", ddm_table.data_size);
27689 seq_printf(seq, "%8d", ddm_table.code_size);
27690
27691@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27692
27693 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27694 seq_printf(seq, "%-#8x", dst->module_id);
27695- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27696- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27697+ seq_printf(seq, "%-.28s", dst->module_name_version);
27698+ seq_printf(seq, "%-.8s", dst->date);
27699 seq_printf(seq, "%8d ", dst->module_size);
27700 seq_printf(seq, "%8d ", dst->mpb_size);
27701 seq_printf(seq, "0x%04x", dst->module_flags);
27702@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27703 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27704 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27705 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27706- seq_printf(seq, "Vendor info : %s\n",
27707- chtostr((u8 *) (work32 + 2), 16));
27708- seq_printf(seq, "Product info : %s\n",
27709- chtostr((u8 *) (work32 + 6), 16));
27710- seq_printf(seq, "Description : %s\n",
27711- chtostr((u8 *) (work32 + 10), 16));
27712- seq_printf(seq, "Product rev. : %s\n",
27713- chtostr((u8 *) (work32 + 14), 8));
27714+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27715+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27716+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27717+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27718
27719 seq_printf(seq, "Serial number : ");
27720 print_serial_number(seq, (u8 *) (work32 + 16),
27721@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27722 }
27723
27724 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27725- seq_printf(seq, "Module name : %s\n",
27726- chtostr(result.module_name, 24));
27727- seq_printf(seq, "Module revision : %s\n",
27728- chtostr(result.module_rev, 8));
27729+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
27730+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27731
27732 seq_printf(seq, "Serial number : ");
27733 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27734@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27735 return 0;
27736 }
27737
27738- seq_printf(seq, "Device name : %s\n",
27739- chtostr(result.device_name, 64));
27740- seq_printf(seq, "Service name : %s\n",
27741- chtostr(result.service_name, 64));
27742- seq_printf(seq, "Physical name : %s\n",
27743- chtostr(result.physical_location, 64));
27744- seq_printf(seq, "Instance number : %s\n",
27745- chtostr(result.instance_number, 4));
27746+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
27747+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
27748+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27749+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27750
27751 return 0;
27752 }
27753diff -urNp linux-3.0.3/drivers/message/i2o/iop.c linux-3.0.3/drivers/message/i2o/iop.c
27754--- linux-3.0.3/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27755+++ linux-3.0.3/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27756@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27757
27758 spin_lock_irqsave(&c->context_list_lock, flags);
27759
27760- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27761- atomic_inc(&c->context_list_counter);
27762+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27763+ atomic_inc_unchecked(&c->context_list_counter);
27764
27765- entry->context = atomic_read(&c->context_list_counter);
27766+ entry->context = atomic_read_unchecked(&c->context_list_counter);
27767
27768 list_add(&entry->list, &c->context_list);
27769
27770@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27771
27772 #if BITS_PER_LONG == 64
27773 spin_lock_init(&c->context_list_lock);
27774- atomic_set(&c->context_list_counter, 0);
27775+ atomic_set_unchecked(&c->context_list_counter, 0);
27776 INIT_LIST_HEAD(&c->context_list);
27777 #endif
27778
27779diff -urNp linux-3.0.3/drivers/mfd/abx500-core.c linux-3.0.3/drivers/mfd/abx500-core.c
27780--- linux-3.0.3/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27781+++ linux-3.0.3/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27782@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27783
27784 struct abx500_device_entry {
27785 struct list_head list;
27786- struct abx500_ops ops;
27787+ abx500_ops_no_const ops;
27788 struct device *dev;
27789 };
27790
27791diff -urNp linux-3.0.3/drivers/mfd/janz-cmodio.c linux-3.0.3/drivers/mfd/janz-cmodio.c
27792--- linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27793+++ linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27794@@ -13,6 +13,7 @@
27795
27796 #include <linux/kernel.h>
27797 #include <linux/module.h>
27798+#include <linux/slab.h>
27799 #include <linux/init.h>
27800 #include <linux/pci.h>
27801 #include <linux/interrupt.h>
27802diff -urNp linux-3.0.3/drivers/mfd/wm8350-i2c.c linux-3.0.3/drivers/mfd/wm8350-i2c.c
27803--- linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27804+++ linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27805@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27806 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27807 int ret;
27808
27809+ pax_track_stack();
27810+
27811 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27812 return -EINVAL;
27813
27814diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c
27815--- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27816+++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27817@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27818 * the lid is closed. This leads to interrupts as soon as a little move
27819 * is done.
27820 */
27821- atomic_inc(&lis3_dev.count);
27822+ atomic_inc_unchecked(&lis3_dev.count);
27823
27824 wake_up_interruptible(&lis3_dev.misc_wait);
27825 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27826@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27827 if (lis3_dev.pm_dev)
27828 pm_runtime_get_sync(lis3_dev.pm_dev);
27829
27830- atomic_set(&lis3_dev.count, 0);
27831+ atomic_set_unchecked(&lis3_dev.count, 0);
27832 return 0;
27833 }
27834
27835@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27836 add_wait_queue(&lis3_dev.misc_wait, &wait);
27837 while (true) {
27838 set_current_state(TASK_INTERRUPTIBLE);
27839- data = atomic_xchg(&lis3_dev.count, 0);
27840+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27841 if (data)
27842 break;
27843
27844@@ -583,7 +583,7 @@ out:
27845 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27846 {
27847 poll_wait(file, &lis3_dev.misc_wait, wait);
27848- if (atomic_read(&lis3_dev.count))
27849+ if (atomic_read_unchecked(&lis3_dev.count))
27850 return POLLIN | POLLRDNORM;
27851 return 0;
27852 }
27853diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h
27854--- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27855+++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27856@@ -265,7 +265,7 @@ struct lis3lv02d {
27857 struct input_polled_dev *idev; /* input device */
27858 struct platform_device *pdev; /* platform device */
27859 struct regulator_bulk_data regulators[2];
27860- atomic_t count; /* interrupt count after last read */
27861+ atomic_unchecked_t count; /* interrupt count after last read */
27862 union axis_conversion ac; /* hw -> logical axis */
27863 int mapped_btns[3];
27864
27865diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c
27866--- linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27867+++ linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27868@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27869 unsigned long nsec;
27870
27871 nsec = CLKS2NSEC(clks);
27872- atomic_long_inc(&mcs_op_statistics[op].count);
27873- atomic_long_add(nsec, &mcs_op_statistics[op].total);
27874+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27875+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27876 if (mcs_op_statistics[op].max < nsec)
27877 mcs_op_statistics[op].max = nsec;
27878 }
27879diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c
27880--- linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27881+++ linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27882@@ -32,9 +32,9 @@
27883
27884 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27885
27886-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27887+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27888 {
27889- unsigned long val = atomic_long_read(v);
27890+ unsigned long val = atomic_long_read_unchecked(v);
27891
27892 seq_printf(s, "%16lu %s\n", val, id);
27893 }
27894@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27895
27896 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27897 for (op = 0; op < mcsop_last; op++) {
27898- count = atomic_long_read(&mcs_op_statistics[op].count);
27899- total = atomic_long_read(&mcs_op_statistics[op].total);
27900+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27901+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27902 max = mcs_op_statistics[op].max;
27903 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27904 count ? total / count : 0, max);
27905diff -urNp linux-3.0.3/drivers/misc/sgi-gru/grutables.h linux-3.0.3/drivers/misc/sgi-gru/grutables.h
27906--- linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27907+++ linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27908@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27909 * GRU statistics.
27910 */
27911 struct gru_stats_s {
27912- atomic_long_t vdata_alloc;
27913- atomic_long_t vdata_free;
27914- atomic_long_t gts_alloc;
27915- atomic_long_t gts_free;
27916- atomic_long_t gms_alloc;
27917- atomic_long_t gms_free;
27918- atomic_long_t gts_double_allocate;
27919- atomic_long_t assign_context;
27920- atomic_long_t assign_context_failed;
27921- atomic_long_t free_context;
27922- atomic_long_t load_user_context;
27923- atomic_long_t load_kernel_context;
27924- atomic_long_t lock_kernel_context;
27925- atomic_long_t unlock_kernel_context;
27926- atomic_long_t steal_user_context;
27927- atomic_long_t steal_kernel_context;
27928- atomic_long_t steal_context_failed;
27929- atomic_long_t nopfn;
27930- atomic_long_t asid_new;
27931- atomic_long_t asid_next;
27932- atomic_long_t asid_wrap;
27933- atomic_long_t asid_reuse;
27934- atomic_long_t intr;
27935- atomic_long_t intr_cbr;
27936- atomic_long_t intr_tfh;
27937- atomic_long_t intr_spurious;
27938- atomic_long_t intr_mm_lock_failed;
27939- atomic_long_t call_os;
27940- atomic_long_t call_os_wait_queue;
27941- atomic_long_t user_flush_tlb;
27942- atomic_long_t user_unload_context;
27943- atomic_long_t user_exception;
27944- atomic_long_t set_context_option;
27945- atomic_long_t check_context_retarget_intr;
27946- atomic_long_t check_context_unload;
27947- atomic_long_t tlb_dropin;
27948- atomic_long_t tlb_preload_page;
27949- atomic_long_t tlb_dropin_fail_no_asid;
27950- atomic_long_t tlb_dropin_fail_upm;
27951- atomic_long_t tlb_dropin_fail_invalid;
27952- atomic_long_t tlb_dropin_fail_range_active;
27953- atomic_long_t tlb_dropin_fail_idle;
27954- atomic_long_t tlb_dropin_fail_fmm;
27955- atomic_long_t tlb_dropin_fail_no_exception;
27956- atomic_long_t tfh_stale_on_fault;
27957- atomic_long_t mmu_invalidate_range;
27958- atomic_long_t mmu_invalidate_page;
27959- atomic_long_t flush_tlb;
27960- atomic_long_t flush_tlb_gru;
27961- atomic_long_t flush_tlb_gru_tgh;
27962- atomic_long_t flush_tlb_gru_zero_asid;
27963-
27964- atomic_long_t copy_gpa;
27965- atomic_long_t read_gpa;
27966-
27967- atomic_long_t mesq_receive;
27968- atomic_long_t mesq_receive_none;
27969- atomic_long_t mesq_send;
27970- atomic_long_t mesq_send_failed;
27971- atomic_long_t mesq_noop;
27972- atomic_long_t mesq_send_unexpected_error;
27973- atomic_long_t mesq_send_lb_overflow;
27974- atomic_long_t mesq_send_qlimit_reached;
27975- atomic_long_t mesq_send_amo_nacked;
27976- atomic_long_t mesq_send_put_nacked;
27977- atomic_long_t mesq_page_overflow;
27978- atomic_long_t mesq_qf_locked;
27979- atomic_long_t mesq_qf_noop_not_full;
27980- atomic_long_t mesq_qf_switch_head_failed;
27981- atomic_long_t mesq_qf_unexpected_error;
27982- atomic_long_t mesq_noop_unexpected_error;
27983- atomic_long_t mesq_noop_lb_overflow;
27984- atomic_long_t mesq_noop_qlimit_reached;
27985- atomic_long_t mesq_noop_amo_nacked;
27986- atomic_long_t mesq_noop_put_nacked;
27987- atomic_long_t mesq_noop_page_overflow;
27988+ atomic_long_unchecked_t vdata_alloc;
27989+ atomic_long_unchecked_t vdata_free;
27990+ atomic_long_unchecked_t gts_alloc;
27991+ atomic_long_unchecked_t gts_free;
27992+ atomic_long_unchecked_t gms_alloc;
27993+ atomic_long_unchecked_t gms_free;
27994+ atomic_long_unchecked_t gts_double_allocate;
27995+ atomic_long_unchecked_t assign_context;
27996+ atomic_long_unchecked_t assign_context_failed;
27997+ atomic_long_unchecked_t free_context;
27998+ atomic_long_unchecked_t load_user_context;
27999+ atomic_long_unchecked_t load_kernel_context;
28000+ atomic_long_unchecked_t lock_kernel_context;
28001+ atomic_long_unchecked_t unlock_kernel_context;
28002+ atomic_long_unchecked_t steal_user_context;
28003+ atomic_long_unchecked_t steal_kernel_context;
28004+ atomic_long_unchecked_t steal_context_failed;
28005+ atomic_long_unchecked_t nopfn;
28006+ atomic_long_unchecked_t asid_new;
28007+ atomic_long_unchecked_t asid_next;
28008+ atomic_long_unchecked_t asid_wrap;
28009+ atomic_long_unchecked_t asid_reuse;
28010+ atomic_long_unchecked_t intr;
28011+ atomic_long_unchecked_t intr_cbr;
28012+ atomic_long_unchecked_t intr_tfh;
28013+ atomic_long_unchecked_t intr_spurious;
28014+ atomic_long_unchecked_t intr_mm_lock_failed;
28015+ atomic_long_unchecked_t call_os;
28016+ atomic_long_unchecked_t call_os_wait_queue;
28017+ atomic_long_unchecked_t user_flush_tlb;
28018+ atomic_long_unchecked_t user_unload_context;
28019+ atomic_long_unchecked_t user_exception;
28020+ atomic_long_unchecked_t set_context_option;
28021+ atomic_long_unchecked_t check_context_retarget_intr;
28022+ atomic_long_unchecked_t check_context_unload;
28023+ atomic_long_unchecked_t tlb_dropin;
28024+ atomic_long_unchecked_t tlb_preload_page;
28025+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28026+ atomic_long_unchecked_t tlb_dropin_fail_upm;
28027+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
28028+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
28029+ atomic_long_unchecked_t tlb_dropin_fail_idle;
28030+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
28031+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28032+ atomic_long_unchecked_t tfh_stale_on_fault;
28033+ atomic_long_unchecked_t mmu_invalidate_range;
28034+ atomic_long_unchecked_t mmu_invalidate_page;
28035+ atomic_long_unchecked_t flush_tlb;
28036+ atomic_long_unchecked_t flush_tlb_gru;
28037+ atomic_long_unchecked_t flush_tlb_gru_tgh;
28038+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28039+
28040+ atomic_long_unchecked_t copy_gpa;
28041+ atomic_long_unchecked_t read_gpa;
28042+
28043+ atomic_long_unchecked_t mesq_receive;
28044+ atomic_long_unchecked_t mesq_receive_none;
28045+ atomic_long_unchecked_t mesq_send;
28046+ atomic_long_unchecked_t mesq_send_failed;
28047+ atomic_long_unchecked_t mesq_noop;
28048+ atomic_long_unchecked_t mesq_send_unexpected_error;
28049+ atomic_long_unchecked_t mesq_send_lb_overflow;
28050+ atomic_long_unchecked_t mesq_send_qlimit_reached;
28051+ atomic_long_unchecked_t mesq_send_amo_nacked;
28052+ atomic_long_unchecked_t mesq_send_put_nacked;
28053+ atomic_long_unchecked_t mesq_page_overflow;
28054+ atomic_long_unchecked_t mesq_qf_locked;
28055+ atomic_long_unchecked_t mesq_qf_noop_not_full;
28056+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
28057+ atomic_long_unchecked_t mesq_qf_unexpected_error;
28058+ atomic_long_unchecked_t mesq_noop_unexpected_error;
28059+ atomic_long_unchecked_t mesq_noop_lb_overflow;
28060+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
28061+ atomic_long_unchecked_t mesq_noop_amo_nacked;
28062+ atomic_long_unchecked_t mesq_noop_put_nacked;
28063+ atomic_long_unchecked_t mesq_noop_page_overflow;
28064
28065 };
28066
28067@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28068 tghop_invalidate, mcsop_last};
28069
28070 struct mcs_op_statistic {
28071- atomic_long_t count;
28072- atomic_long_t total;
28073+ atomic_long_unchecked_t count;
28074+ atomic_long_unchecked_t total;
28075 unsigned long max;
28076 };
28077
28078@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28079
28080 #define STAT(id) do { \
28081 if (gru_options & OPT_STATS) \
28082- atomic_long_inc(&gru_stats.id); \
28083+ atomic_long_inc_unchecked(&gru_stats.id); \
28084 } while (0)
28085
28086 #ifdef CONFIG_SGI_GRU_DEBUG
28087diff -urNp linux-3.0.3/drivers/misc/sgi-xp/xp.h linux-3.0.3/drivers/misc/sgi-xp/xp.h
28088--- linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28089+++ linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28090@@ -289,7 +289,7 @@ struct xpc_interface {
28091 xpc_notify_func, void *);
28092 void (*received) (short, int, void *);
28093 enum xp_retval (*partid_to_nasids) (short, void *);
28094-};
28095+} __no_const;
28096
28097 extern struct xpc_interface xpc_interface;
28098
28099diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c
28100--- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28101+++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28102@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28103 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28104 unsigned long timeo = jiffies + HZ;
28105
28106+ pax_track_stack();
28107+
28108 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28109 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28110 goto sleep;
28111@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28112 unsigned long initial_adr;
28113 int initial_len = len;
28114
28115+ pax_track_stack();
28116+
28117 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28118 adr += chip->start;
28119 initial_adr = adr;
28120@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28121 int retries = 3;
28122 int ret;
28123
28124+ pax_track_stack();
28125+
28126 adr += chip->start;
28127
28128 retry:
28129diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c
28130--- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28131+++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28132@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28133 unsigned long cmd_addr;
28134 struct cfi_private *cfi = map->fldrv_priv;
28135
28136+ pax_track_stack();
28137+
28138 adr += chip->start;
28139
28140 /* Ensure cmd read/writes are aligned. */
28141@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28142 DECLARE_WAITQUEUE(wait, current);
28143 int wbufsize, z;
28144
28145+ pax_track_stack();
28146+
28147 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28148 if (adr & (map_bankwidth(map)-1))
28149 return -EINVAL;
28150@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28151 DECLARE_WAITQUEUE(wait, current);
28152 int ret = 0;
28153
28154+ pax_track_stack();
28155+
28156 adr += chip->start;
28157
28158 /* Let's determine this according to the interleave only once */
28159@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28160 unsigned long timeo = jiffies + HZ;
28161 DECLARE_WAITQUEUE(wait, current);
28162
28163+ pax_track_stack();
28164+
28165 adr += chip->start;
28166
28167 /* Let's determine this according to the interleave only once */
28168@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28169 unsigned long timeo = jiffies + HZ;
28170 DECLARE_WAITQUEUE(wait, current);
28171
28172+ pax_track_stack();
28173+
28174 adr += chip->start;
28175
28176 /* Let's determine this according to the interleave only once */
28177diff -urNp linux-3.0.3/drivers/mtd/devices/doc2000.c linux-3.0.3/drivers/mtd/devices/doc2000.c
28178--- linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28179+++ linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28180@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28181
28182 /* The ECC will not be calculated correctly if less than 512 is written */
28183 /* DBB-
28184- if (len != 0x200 && eccbuf)
28185+ if (len != 0x200)
28186 printk(KERN_WARNING
28187 "ECC needs a full sector write (adr: %lx size %lx)\n",
28188 (long) to, (long) len);
28189diff -urNp linux-3.0.3/drivers/mtd/devices/doc2001.c linux-3.0.3/drivers/mtd/devices/doc2001.c
28190--- linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28191+++ linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28192@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28193 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28194
28195 /* Don't allow read past end of device */
28196- if (from >= this->totlen)
28197+ if (from >= this->totlen || !len)
28198 return -EINVAL;
28199
28200 /* Don't allow a single read to cross a 512-byte block boundary */
28201diff -urNp linux-3.0.3/drivers/mtd/ftl.c linux-3.0.3/drivers/mtd/ftl.c
28202--- linux-3.0.3/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28203+++ linux-3.0.3/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28204@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28205 loff_t offset;
28206 uint16_t srcunitswap = cpu_to_le16(srcunit);
28207
28208+ pax_track_stack();
28209+
28210 eun = &part->EUNInfo[srcunit];
28211 xfer = &part->XferInfo[xferunit];
28212 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28213diff -urNp linux-3.0.3/drivers/mtd/inftlcore.c linux-3.0.3/drivers/mtd/inftlcore.c
28214--- linux-3.0.3/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28215+++ linux-3.0.3/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28216@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28217 struct inftl_oob oob;
28218 size_t retlen;
28219
28220+ pax_track_stack();
28221+
28222 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28223 "pending=%d)\n", inftl, thisVUC, pendingblock);
28224
28225diff -urNp linux-3.0.3/drivers/mtd/inftlmount.c linux-3.0.3/drivers/mtd/inftlmount.c
28226--- linux-3.0.3/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28227+++ linux-3.0.3/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28228@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28229 struct INFTLPartition *ip;
28230 size_t retlen;
28231
28232+ pax_track_stack();
28233+
28234 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28235
28236 /*
28237diff -urNp linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c
28238--- linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28239+++ linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28240@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28241 {
28242 map_word pfow_val[4];
28243
28244+ pax_track_stack();
28245+
28246 /* Check identification string */
28247 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28248 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28249diff -urNp linux-3.0.3/drivers/mtd/mtdchar.c linux-3.0.3/drivers/mtd/mtdchar.c
28250--- linux-3.0.3/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28251+++ linux-3.0.3/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28252@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28253 u_long size;
28254 struct mtd_info_user info;
28255
28256+ pax_track_stack();
28257+
28258 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28259
28260 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28261diff -urNp linux-3.0.3/drivers/mtd/nand/denali.c linux-3.0.3/drivers/mtd/nand/denali.c
28262--- linux-3.0.3/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28263+++ linux-3.0.3/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28264@@ -26,6 +26,7 @@
28265 #include <linux/pci.h>
28266 #include <linux/mtd/mtd.h>
28267 #include <linux/module.h>
28268+#include <linux/slab.h>
28269
28270 #include "denali.h"
28271
28272diff -urNp linux-3.0.3/drivers/mtd/nftlcore.c linux-3.0.3/drivers/mtd/nftlcore.c
28273--- linux-3.0.3/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28274+++ linux-3.0.3/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28275@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28276 int inplace = 1;
28277 size_t retlen;
28278
28279+ pax_track_stack();
28280+
28281 memset(BlockMap, 0xff, sizeof(BlockMap));
28282 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28283
28284diff -urNp linux-3.0.3/drivers/mtd/nftlmount.c linux-3.0.3/drivers/mtd/nftlmount.c
28285--- linux-3.0.3/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28286+++ linux-3.0.3/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28287@@ -24,6 +24,7 @@
28288 #include <asm/errno.h>
28289 #include <linux/delay.h>
28290 #include <linux/slab.h>
28291+#include <linux/sched.h>
28292 #include <linux/mtd/mtd.h>
28293 #include <linux/mtd/nand.h>
28294 #include <linux/mtd/nftl.h>
28295@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28296 struct mtd_info *mtd = nftl->mbd.mtd;
28297 unsigned int i;
28298
28299+ pax_track_stack();
28300+
28301 /* Assume logical EraseSize == physical erasesize for starting the scan.
28302 We'll sort it out later if we find a MediaHeader which says otherwise */
28303 /* Actually, we won't. The new DiskOnChip driver has already scanned
28304diff -urNp linux-3.0.3/drivers/mtd/ubi/build.c linux-3.0.3/drivers/mtd/ubi/build.c
28305--- linux-3.0.3/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28306+++ linux-3.0.3/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28307@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28308 static int __init bytes_str_to_int(const char *str)
28309 {
28310 char *endp;
28311- unsigned long result;
28312+ unsigned long result, scale = 1;
28313
28314 result = simple_strtoul(str, &endp, 0);
28315 if (str == endp || result >= INT_MAX) {
28316@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28317
28318 switch (*endp) {
28319 case 'G':
28320- result *= 1024;
28321+ scale *= 1024;
28322 case 'M':
28323- result *= 1024;
28324+ scale *= 1024;
28325 case 'K':
28326- result *= 1024;
28327+ scale *= 1024;
28328 if (endp[1] == 'i' && endp[2] == 'B')
28329 endp += 2;
28330 case '\0':
28331@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28332 return -EINVAL;
28333 }
28334
28335- return result;
28336+ if ((intoverflow_t)result*scale >= INT_MAX) {
28337+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28338+ str);
28339+ return -EINVAL;
28340+ }
28341+
28342+ return result*scale;
28343 }
28344
28345 /**
28346diff -urNp linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c
28347--- linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28348+++ linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28349@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28350 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28351 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28352
28353-static struct bfa_ioc_hwif nw_hwif_ct;
28354+static struct bfa_ioc_hwif nw_hwif_ct = {
28355+ .ioc_pll_init = bfa_ioc_ct_pll_init,
28356+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28357+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28358+ .ioc_reg_init = bfa_ioc_ct_reg_init,
28359+ .ioc_map_port = bfa_ioc_ct_map_port,
28360+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28361+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28362+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28363+ .ioc_sync_start = bfa_ioc_ct_sync_start,
28364+ .ioc_sync_join = bfa_ioc_ct_sync_join,
28365+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28366+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28367+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
28368+};
28369
28370 /**
28371 * Called from bfa_ioc_attach() to map asic specific calls.
28372@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28373 void
28374 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28375 {
28376- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28377- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28378- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28379- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28380- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28381- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28382- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28383- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28384- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28385- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28386- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28387- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28388- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28389-
28390 ioc->ioc_hwif = &nw_hwif_ct;
28391 }
28392
28393diff -urNp linux-3.0.3/drivers/net/bna/bnad.c linux-3.0.3/drivers/net/bna/bnad.c
28394--- linux-3.0.3/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28395+++ linux-3.0.3/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28396@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28397 struct bna_intr_info *intr_info =
28398 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28399 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28400- struct bna_tx_event_cbfn tx_cbfn;
28401+ static struct bna_tx_event_cbfn tx_cbfn = {
28402+ /* Initialize the tx event handlers */
28403+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
28404+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28405+ .tx_stall_cbfn = bnad_cb_tx_stall,
28406+ .tx_resume_cbfn = bnad_cb_tx_resume,
28407+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28408+ };
28409 struct bna_tx *tx;
28410 unsigned long flags;
28411
28412@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28413 tx_config->txq_depth = bnad->txq_depth;
28414 tx_config->tx_type = BNA_TX_T_REGULAR;
28415
28416- /* Initialize the tx event handlers */
28417- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28418- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28419- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28420- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28421- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28422-
28423 /* Get BNA's resource requirement for one tx object */
28424 spin_lock_irqsave(&bnad->bna_lock, flags);
28425 bna_tx_res_req(bnad->num_txq_per_tx,
28426@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28427 struct bna_intr_info *intr_info =
28428 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28429 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28430- struct bna_rx_event_cbfn rx_cbfn;
28431+ static struct bna_rx_event_cbfn rx_cbfn = {
28432+ /* Initialize the Rx event handlers */
28433+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
28434+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28435+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
28436+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28437+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28438+ .rx_post_cbfn = bnad_cb_rx_post
28439+ };
28440 struct bna_rx *rx;
28441 unsigned long flags;
28442
28443 /* Initialize the Rx object configuration */
28444 bnad_init_rx_config(bnad, rx_config);
28445
28446- /* Initialize the Rx event handlers */
28447- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28448- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28449- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28450- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28451- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28452- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28453-
28454 /* Get BNA's resource requirement for one Rx object */
28455 spin_lock_irqsave(&bnad->bna_lock, flags);
28456 bna_rx_res_req(rx_config, res_info);
28457diff -urNp linux-3.0.3/drivers/net/bnx2.c linux-3.0.3/drivers/net/bnx2.c
28458--- linux-3.0.3/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28459+++ linux-3.0.3/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28460@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28461 int rc = 0;
28462 u32 magic, csum;
28463
28464+ pax_track_stack();
28465+
28466 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28467 goto test_nvram_done;
28468
28469diff -urNp linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c
28470--- linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28471+++ linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28472@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28473 int i, rc;
28474 u32 magic, crc;
28475
28476+ pax_track_stack();
28477+
28478 if (BP_NOMCP(bp))
28479 return 0;
28480
28481diff -urNp linux-3.0.3/drivers/net/cxgb3/l2t.h linux-3.0.3/drivers/net/cxgb3/l2t.h
28482--- linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28483+++ linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28484@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28485 */
28486 struct l2t_skb_cb {
28487 arp_failure_handler_func arp_failure_handler;
28488-};
28489+} __no_const;
28490
28491 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28492
28493diff -urNp linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c
28494--- linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28495+++ linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28496@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28497 unsigned int nchan = adap->params.nports;
28498 struct msix_entry entries[MAX_INGQ + 1];
28499
28500+ pax_track_stack();
28501+
28502 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28503 entries[i].entry = i;
28504
28505diff -urNp linux-3.0.3/drivers/net/cxgb4/t4_hw.c linux-3.0.3/drivers/net/cxgb4/t4_hw.c
28506--- linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28507+++ linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28508@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28509 u8 vpd[VPD_LEN], csum;
28510 unsigned int vpdr_len, kw_offset, id_len;
28511
28512+ pax_track_stack();
28513+
28514 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28515 if (ret < 0)
28516 return ret;
28517diff -urNp linux-3.0.3/drivers/net/e1000e/82571.c linux-3.0.3/drivers/net/e1000e/82571.c
28518--- linux-3.0.3/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28519+++ linux-3.0.3/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28520@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28521 {
28522 struct e1000_hw *hw = &adapter->hw;
28523 struct e1000_mac_info *mac = &hw->mac;
28524- struct e1000_mac_operations *func = &mac->ops;
28525+ e1000_mac_operations_no_const *func = &mac->ops;
28526 u32 swsm = 0;
28527 u32 swsm2 = 0;
28528 bool force_clear_smbi = false;
28529diff -urNp linux-3.0.3/drivers/net/e1000e/es2lan.c linux-3.0.3/drivers/net/e1000e/es2lan.c
28530--- linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28531+++ linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28532@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28533 {
28534 struct e1000_hw *hw = &adapter->hw;
28535 struct e1000_mac_info *mac = &hw->mac;
28536- struct e1000_mac_operations *func = &mac->ops;
28537+ e1000_mac_operations_no_const *func = &mac->ops;
28538
28539 /* Set media type */
28540 switch (adapter->pdev->device) {
28541diff -urNp linux-3.0.3/drivers/net/e1000e/hw.h linux-3.0.3/drivers/net/e1000e/hw.h
28542--- linux-3.0.3/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28543+++ linux-3.0.3/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28544@@ -776,6 +776,7 @@ struct e1000_mac_operations {
28545 void (*write_vfta)(struct e1000_hw *, u32, u32);
28546 s32 (*read_mac_addr)(struct e1000_hw *);
28547 };
28548+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28549
28550 /* Function pointers for the PHY. */
28551 struct e1000_phy_operations {
28552@@ -799,6 +800,7 @@ struct e1000_phy_operations {
28553 void (*power_up)(struct e1000_hw *);
28554 void (*power_down)(struct e1000_hw *);
28555 };
28556+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28557
28558 /* Function pointers for the NVM. */
28559 struct e1000_nvm_operations {
28560@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28561 s32 (*validate)(struct e1000_hw *);
28562 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28563 };
28564+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28565
28566 struct e1000_mac_info {
28567- struct e1000_mac_operations ops;
28568+ e1000_mac_operations_no_const ops;
28569 u8 addr[ETH_ALEN];
28570 u8 perm_addr[ETH_ALEN];
28571
28572@@ -853,7 +856,7 @@ struct e1000_mac_info {
28573 };
28574
28575 struct e1000_phy_info {
28576- struct e1000_phy_operations ops;
28577+ e1000_phy_operations_no_const ops;
28578
28579 enum e1000_phy_type type;
28580
28581@@ -887,7 +890,7 @@ struct e1000_phy_info {
28582 };
28583
28584 struct e1000_nvm_info {
28585- struct e1000_nvm_operations ops;
28586+ e1000_nvm_operations_no_const ops;
28587
28588 enum e1000_nvm_type type;
28589 enum e1000_nvm_override override;
28590diff -urNp linux-3.0.3/drivers/net/hamradio/6pack.c linux-3.0.3/drivers/net/hamradio/6pack.c
28591--- linux-3.0.3/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28592+++ linux-3.0.3/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28593@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28594 unsigned char buf[512];
28595 int count1;
28596
28597+ pax_track_stack();
28598+
28599 if (!count)
28600 return;
28601
28602diff -urNp linux-3.0.3/drivers/net/igb/e1000_hw.h linux-3.0.3/drivers/net/igb/e1000_hw.h
28603--- linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28604+++ linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28605@@ -314,6 +314,7 @@ struct e1000_mac_operations {
28606 s32 (*read_mac_addr)(struct e1000_hw *);
28607 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28608 };
28609+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28610
28611 struct e1000_phy_operations {
28612 s32 (*acquire)(struct e1000_hw *);
28613@@ -330,6 +331,7 @@ struct e1000_phy_operations {
28614 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28615 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28616 };
28617+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28618
28619 struct e1000_nvm_operations {
28620 s32 (*acquire)(struct e1000_hw *);
28621@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28622 s32 (*update)(struct e1000_hw *);
28623 s32 (*validate)(struct e1000_hw *);
28624 };
28625+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28626
28627 struct e1000_info {
28628 s32 (*get_invariants)(struct e1000_hw *);
28629@@ -350,7 +353,7 @@ struct e1000_info {
28630 extern const struct e1000_info e1000_82575_info;
28631
28632 struct e1000_mac_info {
28633- struct e1000_mac_operations ops;
28634+ e1000_mac_operations_no_const ops;
28635
28636 u8 addr[6];
28637 u8 perm_addr[6];
28638@@ -388,7 +391,7 @@ struct e1000_mac_info {
28639 };
28640
28641 struct e1000_phy_info {
28642- struct e1000_phy_operations ops;
28643+ e1000_phy_operations_no_const ops;
28644
28645 enum e1000_phy_type type;
28646
28647@@ -423,7 +426,7 @@ struct e1000_phy_info {
28648 };
28649
28650 struct e1000_nvm_info {
28651- struct e1000_nvm_operations ops;
28652+ e1000_nvm_operations_no_const ops;
28653 enum e1000_nvm_type type;
28654 enum e1000_nvm_override override;
28655
28656@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28657 s32 (*check_for_ack)(struct e1000_hw *, u16);
28658 s32 (*check_for_rst)(struct e1000_hw *, u16);
28659 };
28660+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28661
28662 struct e1000_mbx_stats {
28663 u32 msgs_tx;
28664@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28665 };
28666
28667 struct e1000_mbx_info {
28668- struct e1000_mbx_operations ops;
28669+ e1000_mbx_operations_no_const ops;
28670 struct e1000_mbx_stats stats;
28671 u32 timeout;
28672 u32 usec_delay;
28673diff -urNp linux-3.0.3/drivers/net/igbvf/vf.h linux-3.0.3/drivers/net/igbvf/vf.h
28674--- linux-3.0.3/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28675+++ linux-3.0.3/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28676@@ -189,9 +189,10 @@ struct e1000_mac_operations {
28677 s32 (*read_mac_addr)(struct e1000_hw *);
28678 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28679 };
28680+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28681
28682 struct e1000_mac_info {
28683- struct e1000_mac_operations ops;
28684+ e1000_mac_operations_no_const ops;
28685 u8 addr[6];
28686 u8 perm_addr[6];
28687
28688@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28689 s32 (*check_for_ack)(struct e1000_hw *);
28690 s32 (*check_for_rst)(struct e1000_hw *);
28691 };
28692+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28693
28694 struct e1000_mbx_stats {
28695 u32 msgs_tx;
28696@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28697 };
28698
28699 struct e1000_mbx_info {
28700- struct e1000_mbx_operations ops;
28701+ e1000_mbx_operations_no_const ops;
28702 struct e1000_mbx_stats stats;
28703 u32 timeout;
28704 u32 usec_delay;
28705diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_main.c linux-3.0.3/drivers/net/ixgb/ixgb_main.c
28706--- linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28707+++ linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28708@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28709 u32 rctl;
28710 int i;
28711
28712+ pax_track_stack();
28713+
28714 /* Check for Promiscuous and All Multicast modes */
28715
28716 rctl = IXGB_READ_REG(hw, RCTL);
28717diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_param.c linux-3.0.3/drivers/net/ixgb/ixgb_param.c
28718--- linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28719+++ linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28720@@ -261,6 +261,9 @@ void __devinit
28721 ixgb_check_options(struct ixgb_adapter *adapter)
28722 {
28723 int bd = adapter->bd_number;
28724+
28725+ pax_track_stack();
28726+
28727 if (bd >= IXGB_MAX_NIC) {
28728 pr_notice("Warning: no configuration for board #%i\n", bd);
28729 pr_notice("Using defaults for all values\n");
28730diff -urNp linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h
28731--- linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28732+++ linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28733@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28734 s32 (*update_checksum)(struct ixgbe_hw *);
28735 u16 (*calc_checksum)(struct ixgbe_hw *);
28736 };
28737+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28738
28739 struct ixgbe_mac_operations {
28740 s32 (*init_hw)(struct ixgbe_hw *);
28741@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28742 /* Flow Control */
28743 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28744 };
28745+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28746
28747 struct ixgbe_phy_operations {
28748 s32 (*identify)(struct ixgbe_hw *);
28749@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28750 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28751 s32 (*check_overtemp)(struct ixgbe_hw *);
28752 };
28753+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28754
28755 struct ixgbe_eeprom_info {
28756- struct ixgbe_eeprom_operations ops;
28757+ ixgbe_eeprom_operations_no_const ops;
28758 enum ixgbe_eeprom_type type;
28759 u32 semaphore_delay;
28760 u16 word_size;
28761@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28762
28763 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28764 struct ixgbe_mac_info {
28765- struct ixgbe_mac_operations ops;
28766+ ixgbe_mac_operations_no_const ops;
28767 enum ixgbe_mac_type type;
28768 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28769 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28770@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28771 };
28772
28773 struct ixgbe_phy_info {
28774- struct ixgbe_phy_operations ops;
28775+ ixgbe_phy_operations_no_const ops;
28776 struct mdio_if_info mdio;
28777 enum ixgbe_phy_type type;
28778 u32 id;
28779@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28780 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28781 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28782 };
28783+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28784
28785 struct ixgbe_mbx_stats {
28786 u32 msgs_tx;
28787@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28788 };
28789
28790 struct ixgbe_mbx_info {
28791- struct ixgbe_mbx_operations ops;
28792+ ixgbe_mbx_operations_no_const ops;
28793 struct ixgbe_mbx_stats stats;
28794 u32 timeout;
28795 u32 usec_delay;
28796diff -urNp linux-3.0.3/drivers/net/ixgbevf/vf.h linux-3.0.3/drivers/net/ixgbevf/vf.h
28797--- linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28798+++ linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28799@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28800 s32 (*clear_vfta)(struct ixgbe_hw *);
28801 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28802 };
28803+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28804
28805 enum ixgbe_mac_type {
28806 ixgbe_mac_unknown = 0,
28807@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28808 };
28809
28810 struct ixgbe_mac_info {
28811- struct ixgbe_mac_operations ops;
28812+ ixgbe_mac_operations_no_const ops;
28813 u8 addr[6];
28814 u8 perm_addr[6];
28815
28816@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28817 s32 (*check_for_ack)(struct ixgbe_hw *);
28818 s32 (*check_for_rst)(struct ixgbe_hw *);
28819 };
28820+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28821
28822 struct ixgbe_mbx_stats {
28823 u32 msgs_tx;
28824@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28825 };
28826
28827 struct ixgbe_mbx_info {
28828- struct ixgbe_mbx_operations ops;
28829+ ixgbe_mbx_operations_no_const ops;
28830 struct ixgbe_mbx_stats stats;
28831 u32 timeout;
28832 u32 udelay;
28833diff -urNp linux-3.0.3/drivers/net/ksz884x.c linux-3.0.3/drivers/net/ksz884x.c
28834--- linux-3.0.3/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28835+++ linux-3.0.3/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28836@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28837 int rc;
28838 u64 counter[TOTAL_PORT_COUNTER_NUM];
28839
28840+ pax_track_stack();
28841+
28842 mutex_lock(&hw_priv->lock);
28843 n = SWITCH_PORT_NUM;
28844 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28845diff -urNp linux-3.0.3/drivers/net/mlx4/main.c linux-3.0.3/drivers/net/mlx4/main.c
28846--- linux-3.0.3/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28847+++ linux-3.0.3/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28848@@ -40,6 +40,7 @@
28849 #include <linux/dma-mapping.h>
28850 #include <linux/slab.h>
28851 #include <linux/io-mapping.h>
28852+#include <linux/sched.h>
28853
28854 #include <linux/mlx4/device.h>
28855 #include <linux/mlx4/doorbell.h>
28856@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28857 u64 icm_size;
28858 int err;
28859
28860+ pax_track_stack();
28861+
28862 err = mlx4_QUERY_FW(dev);
28863 if (err) {
28864 if (err == -EACCES)
28865diff -urNp linux-3.0.3/drivers/net/niu.c linux-3.0.3/drivers/net/niu.c
28866--- linux-3.0.3/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28867+++ linux-3.0.3/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28868@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28869 int i, num_irqs, err;
28870 u8 first_ldg;
28871
28872+ pax_track_stack();
28873+
28874 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28875 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28876 ldg_num_map[i] = first_ldg + i;
28877diff -urNp linux-3.0.3/drivers/net/pcnet32.c linux-3.0.3/drivers/net/pcnet32.c
28878--- linux-3.0.3/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28879+++ linux-3.0.3/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28880@@ -82,7 +82,7 @@ static int cards_found;
28881 /*
28882 * VLB I/O addresses
28883 */
28884-static unsigned int pcnet32_portlist[] __initdata =
28885+static unsigned int pcnet32_portlist[] __devinitdata =
28886 { 0x300, 0x320, 0x340, 0x360, 0 };
28887
28888 static int pcnet32_debug;
28889@@ -270,7 +270,7 @@ struct pcnet32_private {
28890 struct sk_buff **rx_skbuff;
28891 dma_addr_t *tx_dma_addr;
28892 dma_addr_t *rx_dma_addr;
28893- struct pcnet32_access a;
28894+ struct pcnet32_access *a;
28895 spinlock_t lock; /* Guard lock */
28896 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28897 unsigned int rx_ring_size; /* current rx ring size */
28898@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28899 u16 val;
28900
28901 netif_wake_queue(dev);
28902- val = lp->a.read_csr(ioaddr, CSR3);
28903+ val = lp->a->read_csr(ioaddr, CSR3);
28904 val &= 0x00ff;
28905- lp->a.write_csr(ioaddr, CSR3, val);
28906+ lp->a->write_csr(ioaddr, CSR3, val);
28907 napi_enable(&lp->napi);
28908 }
28909
28910@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28911 r = mii_link_ok(&lp->mii_if);
28912 } else if (lp->chip_version >= PCNET32_79C970A) {
28913 ulong ioaddr = dev->base_addr; /* card base I/O address */
28914- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28915+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28916 } else { /* can not detect link on really old chips */
28917 r = 1;
28918 }
28919@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28920 pcnet32_netif_stop(dev);
28921
28922 spin_lock_irqsave(&lp->lock, flags);
28923- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28924+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28925
28926 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28927
28928@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28929 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28930 {
28931 struct pcnet32_private *lp = netdev_priv(dev);
28932- struct pcnet32_access *a = &lp->a; /* access to registers */
28933+ struct pcnet32_access *a = lp->a; /* access to registers */
28934 ulong ioaddr = dev->base_addr; /* card base I/O address */
28935 struct sk_buff *skb; /* sk buff */
28936 int x, i; /* counters */
28937@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28938 pcnet32_netif_stop(dev);
28939
28940 spin_lock_irqsave(&lp->lock, flags);
28941- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28942+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28943
28944 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28945
28946 /* Reset the PCNET32 */
28947- lp->a.reset(ioaddr);
28948- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28949+ lp->a->reset(ioaddr);
28950+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28951
28952 /* switch pcnet32 to 32bit mode */
28953- lp->a.write_bcr(ioaddr, 20, 2);
28954+ lp->a->write_bcr(ioaddr, 20, 2);
28955
28956 /* purge & init rings but don't actually restart */
28957 pcnet32_restart(dev, 0x0000);
28958
28959- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28960+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28961
28962 /* Initialize Transmit buffers. */
28963 size = data_len + 15;
28964@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28965
28966 /* set int loopback in CSR15 */
28967 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28968- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28969+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28970
28971 teststatus = cpu_to_le16(0x8000);
28972- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28973+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28974
28975 /* Check status of descriptors */
28976 for (x = 0; x < numbuffs; x++) {
28977@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28978 }
28979 }
28980
28981- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28982+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28983 wmb();
28984 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28985 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28986@@ -1015,7 +1015,7 @@ clean_up:
28987 pcnet32_restart(dev, CSR0_NORMAL);
28988 } else {
28989 pcnet32_purge_rx_ring(dev);
28990- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28991+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28992 }
28993 spin_unlock_irqrestore(&lp->lock, flags);
28994
28995@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28996 enum ethtool_phys_id_state state)
28997 {
28998 struct pcnet32_private *lp = netdev_priv(dev);
28999- struct pcnet32_access *a = &lp->a;
29000+ struct pcnet32_access *a = lp->a;
29001 ulong ioaddr = dev->base_addr;
29002 unsigned long flags;
29003 int i;
29004@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
29005 {
29006 int csr5;
29007 struct pcnet32_private *lp = netdev_priv(dev);
29008- struct pcnet32_access *a = &lp->a;
29009+ struct pcnet32_access *a = lp->a;
29010 ulong ioaddr = dev->base_addr;
29011 int ticks;
29012
29013@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29014 spin_lock_irqsave(&lp->lock, flags);
29015 if (pcnet32_tx(dev)) {
29016 /* reset the chip to clear the error condition, then restart */
29017- lp->a.reset(ioaddr);
29018- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29019+ lp->a->reset(ioaddr);
29020+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29021 pcnet32_restart(dev, CSR0_START);
29022 netif_wake_queue(dev);
29023 }
29024@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29025 __napi_complete(napi);
29026
29027 /* clear interrupt masks */
29028- val = lp->a.read_csr(ioaddr, CSR3);
29029+ val = lp->a->read_csr(ioaddr, CSR3);
29030 val &= 0x00ff;
29031- lp->a.write_csr(ioaddr, CSR3, val);
29032+ lp->a->write_csr(ioaddr, CSR3, val);
29033
29034 /* Set interrupt enable. */
29035- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29036+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29037
29038 spin_unlock_irqrestore(&lp->lock, flags);
29039 }
29040@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29041 int i, csr0;
29042 u16 *buff = ptr;
29043 struct pcnet32_private *lp = netdev_priv(dev);
29044- struct pcnet32_access *a = &lp->a;
29045+ struct pcnet32_access *a = lp->a;
29046 ulong ioaddr = dev->base_addr;
29047 unsigned long flags;
29048
29049@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29050 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29051 if (lp->phymask & (1 << j)) {
29052 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29053- lp->a.write_bcr(ioaddr, 33,
29054+ lp->a->write_bcr(ioaddr, 33,
29055 (j << 5) | i);
29056- *buff++ = lp->a.read_bcr(ioaddr, 34);
29057+ *buff++ = lp->a->read_bcr(ioaddr, 34);
29058 }
29059 }
29060 }
29061@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29062 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29063 lp->options |= PCNET32_PORT_FD;
29064
29065- lp->a = *a;
29066+ lp->a = a;
29067
29068 /* prior to register_netdev, dev->name is not yet correct */
29069 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29070@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29071 if (lp->mii) {
29072 /* lp->phycount and lp->phymask are set to 0 by memset above */
29073
29074- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29075+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29076 /* scan for PHYs */
29077 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29078 unsigned short id1, id2;
29079@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29080 pr_info("Found PHY %04x:%04x at address %d\n",
29081 id1, id2, i);
29082 }
29083- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29084+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29085 if (lp->phycount > 1)
29086 lp->options |= PCNET32_PORT_MII;
29087 }
29088@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29089 }
29090
29091 /* Reset the PCNET32 */
29092- lp->a.reset(ioaddr);
29093+ lp->a->reset(ioaddr);
29094
29095 /* switch pcnet32 to 32bit mode */
29096- lp->a.write_bcr(ioaddr, 20, 2);
29097+ lp->a->write_bcr(ioaddr, 20, 2);
29098
29099 netif_printk(lp, ifup, KERN_DEBUG, dev,
29100 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29101@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29102 (u32) (lp->init_dma_addr));
29103
29104 /* set/reset autoselect bit */
29105- val = lp->a.read_bcr(ioaddr, 2) & ~2;
29106+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
29107 if (lp->options & PCNET32_PORT_ASEL)
29108 val |= 2;
29109- lp->a.write_bcr(ioaddr, 2, val);
29110+ lp->a->write_bcr(ioaddr, 2, val);
29111
29112 /* handle full duplex setting */
29113 if (lp->mii_if.full_duplex) {
29114- val = lp->a.read_bcr(ioaddr, 9) & ~3;
29115+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
29116 if (lp->options & PCNET32_PORT_FD) {
29117 val |= 1;
29118 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29119@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29120 if (lp->chip_version == 0x2627)
29121 val |= 3;
29122 }
29123- lp->a.write_bcr(ioaddr, 9, val);
29124+ lp->a->write_bcr(ioaddr, 9, val);
29125 }
29126
29127 /* set/reset GPSI bit in test register */
29128- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29129+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29130 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29131 val |= 0x10;
29132- lp->a.write_csr(ioaddr, 124, val);
29133+ lp->a->write_csr(ioaddr, 124, val);
29134
29135 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29136 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29137@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29138 * duplex, and/or enable auto negotiation, and clear DANAS
29139 */
29140 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29141- lp->a.write_bcr(ioaddr, 32,
29142- lp->a.read_bcr(ioaddr, 32) | 0x0080);
29143+ lp->a->write_bcr(ioaddr, 32,
29144+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
29145 /* disable Auto Negotiation, set 10Mpbs, HD */
29146- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29147+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29148 if (lp->options & PCNET32_PORT_FD)
29149 val |= 0x10;
29150 if (lp->options & PCNET32_PORT_100)
29151 val |= 0x08;
29152- lp->a.write_bcr(ioaddr, 32, val);
29153+ lp->a->write_bcr(ioaddr, 32, val);
29154 } else {
29155 if (lp->options & PCNET32_PORT_ASEL) {
29156- lp->a.write_bcr(ioaddr, 32,
29157- lp->a.read_bcr(ioaddr,
29158+ lp->a->write_bcr(ioaddr, 32,
29159+ lp->a->read_bcr(ioaddr,
29160 32) | 0x0080);
29161 /* enable auto negotiate, setup, disable fd */
29162- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29163+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29164 val |= 0x20;
29165- lp->a.write_bcr(ioaddr, 32, val);
29166+ lp->a->write_bcr(ioaddr, 32, val);
29167 }
29168 }
29169 } else {
29170@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29171 * There is really no good other way to handle multiple PHYs
29172 * other than turning off all automatics
29173 */
29174- val = lp->a.read_bcr(ioaddr, 2);
29175- lp->a.write_bcr(ioaddr, 2, val & ~2);
29176- val = lp->a.read_bcr(ioaddr, 32);
29177- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29178+ val = lp->a->read_bcr(ioaddr, 2);
29179+ lp->a->write_bcr(ioaddr, 2, val & ~2);
29180+ val = lp->a->read_bcr(ioaddr, 32);
29181+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29182
29183 if (!(lp->options & PCNET32_PORT_ASEL)) {
29184 /* setup ecmd */
29185@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29186 ethtool_cmd_speed_set(&ecmd,
29187 (lp->options & PCNET32_PORT_100) ?
29188 SPEED_100 : SPEED_10);
29189- bcr9 = lp->a.read_bcr(ioaddr, 9);
29190+ bcr9 = lp->a->read_bcr(ioaddr, 9);
29191
29192 if (lp->options & PCNET32_PORT_FD) {
29193 ecmd.duplex = DUPLEX_FULL;
29194@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29195 ecmd.duplex = DUPLEX_HALF;
29196 bcr9 |= ~(1 << 0);
29197 }
29198- lp->a.write_bcr(ioaddr, 9, bcr9);
29199+ lp->a->write_bcr(ioaddr, 9, bcr9);
29200 }
29201
29202 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29203@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29204
29205 #ifdef DO_DXSUFLO
29206 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29207- val = lp->a.read_csr(ioaddr, CSR3);
29208+ val = lp->a->read_csr(ioaddr, CSR3);
29209 val |= 0x40;
29210- lp->a.write_csr(ioaddr, CSR3, val);
29211+ lp->a->write_csr(ioaddr, CSR3, val);
29212 }
29213 #endif
29214
29215@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29216 napi_enable(&lp->napi);
29217
29218 /* Re-initialize the PCNET32, and start it when done. */
29219- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29220- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29221+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29222+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29223
29224- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29225- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29226+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29227+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29228
29229 netif_start_queue(dev);
29230
29231@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29232
29233 i = 0;
29234 while (i++ < 100)
29235- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29236+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29237 break;
29238 /*
29239 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29240 * reports that doing so triggers a bug in the '974.
29241 */
29242- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29243+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29244
29245 netif_printk(lp, ifup, KERN_DEBUG, dev,
29246 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29247 i,
29248 (u32) (lp->init_dma_addr),
29249- lp->a.read_csr(ioaddr, CSR0));
29250+ lp->a->read_csr(ioaddr, CSR0));
29251
29252 spin_unlock_irqrestore(&lp->lock, flags);
29253
29254@@ -2218,7 +2218,7 @@ err_free_ring:
29255 * Switch back to 16bit mode to avoid problems with dumb
29256 * DOS packet driver after a warm reboot
29257 */
29258- lp->a.write_bcr(ioaddr, 20, 4);
29259+ lp->a->write_bcr(ioaddr, 20, 4);
29260
29261 err_free_irq:
29262 spin_unlock_irqrestore(&lp->lock, flags);
29263@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29264
29265 /* wait for stop */
29266 for (i = 0; i < 100; i++)
29267- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29268+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29269 break;
29270
29271 if (i >= 100)
29272@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29273 return;
29274
29275 /* ReInit Ring */
29276- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29277+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29278 i = 0;
29279 while (i++ < 1000)
29280- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29281+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29282 break;
29283
29284- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29285+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29286 }
29287
29288 static void pcnet32_tx_timeout(struct net_device *dev)
29289@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29290 /* Transmitter timeout, serious problems. */
29291 if (pcnet32_debug & NETIF_MSG_DRV)
29292 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29293- dev->name, lp->a.read_csr(ioaddr, CSR0));
29294- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29295+ dev->name, lp->a->read_csr(ioaddr, CSR0));
29296+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29297 dev->stats.tx_errors++;
29298 if (netif_msg_tx_err(lp)) {
29299 int i;
29300@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29301
29302 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29303 "%s() called, csr0 %4.4x\n",
29304- __func__, lp->a.read_csr(ioaddr, CSR0));
29305+ __func__, lp->a->read_csr(ioaddr, CSR0));
29306
29307 /* Default status -- will not enable Successful-TxDone
29308 * interrupt when that option is available to us.
29309@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29310 dev->stats.tx_bytes += skb->len;
29311
29312 /* Trigger an immediate send poll. */
29313- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29314+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29315
29316 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29317 lp->tx_full = 1;
29318@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29319
29320 spin_lock(&lp->lock);
29321
29322- csr0 = lp->a.read_csr(ioaddr, CSR0);
29323+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29324 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29325 if (csr0 == 0xffff)
29326 break; /* PCMCIA remove happened */
29327 /* Acknowledge all of the current interrupt sources ASAP. */
29328- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29329+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29330
29331 netif_printk(lp, intr, KERN_DEBUG, dev,
29332 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29333- csr0, lp->a.read_csr(ioaddr, CSR0));
29334+ csr0, lp->a->read_csr(ioaddr, CSR0));
29335
29336 /* Log misc errors. */
29337 if (csr0 & 0x4000)
29338@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29339 if (napi_schedule_prep(&lp->napi)) {
29340 u16 val;
29341 /* set interrupt masks */
29342- val = lp->a.read_csr(ioaddr, CSR3);
29343+ val = lp->a->read_csr(ioaddr, CSR3);
29344 val |= 0x5f00;
29345- lp->a.write_csr(ioaddr, CSR3, val);
29346+ lp->a->write_csr(ioaddr, CSR3, val);
29347
29348 __napi_schedule(&lp->napi);
29349 break;
29350 }
29351- csr0 = lp->a.read_csr(ioaddr, CSR0);
29352+ csr0 = lp->a->read_csr(ioaddr, CSR0);
29353 }
29354
29355 netif_printk(lp, intr, KERN_DEBUG, dev,
29356 "exiting interrupt, csr0=%#4.4x\n",
29357- lp->a.read_csr(ioaddr, CSR0));
29358+ lp->a->read_csr(ioaddr, CSR0));
29359
29360 spin_unlock(&lp->lock);
29361
29362@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29363
29364 spin_lock_irqsave(&lp->lock, flags);
29365
29366- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29367+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29368
29369 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29370 "Shutting down ethercard, status was %2.2x\n",
29371- lp->a.read_csr(ioaddr, CSR0));
29372+ lp->a->read_csr(ioaddr, CSR0));
29373
29374 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29375- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29376+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29377
29378 /*
29379 * Switch back to 16bit mode to avoid problems with dumb
29380 * DOS packet driver after a warm reboot
29381 */
29382- lp->a.write_bcr(ioaddr, 20, 4);
29383+ lp->a->write_bcr(ioaddr, 20, 4);
29384
29385 spin_unlock_irqrestore(&lp->lock, flags);
29386
29387@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29388 unsigned long flags;
29389
29390 spin_lock_irqsave(&lp->lock, flags);
29391- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29392+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29393 spin_unlock_irqrestore(&lp->lock, flags);
29394
29395 return &dev->stats;
29396@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29397 if (dev->flags & IFF_ALLMULTI) {
29398 ib->filter[0] = cpu_to_le32(~0U);
29399 ib->filter[1] = cpu_to_le32(~0U);
29400- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29401- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29402- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29403- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29404+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29405+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29406+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29407+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29408 return;
29409 }
29410 /* clear the multicast filter */
29411@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29412 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29413 }
29414 for (i = 0; i < 4; i++)
29415- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29416+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29417 le16_to_cpu(mcast_table[i]));
29418 }
29419
29420@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29421
29422 spin_lock_irqsave(&lp->lock, flags);
29423 suspended = pcnet32_suspend(dev, &flags, 0);
29424- csr15 = lp->a.read_csr(ioaddr, CSR15);
29425+ csr15 = lp->a->read_csr(ioaddr, CSR15);
29426 if (dev->flags & IFF_PROMISC) {
29427 /* Log any net taps. */
29428 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29429 lp->init_block->mode =
29430 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29431 7);
29432- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29433+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29434 } else {
29435 lp->init_block->mode =
29436 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29437- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29438+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29439 pcnet32_load_multicast(dev);
29440 }
29441
29442 if (suspended) {
29443 int csr5;
29444 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29445- csr5 = lp->a.read_csr(ioaddr, CSR5);
29446- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29447+ csr5 = lp->a->read_csr(ioaddr, CSR5);
29448+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29449 } else {
29450- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29451+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29452 pcnet32_restart(dev, CSR0_NORMAL);
29453 netif_wake_queue(dev);
29454 }
29455@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29456 if (!lp->mii)
29457 return 0;
29458
29459- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29460- val_out = lp->a.read_bcr(ioaddr, 34);
29461+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29462+ val_out = lp->a->read_bcr(ioaddr, 34);
29463
29464 return val_out;
29465 }
29466@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29467 if (!lp->mii)
29468 return;
29469
29470- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29471- lp->a.write_bcr(ioaddr, 34, val);
29472+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29473+ lp->a->write_bcr(ioaddr, 34, val);
29474 }
29475
29476 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29477@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29478 curr_link = mii_link_ok(&lp->mii_if);
29479 } else {
29480 ulong ioaddr = dev->base_addr; /* card base I/O address */
29481- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29482+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29483 }
29484 if (!curr_link) {
29485 if (prev_link || verbose) {
29486@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29487 (ecmd.duplex == DUPLEX_FULL)
29488 ? "full" : "half");
29489 }
29490- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29491+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29492 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29493 if (lp->mii_if.full_duplex)
29494 bcr9 |= (1 << 0);
29495 else
29496 bcr9 &= ~(1 << 0);
29497- lp->a.write_bcr(dev->base_addr, 9, bcr9);
29498+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
29499 }
29500 } else {
29501 netif_info(lp, link, dev, "link up\n");
29502diff -urNp linux-3.0.3/drivers/net/ppp_generic.c linux-3.0.3/drivers/net/ppp_generic.c
29503--- linux-3.0.3/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29504+++ linux-3.0.3/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29505@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29506 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29507 struct ppp_stats stats;
29508 struct ppp_comp_stats cstats;
29509- char *vers;
29510
29511 switch (cmd) {
29512 case SIOCGPPPSTATS:
29513@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29514 break;
29515
29516 case SIOCGPPPVER:
29517- vers = PPP_VERSION;
29518- if (copy_to_user(addr, vers, strlen(vers) + 1))
29519+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29520 break;
29521 err = 0;
29522 break;
29523diff -urNp linux-3.0.3/drivers/net/r8169.c linux-3.0.3/drivers/net/r8169.c
29524--- linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29525+++ linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29526@@ -645,12 +645,12 @@ struct rtl8169_private {
29527 struct mdio_ops {
29528 void (*write)(void __iomem *, int, int);
29529 int (*read)(void __iomem *, int);
29530- } mdio_ops;
29531+ } __no_const mdio_ops;
29532
29533 struct pll_power_ops {
29534 void (*down)(struct rtl8169_private *);
29535 void (*up)(struct rtl8169_private *);
29536- } pll_power_ops;
29537+ } __no_const pll_power_ops;
29538
29539 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29540 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29541diff -urNp linux-3.0.3/drivers/net/tg3.h linux-3.0.3/drivers/net/tg3.h
29542--- linux-3.0.3/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29543+++ linux-3.0.3/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29544@@ -134,6 +134,7 @@
29545 #define CHIPREV_ID_5750_A0 0x4000
29546 #define CHIPREV_ID_5750_A1 0x4001
29547 #define CHIPREV_ID_5750_A3 0x4003
29548+#define CHIPREV_ID_5750_C1 0x4201
29549 #define CHIPREV_ID_5750_C2 0x4202
29550 #define CHIPREV_ID_5752_A0_HW 0x5000
29551 #define CHIPREV_ID_5752_A0 0x6000
29552diff -urNp linux-3.0.3/drivers/net/tokenring/abyss.c linux-3.0.3/drivers/net/tokenring/abyss.c
29553--- linux-3.0.3/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29554+++ linux-3.0.3/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29555@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29556
29557 static int __init abyss_init (void)
29558 {
29559- abyss_netdev_ops = tms380tr_netdev_ops;
29560+ pax_open_kernel();
29561+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29562
29563- abyss_netdev_ops.ndo_open = abyss_open;
29564- abyss_netdev_ops.ndo_stop = abyss_close;
29565+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29566+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29567+ pax_close_kernel();
29568
29569 return pci_register_driver(&abyss_driver);
29570 }
29571diff -urNp linux-3.0.3/drivers/net/tokenring/madgemc.c linux-3.0.3/drivers/net/tokenring/madgemc.c
29572--- linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29573+++ linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29574@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29575
29576 static int __init madgemc_init (void)
29577 {
29578- madgemc_netdev_ops = tms380tr_netdev_ops;
29579- madgemc_netdev_ops.ndo_open = madgemc_open;
29580- madgemc_netdev_ops.ndo_stop = madgemc_close;
29581+ pax_open_kernel();
29582+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29583+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29584+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29585+ pax_close_kernel();
29586
29587 return mca_register_driver (&madgemc_driver);
29588 }
29589diff -urNp linux-3.0.3/drivers/net/tokenring/proteon.c linux-3.0.3/drivers/net/tokenring/proteon.c
29590--- linux-3.0.3/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29591+++ linux-3.0.3/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29592@@ -353,9 +353,11 @@ static int __init proteon_init(void)
29593 struct platform_device *pdev;
29594 int i, num = 0, err = 0;
29595
29596- proteon_netdev_ops = tms380tr_netdev_ops;
29597- proteon_netdev_ops.ndo_open = proteon_open;
29598- proteon_netdev_ops.ndo_stop = tms380tr_close;
29599+ pax_open_kernel();
29600+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29601+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29602+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29603+ pax_close_kernel();
29604
29605 err = platform_driver_register(&proteon_driver);
29606 if (err)
29607diff -urNp linux-3.0.3/drivers/net/tokenring/skisa.c linux-3.0.3/drivers/net/tokenring/skisa.c
29608--- linux-3.0.3/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29609+++ linux-3.0.3/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29610@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29611 struct platform_device *pdev;
29612 int i, num = 0, err = 0;
29613
29614- sk_isa_netdev_ops = tms380tr_netdev_ops;
29615- sk_isa_netdev_ops.ndo_open = sk_isa_open;
29616- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29617+ pax_open_kernel();
29618+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29619+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29620+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29621+ pax_close_kernel();
29622
29623 err = platform_driver_register(&sk_isa_driver);
29624 if (err)
29625diff -urNp linux-3.0.3/drivers/net/tulip/de2104x.c linux-3.0.3/drivers/net/tulip/de2104x.c
29626--- linux-3.0.3/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29627+++ linux-3.0.3/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29628@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29629 struct de_srom_info_leaf *il;
29630 void *bufp;
29631
29632+ pax_track_stack();
29633+
29634 /* download entire eeprom */
29635 for (i = 0; i < DE_EEPROM_WORDS; i++)
29636 ((__le16 *)ee_data)[i] =
29637diff -urNp linux-3.0.3/drivers/net/tulip/de4x5.c linux-3.0.3/drivers/net/tulip/de4x5.c
29638--- linux-3.0.3/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29639+++ linux-3.0.3/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29640@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29641 for (i=0; i<ETH_ALEN; i++) {
29642 tmp.addr[i] = dev->dev_addr[i];
29643 }
29644- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29645+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29646 break;
29647
29648 case DE4X5_SET_HWADDR: /* Set the hardware address */
29649@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29650 spin_lock_irqsave(&lp->lock, flags);
29651 memcpy(&statbuf, &lp->pktStats, ioc->len);
29652 spin_unlock_irqrestore(&lp->lock, flags);
29653- if (copy_to_user(ioc->data, &statbuf, ioc->len))
29654+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29655 return -EFAULT;
29656 break;
29657 }
29658diff -urNp linux-3.0.3/drivers/net/usb/hso.c linux-3.0.3/drivers/net/usb/hso.c
29659--- linux-3.0.3/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29660+++ linux-3.0.3/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29661@@ -71,7 +71,7 @@
29662 #include <asm/byteorder.h>
29663 #include <linux/serial_core.h>
29664 #include <linux/serial.h>
29665-
29666+#include <asm/local.h>
29667
29668 #define MOD_AUTHOR "Option Wireless"
29669 #define MOD_DESCRIPTION "USB High Speed Option driver"
29670@@ -257,7 +257,7 @@ struct hso_serial {
29671
29672 /* from usb_serial_port */
29673 struct tty_struct *tty;
29674- int open_count;
29675+ local_t open_count;
29676 spinlock_t serial_lock;
29677
29678 int (*write_data) (struct hso_serial *serial);
29679@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29680 struct urb *urb;
29681
29682 urb = serial->rx_urb[0];
29683- if (serial->open_count > 0) {
29684+ if (local_read(&serial->open_count) > 0) {
29685 count = put_rxbuf_data(urb, serial);
29686 if (count == -1)
29687 return;
29688@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29689 DUMP1(urb->transfer_buffer, urb->actual_length);
29690
29691 /* Anyone listening? */
29692- if (serial->open_count == 0)
29693+ if (local_read(&serial->open_count) == 0)
29694 return;
29695
29696 if (status == 0) {
29697@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29698 spin_unlock_irq(&serial->serial_lock);
29699
29700 /* check for port already opened, if not set the termios */
29701- serial->open_count++;
29702- if (serial->open_count == 1) {
29703+ if (local_inc_return(&serial->open_count) == 1) {
29704 serial->rx_state = RX_IDLE;
29705 /* Force default termio settings */
29706 _hso_serial_set_termios(tty, NULL);
29707@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29708 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29709 if (result) {
29710 hso_stop_serial_device(serial->parent);
29711- serial->open_count--;
29712+ local_dec(&serial->open_count);
29713 kref_put(&serial->parent->ref, hso_serial_ref_free);
29714 }
29715 } else {
29716@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29717
29718 /* reset the rts and dtr */
29719 /* do the actual close */
29720- serial->open_count--;
29721+ local_dec(&serial->open_count);
29722
29723- if (serial->open_count <= 0) {
29724- serial->open_count = 0;
29725+ if (local_read(&serial->open_count) <= 0) {
29726+ local_set(&serial->open_count, 0);
29727 spin_lock_irq(&serial->serial_lock);
29728 if (serial->tty == tty) {
29729 serial->tty->driver_data = NULL;
29730@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29731
29732 /* the actual setup */
29733 spin_lock_irqsave(&serial->serial_lock, flags);
29734- if (serial->open_count)
29735+ if (local_read(&serial->open_count))
29736 _hso_serial_set_termios(tty, old);
29737 else
29738 tty->termios = old;
29739@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29740 D1("Pending read interrupt on port %d\n", i);
29741 spin_lock(&serial->serial_lock);
29742 if (serial->rx_state == RX_IDLE &&
29743- serial->open_count > 0) {
29744+ local_read(&serial->open_count) > 0) {
29745 /* Setup and send a ctrl req read on
29746 * port i */
29747 if (!serial->rx_urb_filled[0]) {
29748@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29749 /* Start all serial ports */
29750 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29751 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29752- if (dev2ser(serial_table[i])->open_count) {
29753+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
29754 result =
29755 hso_start_serial_device(serial_table[i], GFP_NOIO);
29756 hso_kick_transmit(dev2ser(serial_table[i]));
29757diff -urNp linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
29758--- linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29759+++ linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29760@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29761 * Return with error code if any of the queue indices
29762 * is out of range
29763 */
29764- if (p->ring_index[i] < 0 ||
29765- p->ring_index[i] >= adapter->num_rx_queues)
29766+ if (p->ring_index[i] >= adapter->num_rx_queues)
29767 return -EINVAL;
29768 }
29769
29770diff -urNp linux-3.0.3/drivers/net/vxge/vxge-config.h linux-3.0.3/drivers/net/vxge/vxge-config.h
29771--- linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29772+++ linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29773@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29774 void (*link_down)(struct __vxge_hw_device *devh);
29775 void (*crit_err)(struct __vxge_hw_device *devh,
29776 enum vxge_hw_event type, u64 ext_data);
29777-};
29778+} __no_const;
29779
29780 /*
29781 * struct __vxge_hw_blockpool_entry - Block private data structure
29782diff -urNp linux-3.0.3/drivers/net/vxge/vxge-main.c linux-3.0.3/drivers/net/vxge/vxge-main.c
29783--- linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29784+++ linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29785@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29786 struct sk_buff *completed[NR_SKB_COMPLETED];
29787 int more;
29788
29789+ pax_track_stack();
29790+
29791 do {
29792 more = 0;
29793 skb_ptr = completed;
29794@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29795 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29796 int index;
29797
29798+ pax_track_stack();
29799+
29800 /*
29801 * Filling
29802 * - itable with bucket numbers
29803diff -urNp linux-3.0.3/drivers/net/vxge/vxge-traffic.h linux-3.0.3/drivers/net/vxge/vxge-traffic.h
29804--- linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29805+++ linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29806@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29807 struct vxge_hw_mempool_dma *dma_object,
29808 u32 index,
29809 u32 is_last);
29810-};
29811+} __no_const;
29812
29813 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29814 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29815diff -urNp linux-3.0.3/drivers/net/wan/cycx_x25.c linux-3.0.3/drivers/net/wan/cycx_x25.c
29816--- linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29817+++ linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29818@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29819 unsigned char hex[1024],
29820 * phex = hex;
29821
29822+ pax_track_stack();
29823+
29824 if (len >= (sizeof(hex) / 2))
29825 len = (sizeof(hex) / 2) - 1;
29826
29827diff -urNp linux-3.0.3/drivers/net/wan/hdlc_x25.c linux-3.0.3/drivers/net/wan/hdlc_x25.c
29828--- linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29829+++ linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29830@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29831
29832 static int x25_open(struct net_device *dev)
29833 {
29834- struct lapb_register_struct cb;
29835+ static struct lapb_register_struct cb = {
29836+ .connect_confirmation = x25_connected,
29837+ .connect_indication = x25_connected,
29838+ .disconnect_confirmation = x25_disconnected,
29839+ .disconnect_indication = x25_disconnected,
29840+ .data_indication = x25_data_indication,
29841+ .data_transmit = x25_data_transmit
29842+ };
29843 int result;
29844
29845- cb.connect_confirmation = x25_connected;
29846- cb.connect_indication = x25_connected;
29847- cb.disconnect_confirmation = x25_disconnected;
29848- cb.disconnect_indication = x25_disconnected;
29849- cb.data_indication = x25_data_indication;
29850- cb.data_transmit = x25_data_transmit;
29851-
29852 result = lapb_register(dev, &cb);
29853 if (result != LAPB_OK)
29854 return result;
29855diff -urNp linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c
29856--- linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29857+++ linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29858@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29859 int do_autopm = 1;
29860 DECLARE_COMPLETION_ONSTACK(notif_completion);
29861
29862+ pax_track_stack();
29863+
29864 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29865 i2400m, ack, ack_size);
29866 BUG_ON(_ack == i2400m->bm_ack_buf);
29867diff -urNp linux-3.0.3/drivers/net/wireless/airo.c linux-3.0.3/drivers/net/wireless/airo.c
29868--- linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29869+++ linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29870@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29871 BSSListElement * loop_net;
29872 BSSListElement * tmp_net;
29873
29874+ pax_track_stack();
29875+
29876 /* Blow away current list of scan results */
29877 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29878 list_move_tail (&loop_net->list, &ai->network_free_list);
29879@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29880 WepKeyRid wkr;
29881 int rc;
29882
29883+ pax_track_stack();
29884+
29885 memset( &mySsid, 0, sizeof( mySsid ) );
29886 kfree (ai->flash);
29887 ai->flash = NULL;
29888@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29889 __le32 *vals = stats.vals;
29890 int len;
29891
29892+ pax_track_stack();
29893+
29894 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29895 return -ENOMEM;
29896 data = file->private_data;
29897@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29898 /* If doLoseSync is not 1, we won't do a Lose Sync */
29899 int doLoseSync = -1;
29900
29901+ pax_track_stack();
29902+
29903 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29904 return -ENOMEM;
29905 data = file->private_data;
29906@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29907 int i;
29908 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29909
29910+ pax_track_stack();
29911+
29912 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29913 if (!qual)
29914 return -ENOMEM;
29915@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29916 CapabilityRid cap_rid;
29917 __le32 *vals = stats_rid.vals;
29918
29919+ pax_track_stack();
29920+
29921 /* Get stats out of the card */
29922 clear_bit(JOB_WSTATS, &local->jobs);
29923 if (local->power.event) {
29924diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c
29925--- linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29926+++ linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29927@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29928 unsigned int v;
29929 u64 tsf;
29930
29931+ pax_track_stack();
29932+
29933 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29934 len += snprintf(buf+len, sizeof(buf)-len,
29935 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29936@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29937 unsigned int len = 0;
29938 unsigned int i;
29939
29940+ pax_track_stack();
29941+
29942 len += snprintf(buf+len, sizeof(buf)-len,
29943 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29944
29945@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29946 unsigned int i;
29947 unsigned int v;
29948
29949+ pax_track_stack();
29950+
29951 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29952 sc->ah->ah_ant_mode);
29953 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29954@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29955 unsigned int len = 0;
29956 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29957
29958+ pax_track_stack();
29959+
29960 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29961 sc->bssidmask);
29962 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29963@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29964 unsigned int len = 0;
29965 int i;
29966
29967+ pax_track_stack();
29968+
29969 len += snprintf(buf+len, sizeof(buf)-len,
29970 "RX\n---------------------\n");
29971 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29972@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29973 char buf[700];
29974 unsigned int len = 0;
29975
29976+ pax_track_stack();
29977+
29978 len += snprintf(buf+len, sizeof(buf)-len,
29979 "HW has PHY error counters:\t%s\n",
29980 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29981@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29982 struct ath5k_buf *bf, *bf0;
29983 int i, n;
29984
29985+ pax_track_stack();
29986+
29987 len += snprintf(buf+len, sizeof(buf)-len,
29988 "available txbuffers: %d\n", sc->txbuf_len);
29989
29990diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29991--- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29992+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29993@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29994 int i, im, j;
29995 int nmeasurement;
29996
29997+ pax_track_stack();
29998+
29999 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30000 if (ah->txchainmask & (1 << i))
30001 num_chains++;
30002diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30003--- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
30004+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
30005@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30006 int theta_low_bin = 0;
30007 int i;
30008
30009+ pax_track_stack();
30010+
30011 /* disregard any bin that contains <= 16 samples */
30012 thresh_accum_cnt = 16;
30013 scale_factor = 5;
30014diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c
30015--- linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30016+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30017@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30018 char buf[512];
30019 unsigned int len = 0;
30020
30021+ pax_track_stack();
30022+
30023 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30024 len += snprintf(buf + len, sizeof(buf) - len,
30025 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30026@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30027 u8 addr[ETH_ALEN];
30028 u32 tmp;
30029
30030+ pax_track_stack();
30031+
30032 len += snprintf(buf + len, sizeof(buf) - len,
30033 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30034 wiphy_name(sc->hw->wiphy),
30035diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30036--- linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30037+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30038@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30039 unsigned int len = 0;
30040 int ret = 0;
30041
30042+ pax_track_stack();
30043+
30044 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30045
30046 ath9k_htc_ps_wakeup(priv);
30047@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30048 unsigned int len = 0;
30049 int ret = 0;
30050
30051+ pax_track_stack();
30052+
30053 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30054
30055 ath9k_htc_ps_wakeup(priv);
30056@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30057 unsigned int len = 0;
30058 int ret = 0;
30059
30060+ pax_track_stack();
30061+
30062 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30063
30064 ath9k_htc_ps_wakeup(priv);
30065@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30066 char buf[512];
30067 unsigned int len = 0;
30068
30069+ pax_track_stack();
30070+
30071 len += snprintf(buf + len, sizeof(buf) - len,
30072 "%20s : %10u\n", "Buffers queued",
30073 priv->debug.tx_stats.buf_queued);
30074@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30075 char buf[512];
30076 unsigned int len = 0;
30077
30078+ pax_track_stack();
30079+
30080 spin_lock_bh(&priv->tx.tx_lock);
30081
30082 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30083@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30084 char buf[512];
30085 unsigned int len = 0;
30086
30087+ pax_track_stack();
30088+
30089 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30090 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30091
30092diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h
30093--- linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30094+++ linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30095@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30096
30097 /* ANI */
30098 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30099-};
30100+} __no_const;
30101
30102 /**
30103 * struct ath_hw_ops - callbacks used by hardware code and driver code
30104@@ -637,7 +637,7 @@ struct ath_hw_ops {
30105 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30106 struct ath_hw_antcomb_conf *antconf);
30107
30108-};
30109+} __no_const;
30110
30111 struct ath_nf_limits {
30112 s16 max;
30113@@ -650,7 +650,7 @@ struct ath_nf_limits {
30114 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30115
30116 struct ath_hw {
30117- struct ath_ops reg_ops;
30118+ ath_ops_no_const reg_ops;
30119
30120 struct ieee80211_hw *hw;
30121 struct ath_common common;
30122diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath.h linux-3.0.3/drivers/net/wireless/ath/ath.h
30123--- linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30124+++ linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30125@@ -121,6 +121,7 @@ struct ath_ops {
30126 void (*write_flush) (void *);
30127 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30128 };
30129+typedef struct ath_ops __no_const ath_ops_no_const;
30130
30131 struct ath_common;
30132 struct ath_bus_ops;
30133diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c
30134--- linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30135+++ linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30136@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30137 int err;
30138 DECLARE_SSID_BUF(ssid);
30139
30140+ pax_track_stack();
30141+
30142 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30143
30144 if (ssid_len)
30145@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30146 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30147 int err;
30148
30149+ pax_track_stack();
30150+
30151 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30152 idx, keylen, len);
30153
30154diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c
30155--- linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30156+++ linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30157@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30158 unsigned long flags;
30159 DECLARE_SSID_BUF(ssid);
30160
30161+ pax_track_stack();
30162+
30163 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30164 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30165 print_ssid(ssid, info_element->data, info_element->len),
30166diff -urNp linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c
30167--- linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30168+++ linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30169@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30170 */
30171 if (iwl3945_mod_params.disable_hw_scan) {
30172 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30173- iwl3945_hw_ops.hw_scan = NULL;
30174+ pax_open_kernel();
30175+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30176+ pax_close_kernel();
30177 }
30178
30179 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30180diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30181--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30182+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30183@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30186
30187+ pax_track_stack();
30188+
30189 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30190
30191 /* Treat uninitialized rate scaling data same as non-existing. */
30192@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30193 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30194 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30195
30196+ pax_track_stack();
30197+
30198 /* Override starting rate (index 0) if needed for debug purposes */
30199 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30200
30201diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30202--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30203+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30204@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30205 int pos = 0;
30206 const size_t bufsz = sizeof(buf);
30207
30208+ pax_track_stack();
30209+
30210 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30211 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30212 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30213@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30214 char buf[256 * NUM_IWL_RXON_CTX];
30215 const size_t bufsz = sizeof(buf);
30216
30217+ pax_track_stack();
30218+
30219 for_each_context(priv, ctx) {
30220 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30221 ctx->ctxid);
30222diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h
30223--- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30224+++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30225@@ -68,8 +68,8 @@ do {
30226 } while (0)
30227
30228 #else
30229-#define IWL_DEBUG(__priv, level, fmt, args...)
30230-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30231+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30232+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30233 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30234 const void *p, u32 len)
30235 {}
30236diff -urNp linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
30237--- linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30238+++ linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30239@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30240 int buf_len = 512;
30241 size_t len = 0;
30242
30243+ pax_track_stack();
30244+
30245 if (*ppos != 0)
30246 return 0;
30247 if (count < sizeof(buf))
30248diff -urNp linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c
30249--- linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30250+++ linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30251@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30252 return -EINVAL;
30253
30254 if (fake_hw_scan) {
30255- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30256- mac80211_hwsim_ops.sw_scan_start = NULL;
30257- mac80211_hwsim_ops.sw_scan_complete = NULL;
30258+ pax_open_kernel();
30259+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30260+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30261+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30262+ pax_close_kernel();
30263 }
30264
30265 spin_lock_init(&hwsim_radio_lock);
30266diff -urNp linux-3.0.3/drivers/net/wireless/rndis_wlan.c linux-3.0.3/drivers/net/wireless/rndis_wlan.c
30267--- linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30268+++ linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30269@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30270
30271 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30272
30273- if (rts_threshold < 0 || rts_threshold > 2347)
30274+ if (rts_threshold > 2347)
30275 rts_threshold = 2347;
30276
30277 tmp = cpu_to_le32(rts_threshold);
30278diff -urNp linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30279--- linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30280+++ linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30281@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30282 u8 rfpath;
30283 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30284
30285+ pax_track_stack();
30286+
30287 precommoncmdcnt = 0;
30288 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30289 MAX_PRECMD_CNT,
30290diff -urNp linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h
30291--- linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30292+++ linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30293@@ -266,7 +266,7 @@ struct wl1251_if_operations {
30294 void (*reset)(struct wl1251 *wl);
30295 void (*enable_irq)(struct wl1251 *wl);
30296 void (*disable_irq)(struct wl1251 *wl);
30297-};
30298+} __no_const;
30299
30300 struct wl1251 {
30301 struct ieee80211_hw *hw;
30302diff -urNp linux-3.0.3/drivers/net/wireless/wl12xx/spi.c linux-3.0.3/drivers/net/wireless/wl12xx/spi.c
30303--- linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30304+++ linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30305@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30306 u32 chunk_len;
30307 int i;
30308
30309+ pax_track_stack();
30310+
30311 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30312
30313 spi_message_init(&m);
30314diff -urNp linux-3.0.3/drivers/oprofile/buffer_sync.c linux-3.0.3/drivers/oprofile/buffer_sync.c
30315--- linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30316+++ linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30317@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30318 if (cookie == NO_COOKIE)
30319 offset = pc;
30320 if (cookie == INVALID_COOKIE) {
30321- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30322+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30323 offset = pc;
30324 }
30325 if (cookie != last_cookie) {
30326@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30327 /* add userspace sample */
30328
30329 if (!mm) {
30330- atomic_inc(&oprofile_stats.sample_lost_no_mm);
30331+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30332 return 0;
30333 }
30334
30335 cookie = lookup_dcookie(mm, s->eip, &offset);
30336
30337 if (cookie == INVALID_COOKIE) {
30338- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30339+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30340 return 0;
30341 }
30342
30343@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30344 /* ignore backtraces if failed to add a sample */
30345 if (state == sb_bt_start) {
30346 state = sb_bt_ignore;
30347- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30348+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30349 }
30350 }
30351 release_mm(mm);
30352diff -urNp linux-3.0.3/drivers/oprofile/event_buffer.c linux-3.0.3/drivers/oprofile/event_buffer.c
30353--- linux-3.0.3/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30354+++ linux-3.0.3/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30355@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30356 }
30357
30358 if (buffer_pos == buffer_size) {
30359- atomic_inc(&oprofile_stats.event_lost_overflow);
30360+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30361 return;
30362 }
30363
30364diff -urNp linux-3.0.3/drivers/oprofile/oprof.c linux-3.0.3/drivers/oprofile/oprof.c
30365--- linux-3.0.3/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30366+++ linux-3.0.3/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30367@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30368 if (oprofile_ops.switch_events())
30369 return;
30370
30371- atomic_inc(&oprofile_stats.multiplex_counter);
30372+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30373 start_switch_worker();
30374 }
30375
30376diff -urNp linux-3.0.3/drivers/oprofile/oprofilefs.c linux-3.0.3/drivers/oprofile/oprofilefs.c
30377--- linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30378+++ linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30379@@ -186,7 +186,7 @@ static const struct file_operations atom
30380
30381
30382 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30383- char const *name, atomic_t *val)
30384+ char const *name, atomic_unchecked_t *val)
30385 {
30386 return __oprofilefs_create_file(sb, root, name,
30387 &atomic_ro_fops, 0444, val);
30388diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.c linux-3.0.3/drivers/oprofile/oprofile_stats.c
30389--- linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30390+++ linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30391@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30392 cpu_buf->sample_invalid_eip = 0;
30393 }
30394
30395- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30396- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30397- atomic_set(&oprofile_stats.event_lost_overflow, 0);
30398- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30399- atomic_set(&oprofile_stats.multiplex_counter, 0);
30400+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30401+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30402+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30403+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30404+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30405 }
30406
30407
30408diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.h linux-3.0.3/drivers/oprofile/oprofile_stats.h
30409--- linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30410+++ linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30411@@ -13,11 +13,11 @@
30412 #include <asm/atomic.h>
30413
30414 struct oprofile_stat_struct {
30415- atomic_t sample_lost_no_mm;
30416- atomic_t sample_lost_no_mapping;
30417- atomic_t bt_lost_no_mapping;
30418- atomic_t event_lost_overflow;
30419- atomic_t multiplex_counter;
30420+ atomic_unchecked_t sample_lost_no_mm;
30421+ atomic_unchecked_t sample_lost_no_mapping;
30422+ atomic_unchecked_t bt_lost_no_mapping;
30423+ atomic_unchecked_t event_lost_overflow;
30424+ atomic_unchecked_t multiplex_counter;
30425 };
30426
30427 extern struct oprofile_stat_struct oprofile_stats;
30428diff -urNp linux-3.0.3/drivers/parport/procfs.c linux-3.0.3/drivers/parport/procfs.c
30429--- linux-3.0.3/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30430+++ linux-3.0.3/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30431@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30432
30433 *ppos += len;
30434
30435- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30436+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30437 }
30438
30439 #ifdef CONFIG_PARPORT_1284
30440@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30441
30442 *ppos += len;
30443
30444- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30445+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30446 }
30447 #endif /* IEEE1284.3 support. */
30448
30449diff -urNp linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h
30450--- linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30451+++ linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30452@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30453 int (*hardware_test) (struct slot* slot, u32 value);
30454 u8 (*get_power) (struct slot* slot);
30455 int (*set_power) (struct slot* slot, int value);
30456-};
30457+} __no_const;
30458
30459 struct cpci_hp_controller {
30460 unsigned int irq;
30461diff -urNp linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c
30462--- linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30463+++ linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30464@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30465
30466 void compaq_nvram_init (void __iomem *rom_start)
30467 {
30468+
30469+#ifndef CONFIG_PAX_KERNEXEC
30470 if (rom_start) {
30471 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30472 }
30473+#endif
30474+
30475 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30476
30477 /* initialize our int15 lock */
30478diff -urNp linux-3.0.3/drivers/pci/pcie/aspm.c linux-3.0.3/drivers/pci/pcie/aspm.c
30479--- linux-3.0.3/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30480+++ linux-3.0.3/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30481@@ -27,9 +27,9 @@
30482 #define MODULE_PARAM_PREFIX "pcie_aspm."
30483
30484 /* Note: those are not register definitions */
30485-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30486-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30487-#define ASPM_STATE_L1 (4) /* L1 state */
30488+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30489+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30490+#define ASPM_STATE_L1 (4U) /* L1 state */
30491 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30492 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30493
30494diff -urNp linux-3.0.3/drivers/pci/probe.c linux-3.0.3/drivers/pci/probe.c
30495--- linux-3.0.3/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30496+++ linux-3.0.3/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30497@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30498 u32 l, sz, mask;
30499 u16 orig_cmd;
30500
30501- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30502+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30503
30504 if (!dev->mmio_always_on) {
30505 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30506diff -urNp linux-3.0.3/drivers/pci/proc.c linux-3.0.3/drivers/pci/proc.c
30507--- linux-3.0.3/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30508+++ linux-3.0.3/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30509@@ -476,7 +476,16 @@ static const struct file_operations proc
30510 static int __init pci_proc_init(void)
30511 {
30512 struct pci_dev *dev = NULL;
30513+
30514+#ifdef CONFIG_GRKERNSEC_PROC_ADD
30515+#ifdef CONFIG_GRKERNSEC_PROC_USER
30516+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30517+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30518+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30519+#endif
30520+#else
30521 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30522+#endif
30523 proc_create("devices", 0, proc_bus_pci_dir,
30524 &proc_bus_pci_dev_operations);
30525 proc_initialized = 1;
30526diff -urNp linux-3.0.3/drivers/pci/xen-pcifront.c linux-3.0.3/drivers/pci/xen-pcifront.c
30527--- linux-3.0.3/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30528+++ linux-3.0.3/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30529@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30530 struct pcifront_sd *sd = bus->sysdata;
30531 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30532
30533+ pax_track_stack();
30534+
30535 if (verbose_request)
30536 dev_info(&pdev->xdev->dev,
30537 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30538@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30539 struct pcifront_sd *sd = bus->sysdata;
30540 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30541
30542+ pax_track_stack();
30543+
30544 if (verbose_request)
30545 dev_info(&pdev->xdev->dev,
30546 "write dev=%04x:%02x:%02x.%01x - "
30547@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30548 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30549 struct msi_desc *entry;
30550
30551+ pax_track_stack();
30552+
30553 if (nvec > SH_INFO_MAX_VEC) {
30554 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30555 " Increase SH_INFO_MAX_VEC.\n", nvec);
30556@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30557 struct pcifront_sd *sd = dev->bus->sysdata;
30558 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30559
30560+ pax_track_stack();
30561+
30562 err = do_pci_op(pdev, &op);
30563
30564 /* What should do for error ? */
30565@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30566 struct pcifront_sd *sd = dev->bus->sysdata;
30567 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30568
30569+ pax_track_stack();
30570+
30571 err = do_pci_op(pdev, &op);
30572 if (likely(!err)) {
30573 vector[0] = op.value;
30574diff -urNp linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c
30575--- linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30576+++ linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30577@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30578 return 0;
30579 }
30580
30581-void static hotkey_mask_warn_incomplete_mask(void)
30582+static void hotkey_mask_warn_incomplete_mask(void)
30583 {
30584 /* log only what the user can fix... */
30585 const u32 wantedmask = hotkey_driver_mask &
30586diff -urNp linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c
30587--- linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30588+++ linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30589@@ -59,7 +59,7 @@ do { \
30590 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30591 } while(0)
30592
30593-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30594+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30595 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30596
30597 /*
30598@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30599
30600 cpu = get_cpu();
30601 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30602+
30603+ pax_open_kernel();
30604 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30605+ pax_close_kernel();
30606
30607 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30608 spin_lock_irqsave(&pnp_bios_lock, flags);
30609@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30610 :"memory");
30611 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30612
30613+ pax_open_kernel();
30614 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30615+ pax_close_kernel();
30616+
30617 put_cpu();
30618
30619 /* If we get here and this is set then the PnP BIOS faulted on us. */
30620@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30621 return status;
30622 }
30623
30624-void pnpbios_calls_init(union pnp_bios_install_struct *header)
30625+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30626 {
30627 int i;
30628
30629@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30630 pnp_bios_callpoint.offset = header->fields.pm16offset;
30631 pnp_bios_callpoint.segment = PNP_CS16;
30632
30633+ pax_open_kernel();
30634+
30635 for_each_possible_cpu(i) {
30636 struct desc_struct *gdt = get_cpu_gdt_table(i);
30637 if (!gdt)
30638@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30639 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30640 (unsigned long)__va(header->fields.pm16dseg));
30641 }
30642+
30643+ pax_close_kernel();
30644 }
30645diff -urNp linux-3.0.3/drivers/pnp/resource.c linux-3.0.3/drivers/pnp/resource.c
30646--- linux-3.0.3/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30647+++ linux-3.0.3/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30648@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30649 return 1;
30650
30651 /* check if the resource is valid */
30652- if (*irq < 0 || *irq > 15)
30653+ if (*irq > 15)
30654 return 0;
30655
30656 /* check if the resource is reserved */
30657@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30658 return 1;
30659
30660 /* check if the resource is valid */
30661- if (*dma < 0 || *dma == 4 || *dma > 7)
30662+ if (*dma == 4 || *dma > 7)
30663 return 0;
30664
30665 /* check if the resource is reserved */
30666diff -urNp linux-3.0.3/drivers/power/bq27x00_battery.c linux-3.0.3/drivers/power/bq27x00_battery.c
30667--- linux-3.0.3/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30668+++ linux-3.0.3/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30669@@ -67,7 +67,7 @@
30670 struct bq27x00_device_info;
30671 struct bq27x00_access_methods {
30672 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30673-};
30674+} __no_const;
30675
30676 enum bq27x00_chip { BQ27000, BQ27500 };
30677
30678diff -urNp linux-3.0.3/drivers/regulator/max8660.c linux-3.0.3/drivers/regulator/max8660.c
30679--- linux-3.0.3/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30680+++ linux-3.0.3/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30681@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30682 max8660->shadow_regs[MAX8660_OVER1] = 5;
30683 } else {
30684 /* Otherwise devices can be toggled via software */
30685- max8660_dcdc_ops.enable = max8660_dcdc_enable;
30686- max8660_dcdc_ops.disable = max8660_dcdc_disable;
30687+ pax_open_kernel();
30688+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30689+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30690+ pax_close_kernel();
30691 }
30692
30693 /*
30694diff -urNp linux-3.0.3/drivers/regulator/mc13892-regulator.c linux-3.0.3/drivers/regulator/mc13892-regulator.c
30695--- linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30696+++ linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30697@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30698 }
30699 mc13xxx_unlock(mc13892);
30700
30701- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30702+ pax_open_kernel();
30703+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30704 = mc13892_vcam_set_mode;
30705- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30706+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30707 = mc13892_vcam_get_mode;
30708+ pax_close_kernel();
30709 for (i = 0; i < pdata->num_regulators; i++) {
30710 init_data = &pdata->regulators[i];
30711 priv->regulators[i] = regulator_register(
30712diff -urNp linux-3.0.3/drivers/rtc/rtc-dev.c linux-3.0.3/drivers/rtc/rtc-dev.c
30713--- linux-3.0.3/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30714+++ linux-3.0.3/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30715@@ -14,6 +14,7 @@
30716 #include <linux/module.h>
30717 #include <linux/rtc.h>
30718 #include <linux/sched.h>
30719+#include <linux/grsecurity.h>
30720 #include "rtc-core.h"
30721
30722 static dev_t rtc_devt;
30723@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30724 if (copy_from_user(&tm, uarg, sizeof(tm)))
30725 return -EFAULT;
30726
30727+ gr_log_timechange();
30728+
30729 return rtc_set_time(rtc, &tm);
30730
30731 case RTC_PIE_ON:
30732diff -urNp linux-3.0.3/drivers/scsi/aacraid/aacraid.h linux-3.0.3/drivers/scsi/aacraid/aacraid.h
30733--- linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30734+++ linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30735@@ -492,7 +492,7 @@ struct adapter_ops
30736 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30737 /* Administrative operations */
30738 int (*adapter_comm)(struct aac_dev * dev, int comm);
30739-};
30740+} __no_const;
30741
30742 /*
30743 * Define which interrupt handler needs to be installed
30744diff -urNp linux-3.0.3/drivers/scsi/aacraid/commctrl.c linux-3.0.3/drivers/scsi/aacraid/commctrl.c
30745--- linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30746+++ linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30747@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30748 u32 actual_fibsize64, actual_fibsize = 0;
30749 int i;
30750
30751+ pax_track_stack();
30752
30753 if (dev->in_reset) {
30754 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30755diff -urNp linux-3.0.3/drivers/scsi/bfa/bfad.c linux-3.0.3/drivers/scsi/bfa/bfad.c
30756--- linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30757+++ linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30758@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30759 struct bfad_vport_s *vport, *vport_new;
30760 struct bfa_fcs_driver_info_s driver_info;
30761
30762+ pax_track_stack();
30763+
30764 /* Fill the driver_info info to fcs*/
30765 memset(&driver_info, 0, sizeof(driver_info));
30766 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30767diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c
30768--- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30769+++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30770@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30771 u16 len, count;
30772 u16 templen;
30773
30774+ pax_track_stack();
30775+
30776 /*
30777 * get hba attributes
30778 */
30779@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30780 u8 count = 0;
30781 u16 templen;
30782
30783+ pax_track_stack();
30784+
30785 /*
30786 * get port attributes
30787 */
30788diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c
30789--- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30790+++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30791@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30792 struct fc_rpsc_speed_info_s speeds;
30793 struct bfa_port_attr_s pport_attr;
30794
30795+ pax_track_stack();
30796+
30797 bfa_trc(port->fcs, rx_fchs->s_id);
30798 bfa_trc(port->fcs, rx_fchs->d_id);
30799
30800diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa.h linux-3.0.3/drivers/scsi/bfa/bfa.h
30801--- linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30802+++ linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30803@@ -238,7 +238,7 @@ struct bfa_hwif_s {
30804 u32 *nvecs, u32 *maxvec);
30805 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30806 u32 *end);
30807-};
30808+} __no_const;
30809 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30810
30811 struct bfa_iocfc_s {
30812diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h
30813--- linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30814+++ linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30815@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30816 bfa_ioc_disable_cbfn_t disable_cbfn;
30817 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30818 bfa_ioc_reset_cbfn_t reset_cbfn;
30819-};
30820+} __no_const;
30821
30822 /*
30823 * Heartbeat failure notification queue element.
30824@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30825 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30826 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30827 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30828-};
30829+} __no_const;
30830
30831 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30832 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30833diff -urNp linux-3.0.3/drivers/scsi/BusLogic.c linux-3.0.3/drivers/scsi/BusLogic.c
30834--- linux-3.0.3/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30835+++ linux-3.0.3/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30836@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30837 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30838 *PrototypeHostAdapter)
30839 {
30840+ pax_track_stack();
30841+
30842 /*
30843 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30844 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30845diff -urNp linux-3.0.3/drivers/scsi/dpt_i2o.c linux-3.0.3/drivers/scsi/dpt_i2o.c
30846--- linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30847+++ linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30848@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30849 dma_addr_t addr;
30850 ulong flags = 0;
30851
30852+ pax_track_stack();
30853+
30854 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30855 // get user msg size in u32s
30856 if(get_user(size, &user_msg[0])){
30857@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30858 s32 rcode;
30859 dma_addr_t addr;
30860
30861+ pax_track_stack();
30862+
30863 memset(msg, 0 , sizeof(msg));
30864 len = scsi_bufflen(cmd);
30865 direction = 0x00000000;
30866diff -urNp linux-3.0.3/drivers/scsi/eata.c linux-3.0.3/drivers/scsi/eata.c
30867--- linux-3.0.3/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30868+++ linux-3.0.3/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30869@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30870 struct hostdata *ha;
30871 char name[16];
30872
30873+ pax_track_stack();
30874+
30875 sprintf(name, "%s%d", driver_name, j);
30876
30877 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30878diff -urNp linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c
30879--- linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30880+++ linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30881@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30882 } buf;
30883 int rc;
30884
30885+ pax_track_stack();
30886+
30887 fiph = (struct fip_header *)skb->data;
30888 sub = fiph->fip_subcode;
30889
30890diff -urNp linux-3.0.3/drivers/scsi/gdth.c linux-3.0.3/drivers/scsi/gdth.c
30891--- linux-3.0.3/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30892+++ linux-3.0.3/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30893@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30894 unsigned long flags;
30895 gdth_ha_str *ha;
30896
30897+ pax_track_stack();
30898+
30899 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30900 return -EFAULT;
30901 ha = gdth_find_ha(ldrv.ionode);
30902@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30903 gdth_ha_str *ha;
30904 int rval;
30905
30906+ pax_track_stack();
30907+
30908 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30909 res.number >= MAX_HDRIVES)
30910 return -EFAULT;
30911@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30912 gdth_ha_str *ha;
30913 int rval;
30914
30915+ pax_track_stack();
30916+
30917 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30918 return -EFAULT;
30919 ha = gdth_find_ha(gen.ionode);
30920@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30921 int i;
30922 gdth_cmd_str gdtcmd;
30923 char cmnd[MAX_COMMAND_SIZE];
30924+
30925+ pax_track_stack();
30926+
30927 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30928
30929 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30930diff -urNp linux-3.0.3/drivers/scsi/gdth_proc.c linux-3.0.3/drivers/scsi/gdth_proc.c
30931--- linux-3.0.3/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30932+++ linux-3.0.3/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30933@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30934 u64 paddr;
30935
30936 char cmnd[MAX_COMMAND_SIZE];
30937+
30938+ pax_track_stack();
30939+
30940 memset(cmnd, 0xff, 12);
30941 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30942
30943@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30944 gdth_hget_str *phg;
30945 char cmnd[MAX_COMMAND_SIZE];
30946
30947+ pax_track_stack();
30948+
30949 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30950 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30951 if (!gdtcmd || !estr)
30952diff -urNp linux-3.0.3/drivers/scsi/hosts.c linux-3.0.3/drivers/scsi/hosts.c
30953--- linux-3.0.3/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30954+++ linux-3.0.3/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30955@@ -42,7 +42,7 @@
30956 #include "scsi_logging.h"
30957
30958
30959-static atomic_t scsi_host_next_hn; /* host_no for next new host */
30960+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30961
30962
30963 static void scsi_host_cls_release(struct device *dev)
30964@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30965 * subtract one because we increment first then return, but we need to
30966 * know what the next host number was before increment
30967 */
30968- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30969+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30970 shost->dma_channel = 0xff;
30971
30972 /* These three are default values which can be overridden */
30973diff -urNp linux-3.0.3/drivers/scsi/hpsa.c linux-3.0.3/drivers/scsi/hpsa.c
30974--- linux-3.0.3/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30975+++ linux-3.0.3/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30976@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30977 u32 a;
30978
30979 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30980- return h->access.command_completed(h);
30981+ return h->access->command_completed(h);
30982
30983 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30984 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30985@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30986 while (!list_empty(&h->reqQ)) {
30987 c = list_entry(h->reqQ.next, struct CommandList, list);
30988 /* can't do anything if fifo is full */
30989- if ((h->access.fifo_full(h))) {
30990+ if ((h->access->fifo_full(h))) {
30991 dev_warn(&h->pdev->dev, "fifo full\n");
30992 break;
30993 }
30994@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30995 h->Qdepth--;
30996
30997 /* Tell the controller execute command */
30998- h->access.submit_command(h, c);
30999+ h->access->submit_command(h, c);
31000
31001 /* Put job onto the completed Q */
31002 addQ(&h->cmpQ, c);
31003@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
31004
31005 static inline unsigned long get_next_completion(struct ctlr_info *h)
31006 {
31007- return h->access.command_completed(h);
31008+ return h->access->command_completed(h);
31009 }
31010
31011 static inline bool interrupt_pending(struct ctlr_info *h)
31012 {
31013- return h->access.intr_pending(h);
31014+ return h->access->intr_pending(h);
31015 }
31016
31017 static inline long interrupt_not_for_us(struct ctlr_info *h)
31018 {
31019- return (h->access.intr_pending(h) == 0) ||
31020+ return (h->access->intr_pending(h) == 0) ||
31021 (h->interrupts_enabled == 0);
31022 }
31023
31024@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31025 if (prod_index < 0)
31026 return -ENODEV;
31027 h->product_name = products[prod_index].product_name;
31028- h->access = *(products[prod_index].access);
31029+ h->access = products[prod_index].access;
31030
31031 if (hpsa_board_disabled(h->pdev)) {
31032 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31033@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31034 }
31035
31036 /* make sure the board interrupts are off */
31037- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31038+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31039
31040 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31041 goto clean2;
31042@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31043 * fake ones to scoop up any residual completions.
31044 */
31045 spin_lock_irqsave(&h->lock, flags);
31046- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31047+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31048 spin_unlock_irqrestore(&h->lock, flags);
31049 free_irq(h->intr[h->intr_mode], h);
31050 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31051@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31052 dev_info(&h->pdev->dev, "Board READY.\n");
31053 dev_info(&h->pdev->dev,
31054 "Waiting for stale completions to drain.\n");
31055- h->access.set_intr_mask(h, HPSA_INTR_ON);
31056+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31057 msleep(10000);
31058- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31059+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31060
31061 rc = controller_reset_failed(h->cfgtable);
31062 if (rc)
31063@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31064 }
31065
31066 /* Turn the interrupts on so we can service requests */
31067- h->access.set_intr_mask(h, HPSA_INTR_ON);
31068+ h->access->set_intr_mask(h, HPSA_INTR_ON);
31069
31070 hpsa_hba_inquiry(h);
31071 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31072@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31073 * To write all data in the battery backed cache to disks
31074 */
31075 hpsa_flush_cache(h);
31076- h->access.set_intr_mask(h, HPSA_INTR_OFF);
31077+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
31078 free_irq(h->intr[h->intr_mode], h);
31079 #ifdef CONFIG_PCI_MSI
31080 if (h->msix_vector)
31081@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31082 return;
31083 }
31084 /* Change the access methods to the performant access methods */
31085- h->access = SA5_performant_access;
31086+ h->access = &SA5_performant_access;
31087 h->transMethod = CFGTBL_Trans_Performant;
31088 }
31089
31090diff -urNp linux-3.0.3/drivers/scsi/hpsa.h linux-3.0.3/drivers/scsi/hpsa.h
31091--- linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31092+++ linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31093@@ -73,7 +73,7 @@ struct ctlr_info {
31094 unsigned int msix_vector;
31095 unsigned int msi_vector;
31096 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31097- struct access_method access;
31098+ struct access_method *access;
31099
31100 /* queue and queue Info */
31101 struct list_head reqQ;
31102diff -urNp linux-3.0.3/drivers/scsi/ips.h linux-3.0.3/drivers/scsi/ips.h
31103--- linux-3.0.3/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31104+++ linux-3.0.3/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31105@@ -1027,7 +1027,7 @@ typedef struct {
31106 int (*intr)(struct ips_ha *);
31107 void (*enableint)(struct ips_ha *);
31108 uint32_t (*statupd)(struct ips_ha *);
31109-} ips_hw_func_t;
31110+} __no_const ips_hw_func_t;
31111
31112 typedef struct ips_ha {
31113 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31114diff -urNp linux-3.0.3/drivers/scsi/libfc/fc_exch.c linux-3.0.3/drivers/scsi/libfc/fc_exch.c
31115--- linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31116+++ linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31117@@ -105,12 +105,12 @@ struct fc_exch_mgr {
31118 * all together if not used XXX
31119 */
31120 struct {
31121- atomic_t no_free_exch;
31122- atomic_t no_free_exch_xid;
31123- atomic_t xid_not_found;
31124- atomic_t xid_busy;
31125- atomic_t seq_not_found;
31126- atomic_t non_bls_resp;
31127+ atomic_unchecked_t no_free_exch;
31128+ atomic_unchecked_t no_free_exch_xid;
31129+ atomic_unchecked_t xid_not_found;
31130+ atomic_unchecked_t xid_busy;
31131+ atomic_unchecked_t seq_not_found;
31132+ atomic_unchecked_t non_bls_resp;
31133 } stats;
31134 };
31135
31136@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31137 /* allocate memory for exchange */
31138 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31139 if (!ep) {
31140- atomic_inc(&mp->stats.no_free_exch);
31141+ atomic_inc_unchecked(&mp->stats.no_free_exch);
31142 goto out;
31143 }
31144 memset(ep, 0, sizeof(*ep));
31145@@ -761,7 +761,7 @@ out:
31146 return ep;
31147 err:
31148 spin_unlock_bh(&pool->lock);
31149- atomic_inc(&mp->stats.no_free_exch_xid);
31150+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31151 mempool_free(ep, mp->ep_pool);
31152 return NULL;
31153 }
31154@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31155 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31156 ep = fc_exch_find(mp, xid);
31157 if (!ep) {
31158- atomic_inc(&mp->stats.xid_not_found);
31159+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31160 reject = FC_RJT_OX_ID;
31161 goto out;
31162 }
31163@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31164 ep = fc_exch_find(mp, xid);
31165 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31166 if (ep) {
31167- atomic_inc(&mp->stats.xid_busy);
31168+ atomic_inc_unchecked(&mp->stats.xid_busy);
31169 reject = FC_RJT_RX_ID;
31170 goto rel;
31171 }
31172@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31173 }
31174 xid = ep->xid; /* get our XID */
31175 } else if (!ep) {
31176- atomic_inc(&mp->stats.xid_not_found);
31177+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31178 reject = FC_RJT_RX_ID; /* XID not found */
31179 goto out;
31180 }
31181@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31182 } else {
31183 sp = &ep->seq;
31184 if (sp->id != fh->fh_seq_id) {
31185- atomic_inc(&mp->stats.seq_not_found);
31186+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31187 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31188 goto rel;
31189 }
31190@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31191
31192 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31193 if (!ep) {
31194- atomic_inc(&mp->stats.xid_not_found);
31195+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31196 goto out;
31197 }
31198 if (ep->esb_stat & ESB_ST_COMPLETE) {
31199- atomic_inc(&mp->stats.xid_not_found);
31200+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31201 goto rel;
31202 }
31203 if (ep->rxid == FC_XID_UNKNOWN)
31204 ep->rxid = ntohs(fh->fh_rx_id);
31205 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31206- atomic_inc(&mp->stats.xid_not_found);
31207+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31208 goto rel;
31209 }
31210 if (ep->did != ntoh24(fh->fh_s_id) &&
31211 ep->did != FC_FID_FLOGI) {
31212- atomic_inc(&mp->stats.xid_not_found);
31213+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31214 goto rel;
31215 }
31216 sof = fr_sof(fp);
31217@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31218 sp->ssb_stat |= SSB_ST_RESP;
31219 sp->id = fh->fh_seq_id;
31220 } else if (sp->id != fh->fh_seq_id) {
31221- atomic_inc(&mp->stats.seq_not_found);
31222+ atomic_inc_unchecked(&mp->stats.seq_not_found);
31223 goto rel;
31224 }
31225
31226@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31227 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31228
31229 if (!sp)
31230- atomic_inc(&mp->stats.xid_not_found);
31231+ atomic_inc_unchecked(&mp->stats.xid_not_found);
31232 else
31233- atomic_inc(&mp->stats.non_bls_resp);
31234+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
31235
31236 fc_frame_free(fp);
31237 }
31238diff -urNp linux-3.0.3/drivers/scsi/libsas/sas_ata.c linux-3.0.3/drivers/scsi/libsas/sas_ata.c
31239--- linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31240+++ linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31241@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31242 .postreset = ata_std_postreset,
31243 .error_handler = ata_std_error_handler,
31244 .post_internal_cmd = sas_ata_post_internal,
31245- .qc_defer = ata_std_qc_defer,
31246+ .qc_defer = ata_std_qc_defer,
31247 .qc_prep = ata_noop_qc_prep,
31248 .qc_issue = sas_ata_qc_issue,
31249 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31250diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c
31251--- linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31252+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31253@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31254
31255 #include <linux/debugfs.h>
31256
31257-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31258+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31259 static unsigned long lpfc_debugfs_start_time = 0L;
31260
31261 /* iDiag */
31262@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31263 lpfc_debugfs_enable = 0;
31264
31265 len = 0;
31266- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31267+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31268 (lpfc_debugfs_max_disc_trc - 1);
31269 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31270 dtp = vport->disc_trc + i;
31271@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31272 lpfc_debugfs_enable = 0;
31273
31274 len = 0;
31275- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31276+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31277 (lpfc_debugfs_max_slow_ring_trc - 1);
31278 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31279 dtp = phba->slow_ring_trc + i;
31280@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31281 uint32_t *ptr;
31282 char buffer[1024];
31283
31284+ pax_track_stack();
31285+
31286 off = 0;
31287 spin_lock_irq(&phba->hbalock);
31288
31289@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31290 !vport || !vport->disc_trc)
31291 return;
31292
31293- index = atomic_inc_return(&vport->disc_trc_cnt) &
31294+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31295 (lpfc_debugfs_max_disc_trc - 1);
31296 dtp = vport->disc_trc + index;
31297 dtp->fmt = fmt;
31298 dtp->data1 = data1;
31299 dtp->data2 = data2;
31300 dtp->data3 = data3;
31301- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31302+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31303 dtp->jif = jiffies;
31304 #endif
31305 return;
31306@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31307 !phba || !phba->slow_ring_trc)
31308 return;
31309
31310- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31311+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31312 (lpfc_debugfs_max_slow_ring_trc - 1);
31313 dtp = phba->slow_ring_trc + index;
31314 dtp->fmt = fmt;
31315 dtp->data1 = data1;
31316 dtp->data2 = data2;
31317 dtp->data3 = data3;
31318- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31319+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31320 dtp->jif = jiffies;
31321 #endif
31322 return;
31323@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31324 "slow_ring buffer\n");
31325 goto debug_failed;
31326 }
31327- atomic_set(&phba->slow_ring_trc_cnt, 0);
31328+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31329 memset(phba->slow_ring_trc, 0,
31330 (sizeof(struct lpfc_debugfs_trc) *
31331 lpfc_debugfs_max_slow_ring_trc));
31332@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31333 "buffer\n");
31334 goto debug_failed;
31335 }
31336- atomic_set(&vport->disc_trc_cnt, 0);
31337+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31338
31339 snprintf(name, sizeof(name), "discovery_trace");
31340 vport->debug_disc_trc =
31341diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc.h linux-3.0.3/drivers/scsi/lpfc/lpfc.h
31342--- linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31343+++ linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31344@@ -420,7 +420,7 @@ struct lpfc_vport {
31345 struct dentry *debug_nodelist;
31346 struct dentry *vport_debugfs_root;
31347 struct lpfc_debugfs_trc *disc_trc;
31348- atomic_t disc_trc_cnt;
31349+ atomic_unchecked_t disc_trc_cnt;
31350 #endif
31351 uint8_t stat_data_enabled;
31352 uint8_t stat_data_blocked;
31353@@ -826,8 +826,8 @@ struct lpfc_hba {
31354 struct timer_list fabric_block_timer;
31355 unsigned long bit_flags;
31356 #define FABRIC_COMANDS_BLOCKED 0
31357- atomic_t num_rsrc_err;
31358- atomic_t num_cmd_success;
31359+ atomic_unchecked_t num_rsrc_err;
31360+ atomic_unchecked_t num_cmd_success;
31361 unsigned long last_rsrc_error_time;
31362 unsigned long last_ramp_down_time;
31363 unsigned long last_ramp_up_time;
31364@@ -841,7 +841,7 @@ struct lpfc_hba {
31365 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31366 struct dentry *debug_slow_ring_trc;
31367 struct lpfc_debugfs_trc *slow_ring_trc;
31368- atomic_t slow_ring_trc_cnt;
31369+ atomic_unchecked_t slow_ring_trc_cnt;
31370 /* iDiag debugfs sub-directory */
31371 struct dentry *idiag_root;
31372 struct dentry *idiag_pci_cfg;
31373diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c
31374--- linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31375+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31376@@ -9923,8 +9923,10 @@ lpfc_init(void)
31377 printk(LPFC_COPYRIGHT "\n");
31378
31379 if (lpfc_enable_npiv) {
31380- lpfc_transport_functions.vport_create = lpfc_vport_create;
31381- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31382+ pax_open_kernel();
31383+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31384+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31385+ pax_close_kernel();
31386 }
31387 lpfc_transport_template =
31388 fc_attach_transport(&lpfc_transport_functions);
31389diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c
31390--- linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31391+++ linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31392@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31393 uint32_t evt_posted;
31394
31395 spin_lock_irqsave(&phba->hbalock, flags);
31396- atomic_inc(&phba->num_rsrc_err);
31397+ atomic_inc_unchecked(&phba->num_rsrc_err);
31398 phba->last_rsrc_error_time = jiffies;
31399
31400 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31401@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31402 unsigned long flags;
31403 struct lpfc_hba *phba = vport->phba;
31404 uint32_t evt_posted;
31405- atomic_inc(&phba->num_cmd_success);
31406+ atomic_inc_unchecked(&phba->num_cmd_success);
31407
31408 if (vport->cfg_lun_queue_depth <= queue_depth)
31409 return;
31410@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31411 unsigned long num_rsrc_err, num_cmd_success;
31412 int i;
31413
31414- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31415- num_cmd_success = atomic_read(&phba->num_cmd_success);
31416+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31417+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31418
31419 vports = lpfc_create_vport_work_array(phba);
31420 if (vports != NULL)
31421@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31422 }
31423 }
31424 lpfc_destroy_vport_work_array(phba, vports);
31425- atomic_set(&phba->num_rsrc_err, 0);
31426- atomic_set(&phba->num_cmd_success, 0);
31427+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31428+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31429 }
31430
31431 /**
31432@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31433 }
31434 }
31435 lpfc_destroy_vport_work_array(phba, vports);
31436- atomic_set(&phba->num_rsrc_err, 0);
31437- atomic_set(&phba->num_cmd_success, 0);
31438+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
31439+ atomic_set_unchecked(&phba->num_cmd_success, 0);
31440 }
31441
31442 /**
31443diff -urNp linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c
31444--- linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31445+++ linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31446@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31447 int rval;
31448 int i;
31449
31450+ pax_track_stack();
31451+
31452 // Allocate memory for the base list of scb for management module.
31453 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31454
31455diff -urNp linux-3.0.3/drivers/scsi/osd/osd_initiator.c linux-3.0.3/drivers/scsi/osd/osd_initiator.c
31456--- linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31457+++ linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31458@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31459 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31460 int ret;
31461
31462+ pax_track_stack();
31463+
31464 or = osd_start_request(od, GFP_KERNEL);
31465 if (!or)
31466 return -ENOMEM;
31467diff -urNp linux-3.0.3/drivers/scsi/pmcraid.c linux-3.0.3/drivers/scsi/pmcraid.c
31468--- linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31469+++ linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31470@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31471 res->scsi_dev = scsi_dev;
31472 scsi_dev->hostdata = res;
31473 res->change_detected = 0;
31474- atomic_set(&res->read_failures, 0);
31475- atomic_set(&res->write_failures, 0);
31476+ atomic_set_unchecked(&res->read_failures, 0);
31477+ atomic_set_unchecked(&res->write_failures, 0);
31478 rc = 0;
31479 }
31480 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31481@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31482
31483 /* If this was a SCSI read/write command keep count of errors */
31484 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31485- atomic_inc(&res->read_failures);
31486+ atomic_inc_unchecked(&res->read_failures);
31487 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31488- atomic_inc(&res->write_failures);
31489+ atomic_inc_unchecked(&res->write_failures);
31490
31491 if (!RES_IS_GSCSI(res->cfg_entry) &&
31492 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31493@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31494 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31495 * hrrq_id assigned here in queuecommand
31496 */
31497- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31498+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31499 pinstance->num_hrrq;
31500 cmd->cmd_done = pmcraid_io_done;
31501
31502@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31503 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31504 * hrrq_id assigned here in queuecommand
31505 */
31506- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31507+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31508 pinstance->num_hrrq;
31509
31510 if (request_size) {
31511@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31512
31513 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31514 /* add resources only after host is added into system */
31515- if (!atomic_read(&pinstance->expose_resources))
31516+ if (!atomic_read_unchecked(&pinstance->expose_resources))
31517 return;
31518
31519 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31520@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31521 init_waitqueue_head(&pinstance->reset_wait_q);
31522
31523 atomic_set(&pinstance->outstanding_cmds, 0);
31524- atomic_set(&pinstance->last_message_id, 0);
31525- atomic_set(&pinstance->expose_resources, 0);
31526+ atomic_set_unchecked(&pinstance->last_message_id, 0);
31527+ atomic_set_unchecked(&pinstance->expose_resources, 0);
31528
31529 INIT_LIST_HEAD(&pinstance->free_res_q);
31530 INIT_LIST_HEAD(&pinstance->used_res_q);
31531@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31532 /* Schedule worker thread to handle CCN and take care of adding and
31533 * removing devices to OS
31534 */
31535- atomic_set(&pinstance->expose_resources, 1);
31536+ atomic_set_unchecked(&pinstance->expose_resources, 1);
31537 schedule_work(&pinstance->worker_q);
31538 return rc;
31539
31540diff -urNp linux-3.0.3/drivers/scsi/pmcraid.h linux-3.0.3/drivers/scsi/pmcraid.h
31541--- linux-3.0.3/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31542+++ linux-3.0.3/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31543@@ -749,7 +749,7 @@ struct pmcraid_instance {
31544 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31545
31546 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31547- atomic_t last_message_id;
31548+ atomic_unchecked_t last_message_id;
31549
31550 /* configuration table */
31551 struct pmcraid_config_table *cfg_table;
31552@@ -778,7 +778,7 @@ struct pmcraid_instance {
31553 atomic_t outstanding_cmds;
31554
31555 /* should add/delete resources to mid-layer now ?*/
31556- atomic_t expose_resources;
31557+ atomic_unchecked_t expose_resources;
31558
31559
31560
31561@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31562 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31563 };
31564 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31565- atomic_t read_failures; /* count of failed READ commands */
31566- atomic_t write_failures; /* count of failed WRITE commands */
31567+ atomic_unchecked_t read_failures; /* count of failed READ commands */
31568+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31569
31570 /* To indicate add/delete/modify during CCN */
31571 u8 change_detected;
31572diff -urNp linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h
31573--- linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31574+++ linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31575@@ -2244,7 +2244,7 @@ struct isp_operations {
31576 int (*get_flash_version) (struct scsi_qla_host *, void *);
31577 int (*start_scsi) (srb_t *);
31578 int (*abort_isp) (struct scsi_qla_host *);
31579-};
31580+} __no_const;
31581
31582 /* MSI-X Support *************************************************************/
31583
31584diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h
31585--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31586+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31587@@ -256,7 +256,7 @@ struct ddb_entry {
31588 atomic_t retry_relogin_timer; /* Min Time between relogins
31589 * (4000 only) */
31590 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31591- atomic_t relogin_retry_count; /* Num of times relogin has been
31592+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31593 * retried */
31594
31595 uint16_t port;
31596diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c
31597--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31598+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31599@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31600 ddb_entry->fw_ddb_index = fw_ddb_index;
31601 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31602 atomic_set(&ddb_entry->relogin_timer, 0);
31603- atomic_set(&ddb_entry->relogin_retry_count, 0);
31604+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31605 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31606 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31607 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31608@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31609 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31610 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31611 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31612- atomic_set(&ddb_entry->relogin_retry_count, 0);
31613+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31614 atomic_set(&ddb_entry->relogin_timer, 0);
31615 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31616 iscsi_unblock_session(ddb_entry->sess);
31617diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c
31618--- linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31619+++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31620@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31621 ddb_entry->fw_ddb_device_state ==
31622 DDB_DS_SESSION_FAILED) {
31623 /* Reset retry relogin timer */
31624- atomic_inc(&ddb_entry->relogin_retry_count);
31625+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31626 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31627 " timed out-retrying"
31628 " relogin (%d)\n",
31629 ha->host_no,
31630 ddb_entry->fw_ddb_index,
31631- atomic_read(&ddb_entry->
31632+ atomic_read_unchecked(&ddb_entry->
31633 relogin_retry_count))
31634 );
31635 start_dpc++;
31636diff -urNp linux-3.0.3/drivers/scsi/scsi.c linux-3.0.3/drivers/scsi/scsi.c
31637--- linux-3.0.3/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31638+++ linux-3.0.3/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31639@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31640 unsigned long timeout;
31641 int rtn = 0;
31642
31643- atomic_inc(&cmd->device->iorequest_cnt);
31644+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31645
31646 /* check if the device is still usable */
31647 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31648diff -urNp linux-3.0.3/drivers/scsi/scsi_debug.c linux-3.0.3/drivers/scsi/scsi_debug.c
31649--- linux-3.0.3/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31650+++ linux-3.0.3/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31651@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31652 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31653 unsigned char *cmd = (unsigned char *)scp->cmnd;
31654
31655+ pax_track_stack();
31656+
31657 if ((errsts = check_readiness(scp, 1, devip)))
31658 return errsts;
31659 memset(arr, 0, sizeof(arr));
31660@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31661 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31662 unsigned char *cmd = (unsigned char *)scp->cmnd;
31663
31664+ pax_track_stack();
31665+
31666 if ((errsts = check_readiness(scp, 1, devip)))
31667 return errsts;
31668 memset(arr, 0, sizeof(arr));
31669diff -urNp linux-3.0.3/drivers/scsi/scsi_lib.c linux-3.0.3/drivers/scsi/scsi_lib.c
31670--- linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31671+++ linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31672@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31673 shost = sdev->host;
31674 scsi_init_cmd_errh(cmd);
31675 cmd->result = DID_NO_CONNECT << 16;
31676- atomic_inc(&cmd->device->iorequest_cnt);
31677+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31678
31679 /*
31680 * SCSI request completion path will do scsi_device_unbusy(),
31681@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31682
31683 INIT_LIST_HEAD(&cmd->eh_entry);
31684
31685- atomic_inc(&cmd->device->iodone_cnt);
31686+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
31687 if (cmd->result)
31688- atomic_inc(&cmd->device->ioerr_cnt);
31689+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31690
31691 disposition = scsi_decide_disposition(cmd);
31692 if (disposition != SUCCESS &&
31693diff -urNp linux-3.0.3/drivers/scsi/scsi_sysfs.c linux-3.0.3/drivers/scsi/scsi_sysfs.c
31694--- linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31695+++ linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31696@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31697 char *buf) \
31698 { \
31699 struct scsi_device *sdev = to_scsi_device(dev); \
31700- unsigned long long count = atomic_read(&sdev->field); \
31701+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
31702 return snprintf(buf, 20, "0x%llx\n", count); \
31703 } \
31704 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31705diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_fc.c linux-3.0.3/drivers/scsi/scsi_transport_fc.c
31706--- linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31707+++ linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31708@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31709 * Netlink Infrastructure
31710 */
31711
31712-static atomic_t fc_event_seq;
31713+static atomic_unchecked_t fc_event_seq;
31714
31715 /**
31716 * fc_get_event_number - Obtain the next sequential FC event number
31717@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31718 u32
31719 fc_get_event_number(void)
31720 {
31721- return atomic_add_return(1, &fc_event_seq);
31722+ return atomic_add_return_unchecked(1, &fc_event_seq);
31723 }
31724 EXPORT_SYMBOL(fc_get_event_number);
31725
31726@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31727 {
31728 int error;
31729
31730- atomic_set(&fc_event_seq, 0);
31731+ atomic_set_unchecked(&fc_event_seq, 0);
31732
31733 error = transport_class_register(&fc_host_class);
31734 if (error)
31735@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31736 char *cp;
31737
31738 *val = simple_strtoul(buf, &cp, 0);
31739- if ((*cp && (*cp != '\n')) || (*val < 0))
31740+ if (*cp && (*cp != '\n'))
31741 return -EINVAL;
31742 /*
31743 * Check for overflow; dev_loss_tmo is u32
31744diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c
31745--- linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31746+++ linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31747@@ -83,7 +83,7 @@ struct iscsi_internal {
31748 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31749 };
31750
31751-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31752+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31753 static struct workqueue_struct *iscsi_eh_timer_workq;
31754
31755 /*
31756@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31757 int err;
31758
31759 ihost = shost->shost_data;
31760- session->sid = atomic_add_return(1, &iscsi_session_nr);
31761+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31762
31763 if (id == ISCSI_MAX_TARGET) {
31764 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31765@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31766 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31767 ISCSI_TRANSPORT_VERSION);
31768
31769- atomic_set(&iscsi_session_nr, 0);
31770+ atomic_set_unchecked(&iscsi_session_nr, 0);
31771
31772 err = class_register(&iscsi_transport_class);
31773 if (err)
31774diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_srp.c linux-3.0.3/drivers/scsi/scsi_transport_srp.c
31775--- linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31776+++ linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31777@@ -33,7 +33,7 @@
31778 #include "scsi_transport_srp_internal.h"
31779
31780 struct srp_host_attrs {
31781- atomic_t next_port_id;
31782+ atomic_unchecked_t next_port_id;
31783 };
31784 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31785
31786@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31787 struct Scsi_Host *shost = dev_to_shost(dev);
31788 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31789
31790- atomic_set(&srp_host->next_port_id, 0);
31791+ atomic_set_unchecked(&srp_host->next_port_id, 0);
31792 return 0;
31793 }
31794
31795@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31796 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31797 rport->roles = ids->roles;
31798
31799- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31800+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31801 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31802
31803 transport_setup_device(&rport->dev);
31804diff -urNp linux-3.0.3/drivers/scsi/sg.c linux-3.0.3/drivers/scsi/sg.c
31805--- linux-3.0.3/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31806+++ linux-3.0.3/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31807@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31808 const struct file_operations * fops;
31809 };
31810
31811-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31812+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31813 {"allow_dio", &adio_fops},
31814 {"debug", &debug_fops},
31815 {"def_reserved_size", &dressz_fops},
31816@@ -2325,7 +2325,7 @@ sg_proc_init(void)
31817 {
31818 int k, mask;
31819 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31820- struct sg_proc_leaf * leaf;
31821+ const struct sg_proc_leaf * leaf;
31822
31823 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31824 if (!sg_proc_sgp)
31825diff -urNp linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c
31826--- linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31827+++ linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31828@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31829 int do_iounmap = 0;
31830 int do_disable_device = 1;
31831
31832+ pax_track_stack();
31833+
31834 memset(&sym_dev, 0, sizeof(sym_dev));
31835 memset(&nvram, 0, sizeof(nvram));
31836 sym_dev.pdev = pdev;
31837diff -urNp linux-3.0.3/drivers/scsi/vmw_pvscsi.c linux-3.0.3/drivers/scsi/vmw_pvscsi.c
31838--- linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31839+++ linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31840@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31841 dma_addr_t base;
31842 unsigned i;
31843
31844+ pax_track_stack();
31845+
31846 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31847 cmd.reqRingNumPages = adapter->req_pages;
31848 cmd.cmpRingNumPages = adapter->cmp_pages;
31849diff -urNp linux-3.0.3/drivers/spi/spi.c linux-3.0.3/drivers/spi/spi.c
31850--- linux-3.0.3/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31851+++ linux-3.0.3/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31852@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31853 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31854
31855 /* portable code must never pass more than 32 bytes */
31856-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31857+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31858
31859 static u8 *buf;
31860
31861diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31862--- linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31863+++ linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31864@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31865 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31866
31867
31868-static struct net_device_ops ar6000_netdev_ops = {
31869+static net_device_ops_no_const ar6000_netdev_ops = {
31870 .ndo_init = NULL,
31871 .ndo_open = ar6000_open,
31872 .ndo_stop = ar6000_close,
31873diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31874--- linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31875+++ linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31876@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31877 typedef struct ar6k_pal_config_s
31878 {
31879 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31880-}ar6k_pal_config_t;
31881+} __no_const ar6k_pal_config_t;
31882
31883 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31884 #endif /* _AR6K_PAL_H_ */
31885diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31886--- linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31887+++ linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31888@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31889 free_netdev(ifp->net);
31890 }
31891 /* Allocate etherdev, including space for private structure */
31892- ifp->net = alloc_etherdev(sizeof(dhd));
31893+ ifp->net = alloc_etherdev(sizeof(*dhd));
31894 if (!ifp->net) {
31895 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31896 ret = -ENOMEM;
31897 }
31898 if (ret == 0) {
31899 strcpy(ifp->net->name, ifp->name);
31900- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31901+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31902 err = dhd_net_attach(&dhd->pub, ifp->idx);
31903 if (err != 0) {
31904 DHD_ERROR(("%s: dhd_net_attach failed, "
31905@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31906 strcpy(nv_path, nvram_path);
31907
31908 /* Allocate etherdev, including space for private structure */
31909- net = alloc_etherdev(sizeof(dhd));
31910+ net = alloc_etherdev(sizeof(*dhd));
31911 if (!net) {
31912 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31913 goto fail;
31914@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31915 /*
31916 * Save the dhd_info into the priv
31917 */
31918- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31919+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31920
31921 /* Set network interface name if it was provided as module parameter */
31922 if (iface_name[0]) {
31923@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31924 /*
31925 * Save the dhd_info into the priv
31926 */
31927- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31928+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31929
31930 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31931 g_bus = bus;
31932diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31933--- linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31934+++ linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31935@@ -593,7 +593,7 @@ struct phy_func_ptr {
31936 initfn_t carrsuppr;
31937 rxsigpwrfn_t rxsigpwr;
31938 detachfn_t detach;
31939-};
31940+} __no_const;
31941 typedef struct phy_func_ptr phy_func_ptr_t;
31942
31943 struct phy_info {
31944diff -urNp linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h
31945--- linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31946+++ linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31947@@ -185,7 +185,7 @@ typedef struct {
31948 u16 func, uint bustype, void *regsva, void *param);
31949 /* detach from device */
31950 void (*detach) (void *ch);
31951-} bcmsdh_driver_t;
31952+} __no_const bcmsdh_driver_t;
31953
31954 /* platform specific/high level functions */
31955 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31956diff -urNp linux-3.0.3/drivers/staging/et131x/et1310_tx.c linux-3.0.3/drivers/staging/et131x/et1310_tx.c
31957--- linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31958+++ linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31959@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31960 struct net_device_stats *stats = &etdev->net_stats;
31961
31962 if (tcb->flags & fMP_DEST_BROAD)
31963- atomic_inc(&etdev->Stats.brdcstxmt);
31964+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31965 else if (tcb->flags & fMP_DEST_MULTI)
31966- atomic_inc(&etdev->Stats.multixmt);
31967+ atomic_inc_unchecked(&etdev->Stats.multixmt);
31968 else
31969- atomic_inc(&etdev->Stats.unixmt);
31970+ atomic_inc_unchecked(&etdev->Stats.unixmt);
31971
31972 if (tcb->skb) {
31973 stats->tx_bytes += tcb->skb->len;
31974diff -urNp linux-3.0.3/drivers/staging/et131x/et131x_adapter.h linux-3.0.3/drivers/staging/et131x/et131x_adapter.h
31975--- linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31976+++ linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31977@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31978 * operations
31979 */
31980 u32 unircv; /* # multicast packets received */
31981- atomic_t unixmt; /* # multicast packets for Tx */
31982+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31983 u32 multircv; /* # multicast packets received */
31984- atomic_t multixmt; /* # multicast packets for Tx */
31985+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31986 u32 brdcstrcv; /* # broadcast packets received */
31987- atomic_t brdcstxmt; /* # broadcast packets for Tx */
31988+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31989 u32 norcvbuf; /* # Rx packets discarded */
31990 u32 noxmtbuf; /* # Tx packets discarded */
31991
31992diff -urNp linux-3.0.3/drivers/staging/hv/channel.c linux-3.0.3/drivers/staging/hv/channel.c
31993--- linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31994+++ linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31995@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31996 int ret = 0;
31997 int t;
31998
31999- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32000- atomic_inc(&vmbus_connection.next_gpadl_handle);
32001+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32002+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32003
32004 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32005 if (ret)
32006diff -urNp linux-3.0.3/drivers/staging/hv/hv.c linux-3.0.3/drivers/staging/hv/hv.c
32007--- linux-3.0.3/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
32008+++ linux-3.0.3/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32009@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32010 u64 output_address = (output) ? virt_to_phys(output) : 0;
32011 u32 output_address_hi = output_address >> 32;
32012 u32 output_address_lo = output_address & 0xFFFFFFFF;
32013- volatile void *hypercall_page = hv_context.hypercall_page;
32014+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32015
32016 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32017 "=a"(hv_status_lo) : "d" (control_hi),
32018diff -urNp linux-3.0.3/drivers/staging/hv/hv_mouse.c linux-3.0.3/drivers/staging/hv/hv_mouse.c
32019--- linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32020+++ linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32021@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32022 if (hid_dev) {
32023 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32024
32025- hid_dev->ll_driver->open = mousevsc_hid_open;
32026- hid_dev->ll_driver->close = mousevsc_hid_close;
32027+ pax_open_kernel();
32028+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32029+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32030+ pax_close_kernel();
32031
32032 hid_dev->bus = BUS_VIRTUAL;
32033 hid_dev->vendor = input_device_ctx->device_info.vendor;
32034diff -urNp linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h
32035--- linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32036+++ linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32037@@ -559,7 +559,7 @@ enum vmbus_connect_state {
32038 struct vmbus_connection {
32039 enum vmbus_connect_state conn_state;
32040
32041- atomic_t next_gpadl_handle;
32042+ atomic_unchecked_t next_gpadl_handle;
32043
32044 /*
32045 * Represents channel interrupts. Each bit position represents a
32046diff -urNp linux-3.0.3/drivers/staging/hv/rndis_filter.c linux-3.0.3/drivers/staging/hv/rndis_filter.c
32047--- linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32048+++ linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32049@@ -43,7 +43,7 @@ struct rndis_device {
32050
32051 enum rndis_device_state state;
32052 u32 link_stat;
32053- atomic_t new_req_id;
32054+ atomic_unchecked_t new_req_id;
32055
32056 spinlock_t request_lock;
32057 struct list_head req_list;
32058@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32059 * template
32060 */
32061 set = &rndis_msg->msg.set_req;
32062- set->req_id = atomic_inc_return(&dev->new_req_id);
32063+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32064
32065 /* Add to the request list */
32066 spin_lock_irqsave(&dev->request_lock, flags);
32067@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32068
32069 /* Setup the rndis set */
32070 halt = &request->request_msg.msg.halt_req;
32071- halt->req_id = atomic_inc_return(&dev->new_req_id);
32072+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32073
32074 /* Ignore return since this msg is optional. */
32075 rndis_filter_send_request(dev, request);
32076diff -urNp linux-3.0.3/drivers/staging/hv/vmbus_drv.c linux-3.0.3/drivers/staging/hv/vmbus_drv.c
32077--- linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32078+++ linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32079@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32080 {
32081 int ret = 0;
32082
32083- static atomic_t device_num = ATOMIC_INIT(0);
32084+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32085
32086 /* Set the device name. Otherwise, device_register() will fail. */
32087 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32088- atomic_inc_return(&device_num));
32089+ atomic_inc_return_unchecked(&device_num));
32090
32091 /* The new device belongs to this bus */
32092 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32093diff -urNp linux-3.0.3/drivers/staging/iio/ring_generic.h linux-3.0.3/drivers/staging/iio/ring_generic.h
32094--- linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32095+++ linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32096@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32097
32098 int (*is_enabled)(struct iio_ring_buffer *ring);
32099 int (*enable)(struct iio_ring_buffer *ring);
32100-};
32101+} __no_const;
32102
32103 struct iio_ring_setup_ops {
32104 int (*preenable)(struct iio_dev *);
32105diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet.c linux-3.0.3/drivers/staging/octeon/ethernet.c
32106--- linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32107+++ linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32108@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32109 * since the RX tasklet also increments it.
32110 */
32111 #ifdef CONFIG_64BIT
32112- atomic64_add(rx_status.dropped_packets,
32113- (atomic64_t *)&priv->stats.rx_dropped);
32114+ atomic64_add_unchecked(rx_status.dropped_packets,
32115+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32116 #else
32117- atomic_add(rx_status.dropped_packets,
32118- (atomic_t *)&priv->stats.rx_dropped);
32119+ atomic_add_unchecked(rx_status.dropped_packets,
32120+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
32121 #endif
32122 }
32123
32124diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet-rx.c linux-3.0.3/drivers/staging/octeon/ethernet-rx.c
32125--- linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32126+++ linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32127@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32128 /* Increment RX stats for virtual ports */
32129 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32130 #ifdef CONFIG_64BIT
32131- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32132- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32133+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32134+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32135 #else
32136- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32137- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32138+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32139+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32140 #endif
32141 }
32142 netif_receive_skb(skb);
32143@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32144 dev->name);
32145 */
32146 #ifdef CONFIG_64BIT
32147- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32148+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32149 #else
32150- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32151+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32152 #endif
32153 dev_kfree_skb_irq(skb);
32154 }
32155diff -urNp linux-3.0.3/drivers/staging/pohmelfs/inode.c linux-3.0.3/drivers/staging/pohmelfs/inode.c
32156--- linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32157+++ linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32158@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32159 mutex_init(&psb->mcache_lock);
32160 psb->mcache_root = RB_ROOT;
32161 psb->mcache_timeout = msecs_to_jiffies(5000);
32162- atomic_long_set(&psb->mcache_gen, 0);
32163+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
32164
32165 psb->trans_max_pages = 100;
32166
32167@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32168 INIT_LIST_HEAD(&psb->crypto_ready_list);
32169 INIT_LIST_HEAD(&psb->crypto_active_list);
32170
32171- atomic_set(&psb->trans_gen, 1);
32172+ atomic_set_unchecked(&psb->trans_gen, 1);
32173 atomic_long_set(&psb->total_inodes, 0);
32174
32175 mutex_init(&psb->state_lock);
32176diff -urNp linux-3.0.3/drivers/staging/pohmelfs/mcache.c linux-3.0.3/drivers/staging/pohmelfs/mcache.c
32177--- linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32178+++ linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32179@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32180 m->data = data;
32181 m->start = start;
32182 m->size = size;
32183- m->gen = atomic_long_inc_return(&psb->mcache_gen);
32184+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32185
32186 mutex_lock(&psb->mcache_lock);
32187 err = pohmelfs_mcache_insert(psb, m);
32188diff -urNp linux-3.0.3/drivers/staging/pohmelfs/netfs.h linux-3.0.3/drivers/staging/pohmelfs/netfs.h
32189--- linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32190+++ linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32191@@ -571,14 +571,14 @@ struct pohmelfs_config;
32192 struct pohmelfs_sb {
32193 struct rb_root mcache_root;
32194 struct mutex mcache_lock;
32195- atomic_long_t mcache_gen;
32196+ atomic_long_unchecked_t mcache_gen;
32197 unsigned long mcache_timeout;
32198
32199 unsigned int idx;
32200
32201 unsigned int trans_retries;
32202
32203- atomic_t trans_gen;
32204+ atomic_unchecked_t trans_gen;
32205
32206 unsigned int crypto_attached_size;
32207 unsigned int crypto_align_size;
32208diff -urNp linux-3.0.3/drivers/staging/pohmelfs/trans.c linux-3.0.3/drivers/staging/pohmelfs/trans.c
32209--- linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32210+++ linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32211@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32212 int err;
32213 struct netfs_cmd *cmd = t->iovec.iov_base;
32214
32215- t->gen = atomic_inc_return(&psb->trans_gen);
32216+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32217
32218 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32219 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32220diff -urNp linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h
32221--- linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32222+++ linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32223@@ -83,7 +83,7 @@ struct _io_ops {
32224 u8 *pmem);
32225 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32226 u8 *pmem);
32227-};
32228+} __no_const;
32229
32230 struct io_req {
32231 struct list_head list;
32232diff -urNp linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c
32233--- linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32234+++ linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32235@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32236 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32237
32238 if (rlen)
32239- if (copy_to_user(data, &resp, rlen))
32240+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32241 return -EFAULT;
32242
32243 return 0;
32244diff -urNp linux-3.0.3/drivers/staging/tty/stallion.c linux-3.0.3/drivers/staging/tty/stallion.c
32245--- linux-3.0.3/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32246+++ linux-3.0.3/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32247@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32248 struct stlport stl_dummyport;
32249 struct stlport *portp;
32250
32251+ pax_track_stack();
32252+
32253 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32254 return -EFAULT;
32255 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32256diff -urNp linux-3.0.3/drivers/staging/usbip/usbip_common.h linux-3.0.3/drivers/staging/usbip/usbip_common.h
32257--- linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32258+++ linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32259@@ -315,7 +315,7 @@ struct usbip_device {
32260 void (*shutdown)(struct usbip_device *);
32261 void (*reset)(struct usbip_device *);
32262 void (*unusable)(struct usbip_device *);
32263- } eh_ops;
32264+ } __no_const eh_ops;
32265 };
32266
32267 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32268diff -urNp linux-3.0.3/drivers/staging/usbip/vhci.h linux-3.0.3/drivers/staging/usbip/vhci.h
32269--- linux-3.0.3/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32270+++ linux-3.0.3/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32271@@ -94,7 +94,7 @@ struct vhci_hcd {
32272 unsigned resuming:1;
32273 unsigned long re_timeout;
32274
32275- atomic_t seqnum;
32276+ atomic_unchecked_t seqnum;
32277
32278 /*
32279 * NOTE:
32280diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_hcd.c linux-3.0.3/drivers/staging/usbip/vhci_hcd.c
32281--- linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32282+++ linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32283@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32284 return;
32285 }
32286
32287- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32288+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32289 if (priv->seqnum == 0xffff)
32290 dev_info(&urb->dev->dev, "seqnum max\n");
32291
32292@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32293 return -ENOMEM;
32294 }
32295
32296- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32297+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32298 if (unlink->seqnum == 0xffff)
32299 pr_info("seqnum max\n");
32300
32301@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32302 vdev->rhport = rhport;
32303 }
32304
32305- atomic_set(&vhci->seqnum, 0);
32306+ atomic_set_unchecked(&vhci->seqnum, 0);
32307 spin_lock_init(&vhci->lock);
32308
32309 hcd->power_budget = 0; /* no limit */
32310diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_rx.c linux-3.0.3/drivers/staging/usbip/vhci_rx.c
32311--- linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32312+++ linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32313@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32314 if (!urb) {
32315 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32316 pr_info("max seqnum %d\n",
32317- atomic_read(&the_controller->seqnum));
32318+ atomic_read_unchecked(&the_controller->seqnum));
32319 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32320 return;
32321 }
32322diff -urNp linux-3.0.3/drivers/staging/vt6655/hostap.c linux-3.0.3/drivers/staging/vt6655/hostap.c
32323--- linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32324+++ linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32325@@ -79,14 +79,13 @@ static int msglevel
32326 *
32327 */
32328
32329+static net_device_ops_no_const apdev_netdev_ops;
32330+
32331 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32332 {
32333 PSDevice apdev_priv;
32334 struct net_device *dev = pDevice->dev;
32335 int ret;
32336- const struct net_device_ops apdev_netdev_ops = {
32337- .ndo_start_xmit = pDevice->tx_80211,
32338- };
32339
32340 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32341
32342@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32343 *apdev_priv = *pDevice;
32344 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32345
32346+ /* only half broken now */
32347+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32348 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32349
32350 pDevice->apdev->type = ARPHRD_IEEE80211;
32351diff -urNp linux-3.0.3/drivers/staging/vt6656/hostap.c linux-3.0.3/drivers/staging/vt6656/hostap.c
32352--- linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32353+++ linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32354@@ -80,14 +80,13 @@ static int msglevel
32355 *
32356 */
32357
32358+static net_device_ops_no_const apdev_netdev_ops;
32359+
32360 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32361 {
32362 PSDevice apdev_priv;
32363 struct net_device *dev = pDevice->dev;
32364 int ret;
32365- const struct net_device_ops apdev_netdev_ops = {
32366- .ndo_start_xmit = pDevice->tx_80211,
32367- };
32368
32369 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32370
32371@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32372 *apdev_priv = *pDevice;
32373 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32374
32375+ /* only half broken now */
32376+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32377 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32378
32379 pDevice->apdev->type = ARPHRD_IEEE80211;
32380diff -urNp linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c
32381--- linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32382+++ linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32383@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32384
32385 struct usbctlx_completor {
32386 int (*complete) (struct usbctlx_completor *);
32387-};
32388+} __no_const;
32389
32390 static int
32391 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32392diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.c linux-3.0.3/drivers/staging/zcache/tmem.c
32393--- linux-3.0.3/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32394+++ linux-3.0.3/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32395@@ -39,7 +39,7 @@
32396 * A tmem host implementation must use this function to register callbacks
32397 * for memory allocation.
32398 */
32399-static struct tmem_hostops tmem_hostops;
32400+static tmem_hostops_no_const tmem_hostops;
32401
32402 static void tmem_objnode_tree_init(void);
32403
32404@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32405 * A tmem host implementation must use this function to register
32406 * callbacks for a page-accessible memory (PAM) implementation
32407 */
32408-static struct tmem_pamops tmem_pamops;
32409+static tmem_pamops_no_const tmem_pamops;
32410
32411 void tmem_register_pamops(struct tmem_pamops *m)
32412 {
32413diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.h linux-3.0.3/drivers/staging/zcache/tmem.h
32414--- linux-3.0.3/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32415+++ linux-3.0.3/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32416@@ -171,6 +171,7 @@ struct tmem_pamops {
32417 int (*get_data)(struct page *, void *, struct tmem_pool *);
32418 void (*free)(void *, struct tmem_pool *);
32419 };
32420+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32421 extern void tmem_register_pamops(struct tmem_pamops *m);
32422
32423 /* memory allocation methods provided by the host implementation */
32424@@ -180,6 +181,7 @@ struct tmem_hostops {
32425 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32426 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32427 };
32428+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32429 extern void tmem_register_hostops(struct tmem_hostops *m);
32430
32431 /* core tmem accessor functions */
32432diff -urNp linux-3.0.3/drivers/target/target_core_alua.c linux-3.0.3/drivers/target/target_core_alua.c
32433--- linux-3.0.3/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32434+++ linux-3.0.3/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32435@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32436 char path[ALUA_METADATA_PATH_LEN];
32437 int len;
32438
32439+ pax_track_stack();
32440+
32441 memset(path, 0, ALUA_METADATA_PATH_LEN);
32442
32443 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32444@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32445 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32446 int len;
32447
32448+ pax_track_stack();
32449+
32450 memset(path, 0, ALUA_METADATA_PATH_LEN);
32451 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32452
32453diff -urNp linux-3.0.3/drivers/target/target_core_cdb.c linux-3.0.3/drivers/target/target_core_cdb.c
32454--- linux-3.0.3/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32455+++ linux-3.0.3/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32456@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32457 int length = 0;
32458 unsigned char buf[SE_MODE_PAGE_BUF];
32459
32460+ pax_track_stack();
32461+
32462 memset(buf, 0, SE_MODE_PAGE_BUF);
32463
32464 switch (cdb[2] & 0x3f) {
32465diff -urNp linux-3.0.3/drivers/target/target_core_configfs.c linux-3.0.3/drivers/target/target_core_configfs.c
32466--- linux-3.0.3/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32467+++ linux-3.0.3/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32468@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32469 ssize_t len = 0;
32470 int reg_count = 0, prf_isid;
32471
32472+ pax_track_stack();
32473+
32474 if (!(su_dev->se_dev_ptr))
32475 return -ENODEV;
32476
32477diff -urNp linux-3.0.3/drivers/target/target_core_pr.c linux-3.0.3/drivers/target/target_core_pr.c
32478--- linux-3.0.3/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32479+++ linux-3.0.3/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32480@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32481 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32482 u16 tpgt;
32483
32484+ pax_track_stack();
32485+
32486 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32487 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32488 /*
32489@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32490 ssize_t len = 0;
32491 int reg_count = 0;
32492
32493+ pax_track_stack();
32494+
32495 memset(buf, 0, pr_aptpl_buf_len);
32496 /*
32497 * Called to clear metadata once APTPL has been deactivated.
32498@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32499 char path[512];
32500 int ret;
32501
32502+ pax_track_stack();
32503+
32504 memset(iov, 0, sizeof(struct iovec));
32505 memset(path, 0, 512);
32506
32507diff -urNp linux-3.0.3/drivers/target/target_core_tmr.c linux-3.0.3/drivers/target/target_core_tmr.c
32508--- linux-3.0.3/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32509+++ linux-3.0.3/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32510@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32511 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32512 T_TASK(cmd)->t_task_cdbs,
32513 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32514- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32515+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32516 atomic_read(&T_TASK(cmd)->t_transport_active),
32517 atomic_read(&T_TASK(cmd)->t_transport_stop),
32518 atomic_read(&T_TASK(cmd)->t_transport_sent));
32519@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32520 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32521 " task: %p, t_fe_count: %d dev: %p\n", task,
32522 fe_count, dev);
32523- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32524+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32525 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32526 flags);
32527 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32528@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32529 }
32530 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32531 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32532- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32533+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32534 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32535 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32536
32537diff -urNp linux-3.0.3/drivers/target/target_core_transport.c linux-3.0.3/drivers/target/target_core_transport.c
32538--- linux-3.0.3/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32539+++ linux-3.0.3/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32540@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32541
32542 dev->queue_depth = dev_limits->queue_depth;
32543 atomic_set(&dev->depth_left, dev->queue_depth);
32544- atomic_set(&dev->dev_ordered_id, 0);
32545+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
32546
32547 se_dev_set_default_attribs(dev, dev_limits);
32548
32549@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32550 * Used to determine when ORDERED commands should go from
32551 * Dormant to Active status.
32552 */
32553- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32554+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32555 smp_mb__after_atomic_inc();
32556 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32557 cmd->se_ordered_id, cmd->sam_task_attr,
32558@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32559 " t_transport_active: %d t_transport_stop: %d"
32560 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32561 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32562- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32563+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32564 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32565 atomic_read(&T_TASK(cmd)->t_transport_active),
32566 atomic_read(&T_TASK(cmd)->t_transport_stop),
32567@@ -2673,9 +2673,9 @@ check_depth:
32568 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32569 atomic_set(&task->task_active, 1);
32570 atomic_set(&task->task_sent, 1);
32571- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32572+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32573
32574- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32575+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32576 T_TASK(cmd)->t_task_cdbs)
32577 atomic_set(&cmd->transport_sent, 1);
32578
32579@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32580 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32581 }
32582 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32583- atomic_read(&T_TASK(cmd)->t_transport_aborted))
32584+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32585 goto remove;
32586
32587 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32588@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32589 {
32590 int ret = 0;
32591
32592- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32593+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32594 if (!(send_status) ||
32595 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32596 return 1;
32597@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32598 */
32599 if (cmd->data_direction == DMA_TO_DEVICE) {
32600 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32601- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32602+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32603 smp_mb__after_atomic_inc();
32604 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32605 transport_new_cmd_failure(cmd);
32606@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32607 CMD_TFO(cmd)->get_task_tag(cmd),
32608 T_TASK(cmd)->t_task_cdbs,
32609 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32610- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32611+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32612 atomic_read(&T_TASK(cmd)->t_transport_active),
32613 atomic_read(&T_TASK(cmd)->t_transport_stop),
32614 atomic_read(&T_TASK(cmd)->t_transport_sent));
32615diff -urNp linux-3.0.3/drivers/telephony/ixj.c linux-3.0.3/drivers/telephony/ixj.c
32616--- linux-3.0.3/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32617+++ linux-3.0.3/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32618@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32619 bool mContinue;
32620 char *pIn, *pOut;
32621
32622+ pax_track_stack();
32623+
32624 if (!SCI_Prepare(j))
32625 return 0;
32626
32627diff -urNp linux-3.0.3/drivers/tty/hvc/hvcs.c linux-3.0.3/drivers/tty/hvc/hvcs.c
32628--- linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32629+++ linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32630@@ -83,6 +83,7 @@
32631 #include <asm/hvcserver.h>
32632 #include <asm/uaccess.h>
32633 #include <asm/vio.h>
32634+#include <asm/local.h>
32635
32636 /*
32637 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32638@@ -270,7 +271,7 @@ struct hvcs_struct {
32639 unsigned int index;
32640
32641 struct tty_struct *tty;
32642- int open_count;
32643+ local_t open_count;
32644
32645 /*
32646 * Used to tell the driver kernel_thread what operations need to take
32647@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32648
32649 spin_lock_irqsave(&hvcsd->lock, flags);
32650
32651- if (hvcsd->open_count > 0) {
32652+ if (local_read(&hvcsd->open_count) > 0) {
32653 spin_unlock_irqrestore(&hvcsd->lock, flags);
32654 printk(KERN_INFO "HVCS: vterm state unchanged. "
32655 "The hvcs device node is still in use.\n");
32656@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32657 if ((retval = hvcs_partner_connect(hvcsd)))
32658 goto error_release;
32659
32660- hvcsd->open_count = 1;
32661+ local_set(&hvcsd->open_count, 1);
32662 hvcsd->tty = tty;
32663 tty->driver_data = hvcsd;
32664
32665@@ -1179,7 +1180,7 @@ fast_open:
32666
32667 spin_lock_irqsave(&hvcsd->lock, flags);
32668 kref_get(&hvcsd->kref);
32669- hvcsd->open_count++;
32670+ local_inc(&hvcsd->open_count);
32671 hvcsd->todo_mask |= HVCS_SCHED_READ;
32672 spin_unlock_irqrestore(&hvcsd->lock, flags);
32673
32674@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32675 hvcsd = tty->driver_data;
32676
32677 spin_lock_irqsave(&hvcsd->lock, flags);
32678- if (--hvcsd->open_count == 0) {
32679+ if (local_dec_and_test(&hvcsd->open_count)) {
32680
32681 vio_disable_interrupts(hvcsd->vdev);
32682
32683@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32684 free_irq(irq, hvcsd);
32685 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32686 return;
32687- } else if (hvcsd->open_count < 0) {
32688+ } else if (local_read(&hvcsd->open_count) < 0) {
32689 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32690 " is missmanaged.\n",
32691- hvcsd->vdev->unit_address, hvcsd->open_count);
32692+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32693 }
32694
32695 spin_unlock_irqrestore(&hvcsd->lock, flags);
32696@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32697
32698 spin_lock_irqsave(&hvcsd->lock, flags);
32699 /* Preserve this so that we know how many kref refs to put */
32700- temp_open_count = hvcsd->open_count;
32701+ temp_open_count = local_read(&hvcsd->open_count);
32702
32703 /*
32704 * Don't kref put inside the spinlock because the destruction
32705@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32706 hvcsd->tty->driver_data = NULL;
32707 hvcsd->tty = NULL;
32708
32709- hvcsd->open_count = 0;
32710+ local_set(&hvcsd->open_count, 0);
32711
32712 /* This will drop any buffered data on the floor which is OK in a hangup
32713 * scenario. */
32714@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32715 * the middle of a write operation? This is a crummy place to do this
32716 * but we want to keep it all in the spinlock.
32717 */
32718- if (hvcsd->open_count <= 0) {
32719+ if (local_read(&hvcsd->open_count) <= 0) {
32720 spin_unlock_irqrestore(&hvcsd->lock, flags);
32721 return -ENODEV;
32722 }
32723@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32724 {
32725 struct hvcs_struct *hvcsd = tty->driver_data;
32726
32727- if (!hvcsd || hvcsd->open_count <= 0)
32728+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32729 return 0;
32730
32731 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32732diff -urNp linux-3.0.3/drivers/tty/ipwireless/tty.c linux-3.0.3/drivers/tty/ipwireless/tty.c
32733--- linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32734+++ linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32735@@ -29,6 +29,7 @@
32736 #include <linux/tty_driver.h>
32737 #include <linux/tty_flip.h>
32738 #include <linux/uaccess.h>
32739+#include <asm/local.h>
32740
32741 #include "tty.h"
32742 #include "network.h"
32743@@ -51,7 +52,7 @@ struct ipw_tty {
32744 int tty_type;
32745 struct ipw_network *network;
32746 struct tty_struct *linux_tty;
32747- int open_count;
32748+ local_t open_count;
32749 unsigned int control_lines;
32750 struct mutex ipw_tty_mutex;
32751 int tx_bytes_queued;
32752@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32753 mutex_unlock(&tty->ipw_tty_mutex);
32754 return -ENODEV;
32755 }
32756- if (tty->open_count == 0)
32757+ if (local_read(&tty->open_count) == 0)
32758 tty->tx_bytes_queued = 0;
32759
32760- tty->open_count++;
32761+ local_inc(&tty->open_count);
32762
32763 tty->linux_tty = linux_tty;
32764 linux_tty->driver_data = tty;
32765@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32766
32767 static void do_ipw_close(struct ipw_tty *tty)
32768 {
32769- tty->open_count--;
32770-
32771- if (tty->open_count == 0) {
32772+ if (local_dec_return(&tty->open_count) == 0) {
32773 struct tty_struct *linux_tty = tty->linux_tty;
32774
32775 if (linux_tty != NULL) {
32776@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32777 return;
32778
32779 mutex_lock(&tty->ipw_tty_mutex);
32780- if (tty->open_count == 0) {
32781+ if (local_read(&tty->open_count) == 0) {
32782 mutex_unlock(&tty->ipw_tty_mutex);
32783 return;
32784 }
32785@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32786 return;
32787 }
32788
32789- if (!tty->open_count) {
32790+ if (!local_read(&tty->open_count)) {
32791 mutex_unlock(&tty->ipw_tty_mutex);
32792 return;
32793 }
32794@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32795 return -ENODEV;
32796
32797 mutex_lock(&tty->ipw_tty_mutex);
32798- if (!tty->open_count) {
32799+ if (!local_read(&tty->open_count)) {
32800 mutex_unlock(&tty->ipw_tty_mutex);
32801 return -EINVAL;
32802 }
32803@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32804 if (!tty)
32805 return -ENODEV;
32806
32807- if (!tty->open_count)
32808+ if (!local_read(&tty->open_count))
32809 return -EINVAL;
32810
32811 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32812@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32813 if (!tty)
32814 return 0;
32815
32816- if (!tty->open_count)
32817+ if (!local_read(&tty->open_count))
32818 return 0;
32819
32820 return tty->tx_bytes_queued;
32821@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32822 if (!tty)
32823 return -ENODEV;
32824
32825- if (!tty->open_count)
32826+ if (!local_read(&tty->open_count))
32827 return -EINVAL;
32828
32829 return get_control_lines(tty);
32830@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32831 if (!tty)
32832 return -ENODEV;
32833
32834- if (!tty->open_count)
32835+ if (!local_read(&tty->open_count))
32836 return -EINVAL;
32837
32838 return set_control_lines(tty, set, clear);
32839@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32840 if (!tty)
32841 return -ENODEV;
32842
32843- if (!tty->open_count)
32844+ if (!local_read(&tty->open_count))
32845 return -EINVAL;
32846
32847 /* FIXME: Exactly how is the tty object locked here .. */
32848@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32849 against a parallel ioctl etc */
32850 mutex_lock(&ttyj->ipw_tty_mutex);
32851 }
32852- while (ttyj->open_count)
32853+ while (local_read(&ttyj->open_count))
32854 do_ipw_close(ttyj);
32855 ipwireless_disassociate_network_ttys(network,
32856 ttyj->channel_idx);
32857diff -urNp linux-3.0.3/drivers/tty/n_gsm.c linux-3.0.3/drivers/tty/n_gsm.c
32858--- linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32859+++ linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32860@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32861 return NULL;
32862 spin_lock_init(&dlci->lock);
32863 dlci->fifo = &dlci->_fifo;
32864- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32865+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32866 kfree(dlci);
32867 return NULL;
32868 }
32869diff -urNp linux-3.0.3/drivers/tty/n_tty.c linux-3.0.3/drivers/tty/n_tty.c
32870--- linux-3.0.3/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32871+++ linux-3.0.3/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32872@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32873 {
32874 *ops = tty_ldisc_N_TTY;
32875 ops->owner = NULL;
32876- ops->refcount = ops->flags = 0;
32877+ atomic_set(&ops->refcount, 0);
32878+ ops->flags = 0;
32879 }
32880 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32881diff -urNp linux-3.0.3/drivers/tty/pty.c linux-3.0.3/drivers/tty/pty.c
32882--- linux-3.0.3/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32883+++ linux-3.0.3/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32884@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32885 register_sysctl_table(pty_root_table);
32886
32887 /* Now create the /dev/ptmx special device */
32888+ pax_open_kernel();
32889 tty_default_fops(&ptmx_fops);
32890- ptmx_fops.open = ptmx_open;
32891+ *(void **)&ptmx_fops.open = ptmx_open;
32892+ pax_close_kernel();
32893
32894 cdev_init(&ptmx_cdev, &ptmx_fops);
32895 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32896diff -urNp linux-3.0.3/drivers/tty/rocket.c linux-3.0.3/drivers/tty/rocket.c
32897--- linux-3.0.3/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32898+++ linux-3.0.3/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32899@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32900 struct rocket_ports tmp;
32901 int board;
32902
32903+ pax_track_stack();
32904+
32905 if (!retports)
32906 return -EFAULT;
32907 memset(&tmp, 0, sizeof (tmp));
32908diff -urNp linux-3.0.3/drivers/tty/serial/kgdboc.c linux-3.0.3/drivers/tty/serial/kgdboc.c
32909--- linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32910+++ linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32911@@ -23,8 +23,9 @@
32912 #define MAX_CONFIG_LEN 40
32913
32914 static struct kgdb_io kgdboc_io_ops;
32915+static struct kgdb_io kgdboc_io_ops_console;
32916
32917-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32918+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32919 static int configured = -1;
32920
32921 static char config[MAX_CONFIG_LEN];
32922@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32923 kgdboc_unregister_kbd();
32924 if (configured == 1)
32925 kgdb_unregister_io_module(&kgdboc_io_ops);
32926+ else if (configured == 2)
32927+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
32928 }
32929
32930 static int configure_kgdboc(void)
32931@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32932 int err;
32933 char *cptr = config;
32934 struct console *cons;
32935+ int is_console = 0;
32936
32937 err = kgdboc_option_setup(config);
32938 if (err || !strlen(config) || isspace(config[0]))
32939 goto noconfig;
32940
32941 err = -ENODEV;
32942- kgdboc_io_ops.is_console = 0;
32943 kgdb_tty_driver = NULL;
32944
32945 kgdboc_use_kms = 0;
32946@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32947 int idx;
32948 if (cons->device && cons->device(cons, &idx) == p &&
32949 idx == tty_line) {
32950- kgdboc_io_ops.is_console = 1;
32951+ is_console = 1;
32952 break;
32953 }
32954 cons = cons->next;
32955@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32956 kgdb_tty_line = tty_line;
32957
32958 do_register:
32959- err = kgdb_register_io_module(&kgdboc_io_ops);
32960+ if (is_console) {
32961+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
32962+ configured = 2;
32963+ } else {
32964+ err = kgdb_register_io_module(&kgdboc_io_ops);
32965+ configured = 1;
32966+ }
32967 if (err)
32968 goto noconfig;
32969
32970- configured = 1;
32971-
32972 return 0;
32973
32974 noconfig:
32975@@ -212,7 +219,7 @@ noconfig:
32976 static int __init init_kgdboc(void)
32977 {
32978 /* Already configured? */
32979- if (configured == 1)
32980+ if (configured >= 1)
32981 return 0;
32982
32983 return configure_kgdboc();
32984@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32985 if (config[len - 1] == '\n')
32986 config[len - 1] = '\0';
32987
32988- if (configured == 1)
32989+ if (configured >= 1)
32990 cleanup_kgdboc();
32991
32992 /* Go and configure with the new params. */
32993@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32994 .post_exception = kgdboc_post_exp_handler,
32995 };
32996
32997+static struct kgdb_io kgdboc_io_ops_console = {
32998+ .name = "kgdboc",
32999+ .read_char = kgdboc_get_char,
33000+ .write_char = kgdboc_put_char,
33001+ .pre_exception = kgdboc_pre_exp_handler,
33002+ .post_exception = kgdboc_post_exp_handler,
33003+ .is_console = 1
33004+};
33005+
33006 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33007 /* This is only available if kgdboc is a built in for early debugging */
33008 static int __init kgdboc_early_init(char *opt)
33009diff -urNp linux-3.0.3/drivers/tty/serial/mrst_max3110.c linux-3.0.3/drivers/tty/serial/mrst_max3110.c
33010--- linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33011+++ linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33012@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33013 int loop = 1, num, total = 0;
33014 u8 recv_buf[512], *pbuf;
33015
33016+ pax_track_stack();
33017+
33018 pbuf = recv_buf;
33019 do {
33020 num = max3110_read_multi(max, pbuf);
33021diff -urNp linux-3.0.3/drivers/tty/tty_io.c linux-3.0.3/drivers/tty/tty_io.c
33022--- linux-3.0.3/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33023+++ linux-3.0.3/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33024@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33025
33026 void tty_default_fops(struct file_operations *fops)
33027 {
33028- *fops = tty_fops;
33029+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33030 }
33031
33032 /*
33033diff -urNp linux-3.0.3/drivers/tty/tty_ldisc.c linux-3.0.3/drivers/tty/tty_ldisc.c
33034--- linux-3.0.3/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33035+++ linux-3.0.3/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33036@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33037 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33038 struct tty_ldisc_ops *ldo = ld->ops;
33039
33040- ldo->refcount--;
33041+ atomic_dec(&ldo->refcount);
33042 module_put(ldo->owner);
33043 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33044
33045@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33046 spin_lock_irqsave(&tty_ldisc_lock, flags);
33047 tty_ldiscs[disc] = new_ldisc;
33048 new_ldisc->num = disc;
33049- new_ldisc->refcount = 0;
33050+ atomic_set(&new_ldisc->refcount, 0);
33051 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33052
33053 return ret;
33054@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33055 return -EINVAL;
33056
33057 spin_lock_irqsave(&tty_ldisc_lock, flags);
33058- if (tty_ldiscs[disc]->refcount)
33059+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33060 ret = -EBUSY;
33061 else
33062 tty_ldiscs[disc] = NULL;
33063@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33064 if (ldops) {
33065 ret = ERR_PTR(-EAGAIN);
33066 if (try_module_get(ldops->owner)) {
33067- ldops->refcount++;
33068+ atomic_inc(&ldops->refcount);
33069 ret = ldops;
33070 }
33071 }
33072@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33073 unsigned long flags;
33074
33075 spin_lock_irqsave(&tty_ldisc_lock, flags);
33076- ldops->refcount--;
33077+ atomic_dec(&ldops->refcount);
33078 module_put(ldops->owner);
33079 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33080 }
33081diff -urNp linux-3.0.3/drivers/tty/vt/keyboard.c linux-3.0.3/drivers/tty/vt/keyboard.c
33082--- linux-3.0.3/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33083+++ linux-3.0.3/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33084@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33085 kbd->kbdmode == VC_OFF) &&
33086 value != KVAL(K_SAK))
33087 return; /* SAK is allowed even in raw mode */
33088+
33089+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33090+ {
33091+ void *func = fn_handler[value];
33092+ if (func == fn_show_state || func == fn_show_ptregs ||
33093+ func == fn_show_mem)
33094+ return;
33095+ }
33096+#endif
33097+
33098 fn_handler[value](vc);
33099 }
33100
33101diff -urNp linux-3.0.3/drivers/tty/vt/vt.c linux-3.0.3/drivers/tty/vt/vt.c
33102--- linux-3.0.3/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33103+++ linux-3.0.3/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33104@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33105
33106 static void notify_write(struct vc_data *vc, unsigned int unicode)
33107 {
33108- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33109+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33110 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33111 }
33112
33113diff -urNp linux-3.0.3/drivers/tty/vt/vt_ioctl.c linux-3.0.3/drivers/tty/vt/vt_ioctl.c
33114--- linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33115+++ linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33116@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33117 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33118 return -EFAULT;
33119
33120- if (!capable(CAP_SYS_TTY_CONFIG))
33121- perm = 0;
33122-
33123 switch (cmd) {
33124 case KDGKBENT:
33125 key_map = key_maps[s];
33126@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33127 val = (i ? K_HOLE : K_NOSUCHMAP);
33128 return put_user(val, &user_kbe->kb_value);
33129 case KDSKBENT:
33130+ if (!capable(CAP_SYS_TTY_CONFIG))
33131+ perm = 0;
33132+
33133 if (!perm)
33134 return -EPERM;
33135 if (!i && v == K_NOSUCHMAP) {
33136@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33137 int i, j, k;
33138 int ret;
33139
33140- if (!capable(CAP_SYS_TTY_CONFIG))
33141- perm = 0;
33142-
33143 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33144 if (!kbs) {
33145 ret = -ENOMEM;
33146@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33147 kfree(kbs);
33148 return ((p && *p) ? -EOVERFLOW : 0);
33149 case KDSKBSENT:
33150+ if (!capable(CAP_SYS_TTY_CONFIG))
33151+ perm = 0;
33152+
33153 if (!perm) {
33154 ret = -EPERM;
33155 goto reterr;
33156diff -urNp linux-3.0.3/drivers/uio/uio.c linux-3.0.3/drivers/uio/uio.c
33157--- linux-3.0.3/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33158+++ linux-3.0.3/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33159@@ -25,6 +25,7 @@
33160 #include <linux/kobject.h>
33161 #include <linux/cdev.h>
33162 #include <linux/uio_driver.h>
33163+#include <asm/local.h>
33164
33165 #define UIO_MAX_DEVICES (1U << MINORBITS)
33166
33167@@ -32,10 +33,10 @@ struct uio_device {
33168 struct module *owner;
33169 struct device *dev;
33170 int minor;
33171- atomic_t event;
33172+ atomic_unchecked_t event;
33173 struct fasync_struct *async_queue;
33174 wait_queue_head_t wait;
33175- int vma_count;
33176+ local_t vma_count;
33177 struct uio_info *info;
33178 struct kobject *map_dir;
33179 struct kobject *portio_dir;
33180@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33181 struct device_attribute *attr, char *buf)
33182 {
33183 struct uio_device *idev = dev_get_drvdata(dev);
33184- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33185+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33186 }
33187
33188 static struct device_attribute uio_class_attributes[] = {
33189@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33190 {
33191 struct uio_device *idev = info->uio_dev;
33192
33193- atomic_inc(&idev->event);
33194+ atomic_inc_unchecked(&idev->event);
33195 wake_up_interruptible(&idev->wait);
33196 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33197 }
33198@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33199 }
33200
33201 listener->dev = idev;
33202- listener->event_count = atomic_read(&idev->event);
33203+ listener->event_count = atomic_read_unchecked(&idev->event);
33204 filep->private_data = listener;
33205
33206 if (idev->info->open) {
33207@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33208 return -EIO;
33209
33210 poll_wait(filep, &idev->wait, wait);
33211- if (listener->event_count != atomic_read(&idev->event))
33212+ if (listener->event_count != atomic_read_unchecked(&idev->event))
33213 return POLLIN | POLLRDNORM;
33214 return 0;
33215 }
33216@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33217 do {
33218 set_current_state(TASK_INTERRUPTIBLE);
33219
33220- event_count = atomic_read(&idev->event);
33221+ event_count = atomic_read_unchecked(&idev->event);
33222 if (event_count != listener->event_count) {
33223 if (copy_to_user(buf, &event_count, count))
33224 retval = -EFAULT;
33225@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33226 static void uio_vma_open(struct vm_area_struct *vma)
33227 {
33228 struct uio_device *idev = vma->vm_private_data;
33229- idev->vma_count++;
33230+ local_inc(&idev->vma_count);
33231 }
33232
33233 static void uio_vma_close(struct vm_area_struct *vma)
33234 {
33235 struct uio_device *idev = vma->vm_private_data;
33236- idev->vma_count--;
33237+ local_dec(&idev->vma_count);
33238 }
33239
33240 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33241@@ -823,7 +824,7 @@ int __uio_register_device(struct module
33242 idev->owner = owner;
33243 idev->info = info;
33244 init_waitqueue_head(&idev->wait);
33245- atomic_set(&idev->event, 0);
33246+ atomic_set_unchecked(&idev->event, 0);
33247
33248 ret = uio_get_minor(idev);
33249 if (ret)
33250diff -urNp linux-3.0.3/drivers/usb/atm/cxacru.c linux-3.0.3/drivers/usb/atm/cxacru.c
33251--- linux-3.0.3/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33252+++ linux-3.0.3/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33253@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33254 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33255 if (ret < 2)
33256 return -EINVAL;
33257- if (index < 0 || index > 0x7f)
33258+ if (index > 0x7f)
33259 return -EINVAL;
33260 pos += tmp;
33261
33262diff -urNp linux-3.0.3/drivers/usb/atm/usbatm.c linux-3.0.3/drivers/usb/atm/usbatm.c
33263--- linux-3.0.3/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33264+++ linux-3.0.3/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33265@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33266 if (printk_ratelimit())
33267 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33268 __func__, vpi, vci);
33269- atomic_inc(&vcc->stats->rx_err);
33270+ atomic_inc_unchecked(&vcc->stats->rx_err);
33271 return;
33272 }
33273
33274@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33275 if (length > ATM_MAX_AAL5_PDU) {
33276 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33277 __func__, length, vcc);
33278- atomic_inc(&vcc->stats->rx_err);
33279+ atomic_inc_unchecked(&vcc->stats->rx_err);
33280 goto out;
33281 }
33282
33283@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33284 if (sarb->len < pdu_length) {
33285 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33286 __func__, pdu_length, sarb->len, vcc);
33287- atomic_inc(&vcc->stats->rx_err);
33288+ atomic_inc_unchecked(&vcc->stats->rx_err);
33289 goto out;
33290 }
33291
33292 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33293 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33294 __func__, vcc);
33295- atomic_inc(&vcc->stats->rx_err);
33296+ atomic_inc_unchecked(&vcc->stats->rx_err);
33297 goto out;
33298 }
33299
33300@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33301 if (printk_ratelimit())
33302 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33303 __func__, length);
33304- atomic_inc(&vcc->stats->rx_drop);
33305+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33306 goto out;
33307 }
33308
33309@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33310
33311 vcc->push(vcc, skb);
33312
33313- atomic_inc(&vcc->stats->rx);
33314+ atomic_inc_unchecked(&vcc->stats->rx);
33315 out:
33316 skb_trim(sarb, 0);
33317 }
33318@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33319 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33320
33321 usbatm_pop(vcc, skb);
33322- atomic_inc(&vcc->stats->tx);
33323+ atomic_inc_unchecked(&vcc->stats->tx);
33324
33325 skb = skb_dequeue(&instance->sndqueue);
33326 }
33327@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33328 if (!left--)
33329 return sprintf(page,
33330 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33331- atomic_read(&atm_dev->stats.aal5.tx),
33332- atomic_read(&atm_dev->stats.aal5.tx_err),
33333- atomic_read(&atm_dev->stats.aal5.rx),
33334- atomic_read(&atm_dev->stats.aal5.rx_err),
33335- atomic_read(&atm_dev->stats.aal5.rx_drop));
33336+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33337+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33338+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33339+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33340+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33341
33342 if (!left--) {
33343 if (instance->disconnected)
33344diff -urNp linux-3.0.3/drivers/usb/core/devices.c linux-3.0.3/drivers/usb/core/devices.c
33345--- linux-3.0.3/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33346+++ linux-3.0.3/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33347@@ -126,7 +126,7 @@ static const char format_endpt[] =
33348 * time it gets called.
33349 */
33350 static struct device_connect_event {
33351- atomic_t count;
33352+ atomic_unchecked_t count;
33353 wait_queue_head_t wait;
33354 } device_event = {
33355 .count = ATOMIC_INIT(1),
33356@@ -164,7 +164,7 @@ static const struct class_info clas_info
33357
33358 void usbfs_conn_disc_event(void)
33359 {
33360- atomic_add(2, &device_event.count);
33361+ atomic_add_unchecked(2, &device_event.count);
33362 wake_up(&device_event.wait);
33363 }
33364
33365@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33366
33367 poll_wait(file, &device_event.wait, wait);
33368
33369- event_count = atomic_read(&device_event.count);
33370+ event_count = atomic_read_unchecked(&device_event.count);
33371 if (file->f_version != event_count) {
33372 file->f_version = event_count;
33373 return POLLIN | POLLRDNORM;
33374diff -urNp linux-3.0.3/drivers/usb/core/message.c linux-3.0.3/drivers/usb/core/message.c
33375--- linux-3.0.3/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33376+++ linux-3.0.3/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33377@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33378 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33379 if (buf) {
33380 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33381- if (len > 0) {
33382- smallbuf = kmalloc(++len, GFP_NOIO);
33383+ if (len++ > 0) {
33384+ smallbuf = kmalloc(len, GFP_NOIO);
33385 if (!smallbuf)
33386 return buf;
33387 memcpy(smallbuf, buf, len);
33388diff -urNp linux-3.0.3/drivers/usb/early/ehci-dbgp.c linux-3.0.3/drivers/usb/early/ehci-dbgp.c
33389--- linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33390+++ linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33391@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33392
33393 #ifdef CONFIG_KGDB
33394 static struct kgdb_io kgdbdbgp_io_ops;
33395-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33396+static struct kgdb_io kgdbdbgp_io_ops_console;
33397+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33398 #else
33399 #define dbgp_kgdb_mode (0)
33400 #endif
33401@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33402 .write_char = kgdbdbgp_write_char,
33403 };
33404
33405+static struct kgdb_io kgdbdbgp_io_ops_console = {
33406+ .name = "kgdbdbgp",
33407+ .read_char = kgdbdbgp_read_char,
33408+ .write_char = kgdbdbgp_write_char,
33409+ .is_console = 1
33410+};
33411+
33412 static int kgdbdbgp_wait_time;
33413
33414 static int __init kgdbdbgp_parse_config(char *str)
33415@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33416 ptr++;
33417 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33418 }
33419- kgdb_register_io_module(&kgdbdbgp_io_ops);
33420- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33421+ if (early_dbgp_console.index != -1)
33422+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33423+ else
33424+ kgdb_register_io_module(&kgdbdbgp_io_ops);
33425
33426 return 0;
33427 }
33428diff -urNp linux-3.0.3/drivers/usb/host/xhci-mem.c linux-3.0.3/drivers/usb/host/xhci-mem.c
33429--- linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33430+++ linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33431@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33432 unsigned int num_tests;
33433 int i, ret;
33434
33435+ pax_track_stack();
33436+
33437 num_tests = ARRAY_SIZE(simple_test_vector);
33438 for (i = 0; i < num_tests; i++) {
33439 ret = xhci_test_trb_in_td(xhci,
33440diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-hc.h linux-3.0.3/drivers/usb/wusbcore/wa-hc.h
33441--- linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33442+++ linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33443@@ -192,7 +192,7 @@ struct wahc {
33444 struct list_head xfer_delayed_list;
33445 spinlock_t xfer_list_lock;
33446 struct work_struct xfer_work;
33447- atomic_t xfer_id_count;
33448+ atomic_unchecked_t xfer_id_count;
33449 };
33450
33451
33452@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33453 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33454 spin_lock_init(&wa->xfer_list_lock);
33455 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33456- atomic_set(&wa->xfer_id_count, 1);
33457+ atomic_set_unchecked(&wa->xfer_id_count, 1);
33458 }
33459
33460 /**
33461diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c
33462--- linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33463+++ linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33464@@ -294,7 +294,7 @@ out:
33465 */
33466 static void wa_xfer_id_init(struct wa_xfer *xfer)
33467 {
33468- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33469+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33470 }
33471
33472 /*
33473diff -urNp linux-3.0.3/drivers/vhost/vhost.c linux-3.0.3/drivers/vhost/vhost.c
33474--- linux-3.0.3/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33475+++ linux-3.0.3/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33476@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33477 return get_user(vq->last_used_idx, &used->idx);
33478 }
33479
33480-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33481+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33482 {
33483 struct file *eventfp, *filep = NULL,
33484 *pollstart = NULL, *pollstop = NULL;
33485diff -urNp linux-3.0.3/drivers/video/fbcmap.c linux-3.0.3/drivers/video/fbcmap.c
33486--- linux-3.0.3/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33487+++ linux-3.0.3/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33488@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33489 rc = -ENODEV;
33490 goto out;
33491 }
33492- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33493- !info->fbops->fb_setcmap)) {
33494+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33495 rc = -EINVAL;
33496 goto out1;
33497 }
33498diff -urNp linux-3.0.3/drivers/video/fbmem.c linux-3.0.3/drivers/video/fbmem.c
33499--- linux-3.0.3/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33500+++ linux-3.0.3/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33501@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33502 image->dx += image->width + 8;
33503 }
33504 } else if (rotate == FB_ROTATE_UD) {
33505- for (x = 0; x < num && image->dx >= 0; x++) {
33506+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33507 info->fbops->fb_imageblit(info, image);
33508 image->dx -= image->width + 8;
33509 }
33510@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33511 image->dy += image->height + 8;
33512 }
33513 } else if (rotate == FB_ROTATE_CCW) {
33514- for (x = 0; x < num && image->dy >= 0; x++) {
33515+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33516 info->fbops->fb_imageblit(info, image);
33517 image->dy -= image->height + 8;
33518 }
33519@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33520 int flags = info->flags;
33521 int ret = 0;
33522
33523+ pax_track_stack();
33524+
33525 if (var->activate & FB_ACTIVATE_INV_MODE) {
33526 struct fb_videomode mode1, mode2;
33527
33528@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33529 void __user *argp = (void __user *)arg;
33530 long ret = 0;
33531
33532+ pax_track_stack();
33533+
33534 switch (cmd) {
33535 case FBIOGET_VSCREENINFO:
33536 if (!lock_fb_info(info))
33537@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33538 return -EFAULT;
33539 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33540 return -EINVAL;
33541- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33542+ if (con2fb.framebuffer >= FB_MAX)
33543 return -EINVAL;
33544 if (!registered_fb[con2fb.framebuffer])
33545 request_module("fb%d", con2fb.framebuffer);
33546diff -urNp linux-3.0.3/drivers/video/i810/i810_accel.c linux-3.0.3/drivers/video/i810/i810_accel.c
33547--- linux-3.0.3/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33548+++ linux-3.0.3/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33549@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33550 }
33551 }
33552 printk("ringbuffer lockup!!!\n");
33553+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33554 i810_report_error(mmio);
33555 par->dev_flags |= LOCKUP;
33556 info->pixmap.scan_align = 1;
33557diff -urNp linux-3.0.3/drivers/video/udlfb.c linux-3.0.3/drivers/video/udlfb.c
33558--- linux-3.0.3/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33559+++ linux-3.0.3/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33560@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33561 dlfb_urb_completion(urb);
33562
33563 error:
33564- atomic_add(bytes_sent, &dev->bytes_sent);
33565- atomic_add(bytes_identical, &dev->bytes_identical);
33566- atomic_add(width*height*2, &dev->bytes_rendered);
33567+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33568+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33569+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33570 end_cycles = get_cycles();
33571- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33572+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33573 >> 10)), /* Kcycles */
33574 &dev->cpu_kcycles_used);
33575
33576@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33577 dlfb_urb_completion(urb);
33578
33579 error:
33580- atomic_add(bytes_sent, &dev->bytes_sent);
33581- atomic_add(bytes_identical, &dev->bytes_identical);
33582- atomic_add(bytes_rendered, &dev->bytes_rendered);
33583+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33584+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33585+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33586 end_cycles = get_cycles();
33587- atomic_add(((unsigned int) ((end_cycles - start_cycles)
33588+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33589 >> 10)), /* Kcycles */
33590 &dev->cpu_kcycles_used);
33591 }
33592@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33593 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33594 struct dlfb_data *dev = fb_info->par;
33595 return snprintf(buf, PAGE_SIZE, "%u\n",
33596- atomic_read(&dev->bytes_rendered));
33597+ atomic_read_unchecked(&dev->bytes_rendered));
33598 }
33599
33600 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33601@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33602 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33603 struct dlfb_data *dev = fb_info->par;
33604 return snprintf(buf, PAGE_SIZE, "%u\n",
33605- atomic_read(&dev->bytes_identical));
33606+ atomic_read_unchecked(&dev->bytes_identical));
33607 }
33608
33609 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33610@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33611 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33612 struct dlfb_data *dev = fb_info->par;
33613 return snprintf(buf, PAGE_SIZE, "%u\n",
33614- atomic_read(&dev->bytes_sent));
33615+ atomic_read_unchecked(&dev->bytes_sent));
33616 }
33617
33618 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33619@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33620 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33621 struct dlfb_data *dev = fb_info->par;
33622 return snprintf(buf, PAGE_SIZE, "%u\n",
33623- atomic_read(&dev->cpu_kcycles_used));
33624+ atomic_read_unchecked(&dev->cpu_kcycles_used));
33625 }
33626
33627 static ssize_t edid_show(
33628@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33629 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33630 struct dlfb_data *dev = fb_info->par;
33631
33632- atomic_set(&dev->bytes_rendered, 0);
33633- atomic_set(&dev->bytes_identical, 0);
33634- atomic_set(&dev->bytes_sent, 0);
33635- atomic_set(&dev->cpu_kcycles_used, 0);
33636+ atomic_set_unchecked(&dev->bytes_rendered, 0);
33637+ atomic_set_unchecked(&dev->bytes_identical, 0);
33638+ atomic_set_unchecked(&dev->bytes_sent, 0);
33639+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33640
33641 return count;
33642 }
33643diff -urNp linux-3.0.3/drivers/video/uvesafb.c linux-3.0.3/drivers/video/uvesafb.c
33644--- linux-3.0.3/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33645+++ linux-3.0.3/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33646@@ -19,6 +19,7 @@
33647 #include <linux/io.h>
33648 #include <linux/mutex.h>
33649 #include <linux/slab.h>
33650+#include <linux/moduleloader.h>
33651 #include <video/edid.h>
33652 #include <video/uvesafb.h>
33653 #ifdef CONFIG_X86
33654@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33655 NULL,
33656 };
33657
33658- return call_usermodehelper(v86d_path, argv, envp, 1);
33659+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33660 }
33661
33662 /*
33663@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33664 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33665 par->pmi_setpal = par->ypan = 0;
33666 } else {
33667+
33668+#ifdef CONFIG_PAX_KERNEXEC
33669+#ifdef CONFIG_MODULES
33670+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33671+#endif
33672+ if (!par->pmi_code) {
33673+ par->pmi_setpal = par->ypan = 0;
33674+ return 0;
33675+ }
33676+#endif
33677+
33678 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33679 + task->t.regs.edi);
33680+
33681+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33682+ pax_open_kernel();
33683+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33684+ pax_close_kernel();
33685+
33686+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33687+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33688+#else
33689 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33690 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33691+#endif
33692+
33693 printk(KERN_INFO "uvesafb: protected mode interface info at "
33694 "%04x:%04x\n",
33695 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33696@@ -1821,6 +1844,11 @@ out:
33697 if (par->vbe_modes)
33698 kfree(par->vbe_modes);
33699
33700+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33701+ if (par->pmi_code)
33702+ module_free_exec(NULL, par->pmi_code);
33703+#endif
33704+
33705 framebuffer_release(info);
33706 return err;
33707 }
33708@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33709 kfree(par->vbe_state_orig);
33710 if (par->vbe_state_saved)
33711 kfree(par->vbe_state_saved);
33712+
33713+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33714+ if (par->pmi_code)
33715+ module_free_exec(NULL, par->pmi_code);
33716+#endif
33717+
33718 }
33719
33720 framebuffer_release(info);
33721diff -urNp linux-3.0.3/drivers/video/vesafb.c linux-3.0.3/drivers/video/vesafb.c
33722--- linux-3.0.3/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33723+++ linux-3.0.3/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33724@@ -9,6 +9,7 @@
33725 */
33726
33727 #include <linux/module.h>
33728+#include <linux/moduleloader.h>
33729 #include <linux/kernel.h>
33730 #include <linux/errno.h>
33731 #include <linux/string.h>
33732@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33733 static int vram_total __initdata; /* Set total amount of memory */
33734 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33735 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33736-static void (*pmi_start)(void) __read_mostly;
33737-static void (*pmi_pal) (void) __read_mostly;
33738+static void (*pmi_start)(void) __read_only;
33739+static void (*pmi_pal) (void) __read_only;
33740 static int depth __read_mostly;
33741 static int vga_compat __read_mostly;
33742 /* --------------------------------------------------------------------- */
33743@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33744 unsigned int size_vmode;
33745 unsigned int size_remap;
33746 unsigned int size_total;
33747+ void *pmi_code = NULL;
33748
33749 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33750 return -ENODEV;
33751@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33752 size_remap = size_total;
33753 vesafb_fix.smem_len = size_remap;
33754
33755-#ifndef __i386__
33756- screen_info.vesapm_seg = 0;
33757-#endif
33758-
33759 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33760 printk(KERN_WARNING
33761 "vesafb: cannot reserve video memory at 0x%lx\n",
33762@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33763 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33764 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33765
33766+#ifdef __i386__
33767+
33768+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33769+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
33770+ if (!pmi_code)
33771+#elif !defined(CONFIG_PAX_KERNEXEC)
33772+ if (0)
33773+#endif
33774+
33775+#endif
33776+ screen_info.vesapm_seg = 0;
33777+
33778 if (screen_info.vesapm_seg) {
33779- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33780- screen_info.vesapm_seg,screen_info.vesapm_off);
33781+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33782+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33783 }
33784
33785 if (screen_info.vesapm_seg < 0xc000)
33786@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33787
33788 if (ypan || pmi_setpal) {
33789 unsigned short *pmi_base;
33790+
33791 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33792- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33793- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33794+
33795+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33796+ pax_open_kernel();
33797+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33798+#else
33799+ pmi_code = pmi_base;
33800+#endif
33801+
33802+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33803+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33804+
33805+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33806+ pmi_start = ktva_ktla(pmi_start);
33807+ pmi_pal = ktva_ktla(pmi_pal);
33808+ pax_close_kernel();
33809+#endif
33810+
33811 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33812 if (pmi_base[3]) {
33813 printk(KERN_INFO "vesafb: pmi: ports = ");
33814@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33815 info->node, info->fix.id);
33816 return 0;
33817 err:
33818+
33819+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33820+ module_free_exec(NULL, pmi_code);
33821+#endif
33822+
33823 if (info->screen_base)
33824 iounmap(info->screen_base);
33825 framebuffer_release(info);
33826diff -urNp linux-3.0.3/drivers/video/via/via_clock.h linux-3.0.3/drivers/video/via/via_clock.h
33827--- linux-3.0.3/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33828+++ linux-3.0.3/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33829@@ -56,7 +56,7 @@ struct via_clock {
33830
33831 void (*set_engine_pll_state)(u8 state);
33832 void (*set_engine_pll)(struct via_pll_config config);
33833-};
33834+} __no_const;
33835
33836
33837 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33838diff -urNp linux-3.0.3/drivers/virtio/virtio_balloon.c linux-3.0.3/drivers/virtio/virtio_balloon.c
33839--- linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33840+++ linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33841@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33842 struct sysinfo i;
33843 int idx = 0;
33844
33845+ pax_track_stack();
33846+
33847 all_vm_events(events);
33848 si_meminfo(&i);
33849
33850diff -urNp linux-3.0.3/fs/9p/vfs_inode.c linux-3.0.3/fs/9p/vfs_inode.c
33851--- linux-3.0.3/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33852+++ linux-3.0.3/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33853@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33854 void
33855 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33856 {
33857- char *s = nd_get_link(nd);
33858+ const char *s = nd_get_link(nd);
33859
33860 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33861 IS_ERR(s) ? "<error>" : s);
33862diff -urNp linux-3.0.3/fs/aio.c linux-3.0.3/fs/aio.c
33863--- linux-3.0.3/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33864+++ linux-3.0.3/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33865@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33866 size += sizeof(struct io_event) * nr_events;
33867 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33868
33869- if (nr_pages < 0)
33870+ if (nr_pages <= 0)
33871 return -EINVAL;
33872
33873 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33874@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33875 struct aio_timeout to;
33876 int retry = 0;
33877
33878+ pax_track_stack();
33879+
33880 /* needed to zero any padding within an entry (there shouldn't be
33881 * any, but C is fun!
33882 */
33883@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33884 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33885 {
33886 ssize_t ret;
33887+ struct iovec iovstack;
33888
33889 #ifdef CONFIG_COMPAT
33890 if (compat)
33891 ret = compat_rw_copy_check_uvector(type,
33892 (struct compat_iovec __user *)kiocb->ki_buf,
33893- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33894+ kiocb->ki_nbytes, 1, &iovstack,
33895 &kiocb->ki_iovec);
33896 else
33897 #endif
33898 ret = rw_copy_check_uvector(type,
33899 (struct iovec __user *)kiocb->ki_buf,
33900- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33901+ kiocb->ki_nbytes, 1, &iovstack,
33902 &kiocb->ki_iovec);
33903 if (ret < 0)
33904 goto out;
33905
33906+ if (kiocb->ki_iovec == &iovstack) {
33907+ kiocb->ki_inline_vec = iovstack;
33908+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
33909+ }
33910 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33911 kiocb->ki_cur_seg = 0;
33912 /* ki_nbytes/left now reflect bytes instead of segs */
33913diff -urNp linux-3.0.3/fs/attr.c linux-3.0.3/fs/attr.c
33914--- linux-3.0.3/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33915+++ linux-3.0.3/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33916@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33917 unsigned long limit;
33918
33919 limit = rlimit(RLIMIT_FSIZE);
33920+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33921 if (limit != RLIM_INFINITY && offset > limit)
33922 goto out_sig;
33923 if (offset > inode->i_sb->s_maxbytes)
33924diff -urNp linux-3.0.3/fs/befs/linuxvfs.c linux-3.0.3/fs/befs/linuxvfs.c
33925--- linux-3.0.3/fs/befs/linuxvfs.c 2011-07-21 22:17:23.000000000 -0400
33926+++ linux-3.0.3/fs/befs/linuxvfs.c 2011-08-23 21:47:56.000000000 -0400
33927@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33928 {
33929 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33930 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33931- char *link = nd_get_link(nd);
33932+ const char *link = nd_get_link(nd);
33933 if (!IS_ERR(link))
33934 kfree(link);
33935 }
33936diff -urNp linux-3.0.3/fs/binfmt_aout.c linux-3.0.3/fs/binfmt_aout.c
33937--- linux-3.0.3/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33938+++ linux-3.0.3/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33939@@ -16,6 +16,7 @@
33940 #include <linux/string.h>
33941 #include <linux/fs.h>
33942 #include <linux/file.h>
33943+#include <linux/security.h>
33944 #include <linux/stat.h>
33945 #include <linux/fcntl.h>
33946 #include <linux/ptrace.h>
33947@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33948 #endif
33949 # define START_STACK(u) ((void __user *)u.start_stack)
33950
33951+ memset(&dump, 0, sizeof(dump));
33952+
33953 fs = get_fs();
33954 set_fs(KERNEL_DS);
33955 has_dumped = 1;
33956@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33957
33958 /* If the size of the dump file exceeds the rlimit, then see what would happen
33959 if we wrote the stack, but not the data area. */
33960+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33961 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33962 dump.u_dsize = 0;
33963
33964 /* Make sure we have enough room to write the stack and data areas. */
33965+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33966 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33967 dump.u_ssize = 0;
33968
33969@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33970 rlim = rlimit(RLIMIT_DATA);
33971 if (rlim >= RLIM_INFINITY)
33972 rlim = ~0;
33973+
33974+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33975 if (ex.a_data + ex.a_bss > rlim)
33976 return -ENOMEM;
33977
33978@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33979 install_exec_creds(bprm);
33980 current->flags &= ~PF_FORKNOEXEC;
33981
33982+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33983+ current->mm->pax_flags = 0UL;
33984+#endif
33985+
33986+#ifdef CONFIG_PAX_PAGEEXEC
33987+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33988+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33989+
33990+#ifdef CONFIG_PAX_EMUTRAMP
33991+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33992+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33993+#endif
33994+
33995+#ifdef CONFIG_PAX_MPROTECT
33996+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33997+ current->mm->pax_flags |= MF_PAX_MPROTECT;
33998+#endif
33999+
34000+ }
34001+#endif
34002+
34003 if (N_MAGIC(ex) == OMAGIC) {
34004 unsigned long text_addr, map_size;
34005 loff_t pos;
34006@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34007
34008 down_write(&current->mm->mmap_sem);
34009 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34010- PROT_READ | PROT_WRITE | PROT_EXEC,
34011+ PROT_READ | PROT_WRITE,
34012 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34013 fd_offset + ex.a_text);
34014 up_write(&current->mm->mmap_sem);
34015diff -urNp linux-3.0.3/fs/binfmt_elf.c linux-3.0.3/fs/binfmt_elf.c
34016--- linux-3.0.3/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
34017+++ linux-3.0.3/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
34018@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34019 #define elf_core_dump NULL
34020 #endif
34021
34022+#ifdef CONFIG_PAX_MPROTECT
34023+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34024+#endif
34025+
34026 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34027 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34028 #else
34029@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34030 .load_binary = load_elf_binary,
34031 .load_shlib = load_elf_library,
34032 .core_dump = elf_core_dump,
34033+
34034+#ifdef CONFIG_PAX_MPROTECT
34035+ .handle_mprotect= elf_handle_mprotect,
34036+#endif
34037+
34038 .min_coredump = ELF_EXEC_PAGESIZE,
34039 };
34040
34041@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34042
34043 static int set_brk(unsigned long start, unsigned long end)
34044 {
34045+ unsigned long e = end;
34046+
34047 start = ELF_PAGEALIGN(start);
34048 end = ELF_PAGEALIGN(end);
34049 if (end > start) {
34050@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34051 if (BAD_ADDR(addr))
34052 return addr;
34053 }
34054- current->mm->start_brk = current->mm->brk = end;
34055+ current->mm->start_brk = current->mm->brk = e;
34056 return 0;
34057 }
34058
34059@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34060 elf_addr_t __user *u_rand_bytes;
34061 const char *k_platform = ELF_PLATFORM;
34062 const char *k_base_platform = ELF_BASE_PLATFORM;
34063- unsigned char k_rand_bytes[16];
34064+ u32 k_rand_bytes[4];
34065 int items;
34066 elf_addr_t *elf_info;
34067 int ei_index = 0;
34068 const struct cred *cred = current_cred();
34069 struct vm_area_struct *vma;
34070+ unsigned long saved_auxv[AT_VECTOR_SIZE];
34071+
34072+ pax_track_stack();
34073
34074 /*
34075 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34076@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34077 * Generate 16 random bytes for userspace PRNG seeding.
34078 */
34079 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34080- u_rand_bytes = (elf_addr_t __user *)
34081- STACK_ALLOC(p, sizeof(k_rand_bytes));
34082+ srandom32(k_rand_bytes[0] ^ random32());
34083+ srandom32(k_rand_bytes[1] ^ random32());
34084+ srandom32(k_rand_bytes[2] ^ random32());
34085+ srandom32(k_rand_bytes[3] ^ random32());
34086+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
34087+ u_rand_bytes = (elf_addr_t __user *) p;
34088 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34089 return -EFAULT;
34090
34091@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34092 return -EFAULT;
34093 current->mm->env_end = p;
34094
34095+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34096+
34097 /* Put the elf_info on the stack in the right place. */
34098 sp = (elf_addr_t __user *)envp + 1;
34099- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34100+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34101 return -EFAULT;
34102 return 0;
34103 }
34104@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34105 {
34106 struct elf_phdr *elf_phdata;
34107 struct elf_phdr *eppnt;
34108- unsigned long load_addr = 0;
34109+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34110 int load_addr_set = 0;
34111 unsigned long last_bss = 0, elf_bss = 0;
34112- unsigned long error = ~0UL;
34113+ unsigned long error = -EINVAL;
34114 unsigned long total_size;
34115 int retval, i, size;
34116
34117@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34118 goto out_close;
34119 }
34120
34121+#ifdef CONFIG_PAX_SEGMEXEC
34122+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34123+ pax_task_size = SEGMEXEC_TASK_SIZE;
34124+#endif
34125+
34126 eppnt = elf_phdata;
34127 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34128 if (eppnt->p_type == PT_LOAD) {
34129@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34130 k = load_addr + eppnt->p_vaddr;
34131 if (BAD_ADDR(k) ||
34132 eppnt->p_filesz > eppnt->p_memsz ||
34133- eppnt->p_memsz > TASK_SIZE ||
34134- TASK_SIZE - eppnt->p_memsz < k) {
34135+ eppnt->p_memsz > pax_task_size ||
34136+ pax_task_size - eppnt->p_memsz < k) {
34137 error = -ENOMEM;
34138 goto out_close;
34139 }
34140@@ -528,6 +553,193 @@ out:
34141 return error;
34142 }
34143
34144+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34145+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34146+{
34147+ unsigned long pax_flags = 0UL;
34148+
34149+#ifdef CONFIG_PAX_PAGEEXEC
34150+ if (elf_phdata->p_flags & PF_PAGEEXEC)
34151+ pax_flags |= MF_PAX_PAGEEXEC;
34152+#endif
34153+
34154+#ifdef CONFIG_PAX_SEGMEXEC
34155+ if (elf_phdata->p_flags & PF_SEGMEXEC)
34156+ pax_flags |= MF_PAX_SEGMEXEC;
34157+#endif
34158+
34159+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34160+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34161+ if ((__supported_pte_mask & _PAGE_NX))
34162+ pax_flags &= ~MF_PAX_SEGMEXEC;
34163+ else
34164+ pax_flags &= ~MF_PAX_PAGEEXEC;
34165+ }
34166+#endif
34167+
34168+#ifdef CONFIG_PAX_EMUTRAMP
34169+ if (elf_phdata->p_flags & PF_EMUTRAMP)
34170+ pax_flags |= MF_PAX_EMUTRAMP;
34171+#endif
34172+
34173+#ifdef CONFIG_PAX_MPROTECT
34174+ if (elf_phdata->p_flags & PF_MPROTECT)
34175+ pax_flags |= MF_PAX_MPROTECT;
34176+#endif
34177+
34178+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34179+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34180+ pax_flags |= MF_PAX_RANDMMAP;
34181+#endif
34182+
34183+ return pax_flags;
34184+}
34185+#endif
34186+
34187+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34188+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34189+{
34190+ unsigned long pax_flags = 0UL;
34191+
34192+#ifdef CONFIG_PAX_PAGEEXEC
34193+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34194+ pax_flags |= MF_PAX_PAGEEXEC;
34195+#endif
34196+
34197+#ifdef CONFIG_PAX_SEGMEXEC
34198+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34199+ pax_flags |= MF_PAX_SEGMEXEC;
34200+#endif
34201+
34202+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34203+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34204+ if ((__supported_pte_mask & _PAGE_NX))
34205+ pax_flags &= ~MF_PAX_SEGMEXEC;
34206+ else
34207+ pax_flags &= ~MF_PAX_PAGEEXEC;
34208+ }
34209+#endif
34210+
34211+#ifdef CONFIG_PAX_EMUTRAMP
34212+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34213+ pax_flags |= MF_PAX_EMUTRAMP;
34214+#endif
34215+
34216+#ifdef CONFIG_PAX_MPROTECT
34217+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34218+ pax_flags |= MF_PAX_MPROTECT;
34219+#endif
34220+
34221+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34222+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34223+ pax_flags |= MF_PAX_RANDMMAP;
34224+#endif
34225+
34226+ return pax_flags;
34227+}
34228+#endif
34229+
34230+#ifdef CONFIG_PAX_EI_PAX
34231+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34232+{
34233+ unsigned long pax_flags = 0UL;
34234+
34235+#ifdef CONFIG_PAX_PAGEEXEC
34236+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34237+ pax_flags |= MF_PAX_PAGEEXEC;
34238+#endif
34239+
34240+#ifdef CONFIG_PAX_SEGMEXEC
34241+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34242+ pax_flags |= MF_PAX_SEGMEXEC;
34243+#endif
34244+
34245+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34246+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34247+ if ((__supported_pte_mask & _PAGE_NX))
34248+ pax_flags &= ~MF_PAX_SEGMEXEC;
34249+ else
34250+ pax_flags &= ~MF_PAX_PAGEEXEC;
34251+ }
34252+#endif
34253+
34254+#ifdef CONFIG_PAX_EMUTRAMP
34255+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34256+ pax_flags |= MF_PAX_EMUTRAMP;
34257+#endif
34258+
34259+#ifdef CONFIG_PAX_MPROTECT
34260+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34261+ pax_flags |= MF_PAX_MPROTECT;
34262+#endif
34263+
34264+#ifdef CONFIG_PAX_ASLR
34265+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34266+ pax_flags |= MF_PAX_RANDMMAP;
34267+#endif
34268+
34269+ return pax_flags;
34270+}
34271+#endif
34272+
34273+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34274+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34275+{
34276+ unsigned long pax_flags = 0UL;
34277+
34278+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34279+ unsigned long i;
34280+ int found_flags = 0;
34281+#endif
34282+
34283+#ifdef CONFIG_PAX_EI_PAX
34284+ pax_flags = pax_parse_ei_pax(elf_ex);
34285+#endif
34286+
34287+#ifdef CONFIG_PAX_PT_PAX_FLAGS
34288+ for (i = 0UL; i < elf_ex->e_phnum; i++)
34289+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34290+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34291+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34292+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34293+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34294+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34295+ return -EINVAL;
34296+
34297+#ifdef CONFIG_PAX_SOFTMODE
34298+ if (pax_softmode)
34299+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
34300+ else
34301+#endif
34302+
34303+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34304+ found_flags = 1;
34305+ break;
34306+ }
34307+#endif
34308+
34309+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34310+ if (found_flags == 0) {
34311+ struct elf_phdr phdr;
34312+ memset(&phdr, 0, sizeof(phdr));
34313+ phdr.p_flags = PF_NOEMUTRAMP;
34314+#ifdef CONFIG_PAX_SOFTMODE
34315+ if (pax_softmode)
34316+ pax_flags = pax_parse_softmode(&phdr);
34317+ else
34318+#endif
34319+ pax_flags = pax_parse_hardmode(&phdr);
34320+ }
34321+#endif
34322+
34323+ if (0 > pax_check_flags(&pax_flags))
34324+ return -EINVAL;
34325+
34326+ current->mm->pax_flags = pax_flags;
34327+ return 0;
34328+}
34329+#endif
34330+
34331 /*
34332 * These are the functions used to load ELF style executables and shared
34333 * libraries. There is no binary dependent code anywhere else.
34334@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34335 {
34336 unsigned int random_variable = 0;
34337
34338+#ifdef CONFIG_PAX_RANDUSTACK
34339+ if (randomize_va_space)
34340+ return stack_top - current->mm->delta_stack;
34341+#endif
34342+
34343 if ((current->flags & PF_RANDOMIZE) &&
34344 !(current->personality & ADDR_NO_RANDOMIZE)) {
34345 random_variable = get_random_int() & STACK_RND_MASK;
34346@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34347 unsigned long load_addr = 0, load_bias = 0;
34348 int load_addr_set = 0;
34349 char * elf_interpreter = NULL;
34350- unsigned long error;
34351+ unsigned long error = 0;
34352 struct elf_phdr *elf_ppnt, *elf_phdata;
34353 unsigned long elf_bss, elf_brk;
34354 int retval, i;
34355@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34356 unsigned long start_code, end_code, start_data, end_data;
34357 unsigned long reloc_func_desc __maybe_unused = 0;
34358 int executable_stack = EXSTACK_DEFAULT;
34359- unsigned long def_flags = 0;
34360 struct {
34361 struct elfhdr elf_ex;
34362 struct elfhdr interp_elf_ex;
34363 } *loc;
34364+ unsigned long pax_task_size = TASK_SIZE;
34365
34366 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34367 if (!loc) {
34368@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34369
34370 /* OK, This is the point of no return */
34371 current->flags &= ~PF_FORKNOEXEC;
34372- current->mm->def_flags = def_flags;
34373+
34374+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34375+ current->mm->pax_flags = 0UL;
34376+#endif
34377+
34378+#ifdef CONFIG_PAX_DLRESOLVE
34379+ current->mm->call_dl_resolve = 0UL;
34380+#endif
34381+
34382+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34383+ current->mm->call_syscall = 0UL;
34384+#endif
34385+
34386+#ifdef CONFIG_PAX_ASLR
34387+ current->mm->delta_mmap = 0UL;
34388+ current->mm->delta_stack = 0UL;
34389+#endif
34390+
34391+ current->mm->def_flags = 0;
34392+
34393+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34394+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34395+ send_sig(SIGKILL, current, 0);
34396+ goto out_free_dentry;
34397+ }
34398+#endif
34399+
34400+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34401+ pax_set_initial_flags(bprm);
34402+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34403+ if (pax_set_initial_flags_func)
34404+ (pax_set_initial_flags_func)(bprm);
34405+#endif
34406+
34407+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34408+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34409+ current->mm->context.user_cs_limit = PAGE_SIZE;
34410+ current->mm->def_flags |= VM_PAGEEXEC;
34411+ }
34412+#endif
34413+
34414+#ifdef CONFIG_PAX_SEGMEXEC
34415+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34416+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34417+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34418+ pax_task_size = SEGMEXEC_TASK_SIZE;
34419+ current->mm->def_flags |= VM_NOHUGEPAGE;
34420+ }
34421+#endif
34422+
34423+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34424+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34425+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34426+ put_cpu();
34427+ }
34428+#endif
34429
34430 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34431 may depend on the personality. */
34432 SET_PERSONALITY(loc->elf_ex);
34433+
34434+#ifdef CONFIG_PAX_ASLR
34435+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34436+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34437+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34438+ }
34439+#endif
34440+
34441+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34442+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34443+ executable_stack = EXSTACK_DISABLE_X;
34444+ current->personality &= ~READ_IMPLIES_EXEC;
34445+ } else
34446+#endif
34447+
34448 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34449 current->personality |= READ_IMPLIES_EXEC;
34450
34451@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34452 #else
34453 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34454 #endif
34455+
34456+#ifdef CONFIG_PAX_RANDMMAP
34457+ /* PaX: randomize base address at the default exe base if requested */
34458+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34459+#ifdef CONFIG_SPARC64
34460+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34461+#else
34462+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34463+#endif
34464+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34465+ elf_flags |= MAP_FIXED;
34466+ }
34467+#endif
34468+
34469 }
34470
34471 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34472@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34473 * allowed task size. Note that p_filesz must always be
34474 * <= p_memsz so it is only necessary to check p_memsz.
34475 */
34476- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34477- elf_ppnt->p_memsz > TASK_SIZE ||
34478- TASK_SIZE - elf_ppnt->p_memsz < k) {
34479+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34480+ elf_ppnt->p_memsz > pax_task_size ||
34481+ pax_task_size - elf_ppnt->p_memsz < k) {
34482 /* set_brk can never work. Avoid overflows. */
34483 send_sig(SIGKILL, current, 0);
34484 retval = -EINVAL;
34485@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34486 start_data += load_bias;
34487 end_data += load_bias;
34488
34489+#ifdef CONFIG_PAX_RANDMMAP
34490+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34491+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34492+#endif
34493+
34494 /* Calling set_brk effectively mmaps the pages that we need
34495 * for the bss and break sections. We must do this before
34496 * mapping in the interpreter, to make sure it doesn't wind
34497@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34498 goto out_free_dentry;
34499 }
34500 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34501- send_sig(SIGSEGV, current, 0);
34502- retval = -EFAULT; /* Nobody gets to see this, but.. */
34503- goto out_free_dentry;
34504+ /*
34505+ * This bss-zeroing can fail if the ELF
34506+ * file specifies odd protections. So
34507+ * we don't check the return value
34508+ */
34509 }
34510
34511 if (elf_interpreter) {
34512@@ -1090,7 +1398,7 @@ out:
34513 * Decide what to dump of a segment, part, all or none.
34514 */
34515 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34516- unsigned long mm_flags)
34517+ unsigned long mm_flags, long signr)
34518 {
34519 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34520
34521@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34522 if (vma->vm_file == NULL)
34523 return 0;
34524
34525- if (FILTER(MAPPED_PRIVATE))
34526+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34527 goto whole;
34528
34529 /*
34530@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34531 {
34532 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34533 int i = 0;
34534- do
34535+ do {
34536 i += 2;
34537- while (auxv[i - 2] != AT_NULL);
34538+ } while (auxv[i - 2] != AT_NULL);
34539 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34540 }
34541
34542@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34543 }
34544
34545 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34546- unsigned long mm_flags)
34547+ struct coredump_params *cprm)
34548 {
34549 struct vm_area_struct *vma;
34550 size_t size = 0;
34551
34552 for (vma = first_vma(current, gate_vma); vma != NULL;
34553 vma = next_vma(vma, gate_vma))
34554- size += vma_dump_size(vma, mm_flags);
34555+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34556 return size;
34557 }
34558
34559@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34560
34561 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34562
34563- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34564+ offset += elf_core_vma_data_size(gate_vma, cprm);
34565 offset += elf_core_extra_data_size();
34566 e_shoff = offset;
34567
34568@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34569 offset = dataoff;
34570
34571 size += sizeof(*elf);
34572+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34573 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34574 goto end_coredump;
34575
34576 size += sizeof(*phdr4note);
34577+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34578 if (size > cprm->limit
34579 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34580 goto end_coredump;
34581@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34582 phdr.p_offset = offset;
34583 phdr.p_vaddr = vma->vm_start;
34584 phdr.p_paddr = 0;
34585- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34586+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34587 phdr.p_memsz = vma->vm_end - vma->vm_start;
34588 offset += phdr.p_filesz;
34589 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34590@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34591 phdr.p_align = ELF_EXEC_PAGESIZE;
34592
34593 size += sizeof(phdr);
34594+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34595 if (size > cprm->limit
34596 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34597 goto end_coredump;
34598@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34599 unsigned long addr;
34600 unsigned long end;
34601
34602- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34603+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34604
34605 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34606 struct page *page;
34607@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34608 page = get_dump_page(addr);
34609 if (page) {
34610 void *kaddr = kmap(page);
34611+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34612 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34613 !dump_write(cprm->file, kaddr,
34614 PAGE_SIZE);
34615@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34616
34617 if (e_phnum == PN_XNUM) {
34618 size += sizeof(*shdr4extnum);
34619+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
34620 if (size > cprm->limit
34621 || !dump_write(cprm->file, shdr4extnum,
34622 sizeof(*shdr4extnum)))
34623@@ -2067,6 +2380,97 @@ out:
34624
34625 #endif /* CONFIG_ELF_CORE */
34626
34627+#ifdef CONFIG_PAX_MPROTECT
34628+/* PaX: non-PIC ELF libraries need relocations on their executable segments
34629+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34630+ * we'll remove VM_MAYWRITE for good on RELRO segments.
34631+ *
34632+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34633+ * basis because we want to allow the common case and not the special ones.
34634+ */
34635+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34636+{
34637+ struct elfhdr elf_h;
34638+ struct elf_phdr elf_p;
34639+ unsigned long i;
34640+ unsigned long oldflags;
34641+ bool is_textrel_rw, is_textrel_rx, is_relro;
34642+
34643+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34644+ return;
34645+
34646+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34647+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34648+
34649+#ifdef CONFIG_PAX_ELFRELOCS
34650+ /* possible TEXTREL */
34651+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34652+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34653+#else
34654+ is_textrel_rw = false;
34655+ is_textrel_rx = false;
34656+#endif
34657+
34658+ /* possible RELRO */
34659+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34660+
34661+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34662+ return;
34663+
34664+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34665+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34666+
34667+#ifdef CONFIG_PAX_ETEXECRELOCS
34668+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34669+#else
34670+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34671+#endif
34672+
34673+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34674+ !elf_check_arch(&elf_h) ||
34675+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34676+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34677+ return;
34678+
34679+ for (i = 0UL; i < elf_h.e_phnum; i++) {
34680+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34681+ return;
34682+ switch (elf_p.p_type) {
34683+ case PT_DYNAMIC:
34684+ if (!is_textrel_rw && !is_textrel_rx)
34685+ continue;
34686+ i = 0UL;
34687+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34688+ elf_dyn dyn;
34689+
34690+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34691+ return;
34692+ if (dyn.d_tag == DT_NULL)
34693+ return;
34694+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34695+ gr_log_textrel(vma);
34696+ if (is_textrel_rw)
34697+ vma->vm_flags |= VM_MAYWRITE;
34698+ else
34699+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34700+ vma->vm_flags &= ~VM_MAYWRITE;
34701+ return;
34702+ }
34703+ i++;
34704+ }
34705+ return;
34706+
34707+ case PT_GNU_RELRO:
34708+ if (!is_relro)
34709+ continue;
34710+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34711+ vma->vm_flags &= ~VM_MAYWRITE;
34712+ return;
34713+ }
34714+ }
34715+}
34716+#endif
34717+
34718 static int __init init_elf_binfmt(void)
34719 {
34720 return register_binfmt(&elf_format);
34721diff -urNp linux-3.0.3/fs/binfmt_flat.c linux-3.0.3/fs/binfmt_flat.c
34722--- linux-3.0.3/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34723+++ linux-3.0.3/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34724@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34725 realdatastart = (unsigned long) -ENOMEM;
34726 printk("Unable to allocate RAM for process data, errno %d\n",
34727 (int)-realdatastart);
34728+ down_write(&current->mm->mmap_sem);
34729 do_munmap(current->mm, textpos, text_len);
34730+ up_write(&current->mm->mmap_sem);
34731 ret = realdatastart;
34732 goto err;
34733 }
34734@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34735 }
34736 if (IS_ERR_VALUE(result)) {
34737 printk("Unable to read data+bss, errno %d\n", (int)-result);
34738+ down_write(&current->mm->mmap_sem);
34739 do_munmap(current->mm, textpos, text_len);
34740 do_munmap(current->mm, realdatastart, len);
34741+ up_write(&current->mm->mmap_sem);
34742 ret = result;
34743 goto err;
34744 }
34745@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34746 }
34747 if (IS_ERR_VALUE(result)) {
34748 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34749+ down_write(&current->mm->mmap_sem);
34750 do_munmap(current->mm, textpos, text_len + data_len + extra +
34751 MAX_SHARED_LIBS * sizeof(unsigned long));
34752+ up_write(&current->mm->mmap_sem);
34753 ret = result;
34754 goto err;
34755 }
34756diff -urNp linux-3.0.3/fs/bio.c linux-3.0.3/fs/bio.c
34757--- linux-3.0.3/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34758+++ linux-3.0.3/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34759@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34760 const int read = bio_data_dir(bio) == READ;
34761 struct bio_map_data *bmd = bio->bi_private;
34762 int i;
34763- char *p = bmd->sgvecs[0].iov_base;
34764+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
34765
34766 __bio_for_each_segment(bvec, bio, i, 0) {
34767 char *addr = page_address(bvec->bv_page);
34768diff -urNp linux-3.0.3/fs/block_dev.c linux-3.0.3/fs/block_dev.c
34769--- linux-3.0.3/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34770+++ linux-3.0.3/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34771@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34772 else if (bdev->bd_contains == bdev)
34773 return true; /* is a whole device which isn't held */
34774
34775- else if (whole->bd_holder == bd_may_claim)
34776+ else if (whole->bd_holder == (void *)bd_may_claim)
34777 return true; /* is a partition of a device that is being partitioned */
34778 else if (whole->bd_holder != NULL)
34779 return false; /* is a partition of a held device */
34780diff -urNp linux-3.0.3/fs/btrfs/ctree.c linux-3.0.3/fs/btrfs/ctree.c
34781--- linux-3.0.3/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34782+++ linux-3.0.3/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34783@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34784 free_extent_buffer(buf);
34785 add_root_to_dirty_list(root);
34786 } else {
34787- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34788- parent_start = parent->start;
34789- else
34790+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34791+ if (parent)
34792+ parent_start = parent->start;
34793+ else
34794+ parent_start = 0;
34795+ } else
34796 parent_start = 0;
34797
34798 WARN_ON(trans->transid != btrfs_header_generation(parent));
34799diff -urNp linux-3.0.3/fs/btrfs/inode.c linux-3.0.3/fs/btrfs/inode.c
34800--- linux-3.0.3/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34801+++ linux-3.0.3/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34802@@ -6895,7 +6895,7 @@ fail:
34803 return -ENOMEM;
34804 }
34805
34806-static int btrfs_getattr(struct vfsmount *mnt,
34807+int btrfs_getattr(struct vfsmount *mnt,
34808 struct dentry *dentry, struct kstat *stat)
34809 {
34810 struct inode *inode = dentry->d_inode;
34811@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34812 return 0;
34813 }
34814
34815+EXPORT_SYMBOL(btrfs_getattr);
34816+
34817+dev_t get_btrfs_dev_from_inode(struct inode *inode)
34818+{
34819+ return BTRFS_I(inode)->root->anon_super.s_dev;
34820+}
34821+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34822+
34823 /*
34824 * If a file is moved, it will inherit the cow and compression flags of the new
34825 * directory.
34826diff -urNp linux-3.0.3/fs/btrfs/ioctl.c linux-3.0.3/fs/btrfs/ioctl.c
34827--- linux-3.0.3/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34828+++ linux-3.0.3/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34829@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34830 for (i = 0; i < num_types; i++) {
34831 struct btrfs_space_info *tmp;
34832
34833+ /* Don't copy in more than we allocated */
34834 if (!slot_count)
34835 break;
34836
34837+ slot_count--;
34838+
34839 info = NULL;
34840 rcu_read_lock();
34841 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34842@@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34843 memcpy(dest, &space, sizeof(space));
34844 dest++;
34845 space_args.total_spaces++;
34846- slot_count--;
34847 }
34848- if (!slot_count)
34849- break;
34850 }
34851 up_read(&info->groups_sem);
34852 }
34853diff -urNp linux-3.0.3/fs/btrfs/relocation.c linux-3.0.3/fs/btrfs/relocation.c
34854--- linux-3.0.3/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34855+++ linux-3.0.3/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34856@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34857 }
34858 spin_unlock(&rc->reloc_root_tree.lock);
34859
34860- BUG_ON((struct btrfs_root *)node->data != root);
34861+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
34862
34863 if (!del) {
34864 spin_lock(&rc->reloc_root_tree.lock);
34865diff -urNp linux-3.0.3/fs/cachefiles/bind.c linux-3.0.3/fs/cachefiles/bind.c
34866--- linux-3.0.3/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34867+++ linux-3.0.3/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34868@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34869 args);
34870
34871 /* start by checking things over */
34872- ASSERT(cache->fstop_percent >= 0 &&
34873- cache->fstop_percent < cache->fcull_percent &&
34874+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
34875 cache->fcull_percent < cache->frun_percent &&
34876 cache->frun_percent < 100);
34877
34878- ASSERT(cache->bstop_percent >= 0 &&
34879- cache->bstop_percent < cache->bcull_percent &&
34880+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
34881 cache->bcull_percent < cache->brun_percent &&
34882 cache->brun_percent < 100);
34883
34884diff -urNp linux-3.0.3/fs/cachefiles/daemon.c linux-3.0.3/fs/cachefiles/daemon.c
34885--- linux-3.0.3/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34886+++ linux-3.0.3/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34887@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34888 if (n > buflen)
34889 return -EMSGSIZE;
34890
34891- if (copy_to_user(_buffer, buffer, n) != 0)
34892+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34893 return -EFAULT;
34894
34895 return n;
34896@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34897 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34898 return -EIO;
34899
34900- if (datalen < 0 || datalen > PAGE_SIZE - 1)
34901+ if (datalen > PAGE_SIZE - 1)
34902 return -EOPNOTSUPP;
34903
34904 /* drag the command string into the kernel so we can parse it */
34905@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34906 if (args[0] != '%' || args[1] != '\0')
34907 return -EINVAL;
34908
34909- if (fstop < 0 || fstop >= cache->fcull_percent)
34910+ if (fstop >= cache->fcull_percent)
34911 return cachefiles_daemon_range_error(cache, args);
34912
34913 cache->fstop_percent = fstop;
34914@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34915 if (args[0] != '%' || args[1] != '\0')
34916 return -EINVAL;
34917
34918- if (bstop < 0 || bstop >= cache->bcull_percent)
34919+ if (bstop >= cache->bcull_percent)
34920 return cachefiles_daemon_range_error(cache, args);
34921
34922 cache->bstop_percent = bstop;
34923diff -urNp linux-3.0.3/fs/cachefiles/internal.h linux-3.0.3/fs/cachefiles/internal.h
34924--- linux-3.0.3/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34925+++ linux-3.0.3/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34926@@ -57,7 +57,7 @@ struct cachefiles_cache {
34927 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34928 struct rb_root active_nodes; /* active nodes (can't be culled) */
34929 rwlock_t active_lock; /* lock for active_nodes */
34930- atomic_t gravecounter; /* graveyard uniquifier */
34931+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34932 unsigned frun_percent; /* when to stop culling (% files) */
34933 unsigned fcull_percent; /* when to start culling (% files) */
34934 unsigned fstop_percent; /* when to stop allocating (% files) */
34935@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34936 * proc.c
34937 */
34938 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34939-extern atomic_t cachefiles_lookup_histogram[HZ];
34940-extern atomic_t cachefiles_mkdir_histogram[HZ];
34941-extern atomic_t cachefiles_create_histogram[HZ];
34942+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34943+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34944+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34945
34946 extern int __init cachefiles_proc_init(void);
34947 extern void cachefiles_proc_cleanup(void);
34948 static inline
34949-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34950+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34951 {
34952 unsigned long jif = jiffies - start_jif;
34953 if (jif >= HZ)
34954 jif = HZ - 1;
34955- atomic_inc(&histogram[jif]);
34956+ atomic_inc_unchecked(&histogram[jif]);
34957 }
34958
34959 #else
34960diff -urNp linux-3.0.3/fs/cachefiles/namei.c linux-3.0.3/fs/cachefiles/namei.c
34961--- linux-3.0.3/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34962+++ linux-3.0.3/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34963@@ -318,7 +318,7 @@ try_again:
34964 /* first step is to make up a grave dentry in the graveyard */
34965 sprintf(nbuffer, "%08x%08x",
34966 (uint32_t) get_seconds(),
34967- (uint32_t) atomic_inc_return(&cache->gravecounter));
34968+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34969
34970 /* do the multiway lock magic */
34971 trap = lock_rename(cache->graveyard, dir);
34972diff -urNp linux-3.0.3/fs/cachefiles/proc.c linux-3.0.3/fs/cachefiles/proc.c
34973--- linux-3.0.3/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34974+++ linux-3.0.3/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34975@@ -14,9 +14,9 @@
34976 #include <linux/seq_file.h>
34977 #include "internal.h"
34978
34979-atomic_t cachefiles_lookup_histogram[HZ];
34980-atomic_t cachefiles_mkdir_histogram[HZ];
34981-atomic_t cachefiles_create_histogram[HZ];
34982+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34983+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34984+atomic_unchecked_t cachefiles_create_histogram[HZ];
34985
34986 /*
34987 * display the latency histogram
34988@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34989 return 0;
34990 default:
34991 index = (unsigned long) v - 3;
34992- x = atomic_read(&cachefiles_lookup_histogram[index]);
34993- y = atomic_read(&cachefiles_mkdir_histogram[index]);
34994- z = atomic_read(&cachefiles_create_histogram[index]);
34995+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34996+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34997+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34998 if (x == 0 && y == 0 && z == 0)
34999 return 0;
35000
35001diff -urNp linux-3.0.3/fs/cachefiles/rdwr.c linux-3.0.3/fs/cachefiles/rdwr.c
35002--- linux-3.0.3/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
35003+++ linux-3.0.3/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
35004@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35005 old_fs = get_fs();
35006 set_fs(KERNEL_DS);
35007 ret = file->f_op->write(
35008- file, (const void __user *) data, len, &pos);
35009+ file, (__force const void __user *) data, len, &pos);
35010 set_fs(old_fs);
35011 kunmap(page);
35012 if (ret != len)
35013diff -urNp linux-3.0.3/fs/ceph/dir.c linux-3.0.3/fs/ceph/dir.c
35014--- linux-3.0.3/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
35015+++ linux-3.0.3/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
35016@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35017 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35018 struct ceph_mds_client *mdsc = fsc->mdsc;
35019 unsigned frag = fpos_frag(filp->f_pos);
35020- int off = fpos_off(filp->f_pos);
35021+ unsigned int off = fpos_off(filp->f_pos);
35022 int err;
35023 u32 ftype;
35024 struct ceph_mds_reply_info_parsed *rinfo;
35025diff -urNp linux-3.0.3/fs/cifs/cifs_debug.c linux-3.0.3/fs/cifs/cifs_debug.c
35026--- linux-3.0.3/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
35027+++ linux-3.0.3/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
35028@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
35029
35030 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
35031 #ifdef CONFIG_CIFS_STATS2
35032- atomic_set(&totBufAllocCount, 0);
35033- atomic_set(&totSmBufAllocCount, 0);
35034+ atomic_set_unchecked(&totBufAllocCount, 0);
35035+ atomic_set_unchecked(&totSmBufAllocCount, 0);
35036 #endif /* CONFIG_CIFS_STATS2 */
35037 spin_lock(&cifs_tcp_ses_lock);
35038 list_for_each(tmp1, &cifs_tcp_ses_list) {
35039@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35040 tcon = list_entry(tmp3,
35041 struct cifs_tcon,
35042 tcon_list);
35043- atomic_set(&tcon->num_smbs_sent, 0);
35044- atomic_set(&tcon->num_writes, 0);
35045- atomic_set(&tcon->num_reads, 0);
35046- atomic_set(&tcon->num_oplock_brks, 0);
35047- atomic_set(&tcon->num_opens, 0);
35048- atomic_set(&tcon->num_posixopens, 0);
35049- atomic_set(&tcon->num_posixmkdirs, 0);
35050- atomic_set(&tcon->num_closes, 0);
35051- atomic_set(&tcon->num_deletes, 0);
35052- atomic_set(&tcon->num_mkdirs, 0);
35053- atomic_set(&tcon->num_rmdirs, 0);
35054- atomic_set(&tcon->num_renames, 0);
35055- atomic_set(&tcon->num_t2renames, 0);
35056- atomic_set(&tcon->num_ffirst, 0);
35057- atomic_set(&tcon->num_fnext, 0);
35058- atomic_set(&tcon->num_fclose, 0);
35059- atomic_set(&tcon->num_hardlinks, 0);
35060- atomic_set(&tcon->num_symlinks, 0);
35061- atomic_set(&tcon->num_locks, 0);
35062+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35063+ atomic_set_unchecked(&tcon->num_writes, 0);
35064+ atomic_set_unchecked(&tcon->num_reads, 0);
35065+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35066+ atomic_set_unchecked(&tcon->num_opens, 0);
35067+ atomic_set_unchecked(&tcon->num_posixopens, 0);
35068+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35069+ atomic_set_unchecked(&tcon->num_closes, 0);
35070+ atomic_set_unchecked(&tcon->num_deletes, 0);
35071+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
35072+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
35073+ atomic_set_unchecked(&tcon->num_renames, 0);
35074+ atomic_set_unchecked(&tcon->num_t2renames, 0);
35075+ atomic_set_unchecked(&tcon->num_ffirst, 0);
35076+ atomic_set_unchecked(&tcon->num_fnext, 0);
35077+ atomic_set_unchecked(&tcon->num_fclose, 0);
35078+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
35079+ atomic_set_unchecked(&tcon->num_symlinks, 0);
35080+ atomic_set_unchecked(&tcon->num_locks, 0);
35081 }
35082 }
35083 }
35084@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
35085 smBufAllocCount.counter, cifs_min_small);
35086 #ifdef CONFIG_CIFS_STATS2
35087 seq_printf(m, "Total Large %d Small %d Allocations\n",
35088- atomic_read(&totBufAllocCount),
35089- atomic_read(&totSmBufAllocCount));
35090+ atomic_read_unchecked(&totBufAllocCount),
35091+ atomic_read_unchecked(&totSmBufAllocCount));
35092 #endif /* CONFIG_CIFS_STATS2 */
35093
35094 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
35095@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35096 if (tcon->need_reconnect)
35097 seq_puts(m, "\tDISCONNECTED ");
35098 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35099- atomic_read(&tcon->num_smbs_sent),
35100- atomic_read(&tcon->num_oplock_brks));
35101+ atomic_read_unchecked(&tcon->num_smbs_sent),
35102+ atomic_read_unchecked(&tcon->num_oplock_brks));
35103 seq_printf(m, "\nReads: %d Bytes: %lld",
35104- atomic_read(&tcon->num_reads),
35105+ atomic_read_unchecked(&tcon->num_reads),
35106 (long long)(tcon->bytes_read));
35107 seq_printf(m, "\nWrites: %d Bytes: %lld",
35108- atomic_read(&tcon->num_writes),
35109+ atomic_read_unchecked(&tcon->num_writes),
35110 (long long)(tcon->bytes_written));
35111 seq_printf(m, "\nFlushes: %d",
35112- atomic_read(&tcon->num_flushes));
35113+ atomic_read_unchecked(&tcon->num_flushes));
35114 seq_printf(m, "\nLocks: %d HardLinks: %d "
35115 "Symlinks: %d",
35116- atomic_read(&tcon->num_locks),
35117- atomic_read(&tcon->num_hardlinks),
35118- atomic_read(&tcon->num_symlinks));
35119+ atomic_read_unchecked(&tcon->num_locks),
35120+ atomic_read_unchecked(&tcon->num_hardlinks),
35121+ atomic_read_unchecked(&tcon->num_symlinks));
35122 seq_printf(m, "\nOpens: %d Closes: %d "
35123 "Deletes: %d",
35124- atomic_read(&tcon->num_opens),
35125- atomic_read(&tcon->num_closes),
35126- atomic_read(&tcon->num_deletes));
35127+ atomic_read_unchecked(&tcon->num_opens),
35128+ atomic_read_unchecked(&tcon->num_closes),
35129+ atomic_read_unchecked(&tcon->num_deletes));
35130 seq_printf(m, "\nPosix Opens: %d "
35131 "Posix Mkdirs: %d",
35132- atomic_read(&tcon->num_posixopens),
35133- atomic_read(&tcon->num_posixmkdirs));
35134+ atomic_read_unchecked(&tcon->num_posixopens),
35135+ atomic_read_unchecked(&tcon->num_posixmkdirs));
35136 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35137- atomic_read(&tcon->num_mkdirs),
35138- atomic_read(&tcon->num_rmdirs));
35139+ atomic_read_unchecked(&tcon->num_mkdirs),
35140+ atomic_read_unchecked(&tcon->num_rmdirs));
35141 seq_printf(m, "\nRenames: %d T2 Renames %d",
35142- atomic_read(&tcon->num_renames),
35143- atomic_read(&tcon->num_t2renames));
35144+ atomic_read_unchecked(&tcon->num_renames),
35145+ atomic_read_unchecked(&tcon->num_t2renames));
35146 seq_printf(m, "\nFindFirst: %d FNext %d "
35147 "FClose %d",
35148- atomic_read(&tcon->num_ffirst),
35149- atomic_read(&tcon->num_fnext),
35150- atomic_read(&tcon->num_fclose));
35151+ atomic_read_unchecked(&tcon->num_ffirst),
35152+ atomic_read_unchecked(&tcon->num_fnext),
35153+ atomic_read_unchecked(&tcon->num_fclose));
35154 }
35155 }
35156 }
35157diff -urNp linux-3.0.3/fs/cifs/cifsfs.c linux-3.0.3/fs/cifs/cifsfs.c
35158--- linux-3.0.3/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
35159+++ linux-3.0.3/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
35160@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
35161 cifs_req_cachep = kmem_cache_create("cifs_request",
35162 CIFSMaxBufSize +
35163 MAX_CIFS_HDR_SIZE, 0,
35164- SLAB_HWCACHE_ALIGN, NULL);
35165+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
35166 if (cifs_req_cachep == NULL)
35167 return -ENOMEM;
35168
35169@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
35170 efficient to alloc 1 per page off the slab compared to 17K (5page)
35171 alloc of large cifs buffers even when page debugging is on */
35172 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
35173- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
35174+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
35175 NULL);
35176 if (cifs_sm_req_cachep == NULL) {
35177 mempool_destroy(cifs_req_poolp);
35178@@ -1106,8 +1106,8 @@ init_cifs(void)
35179 atomic_set(&bufAllocCount, 0);
35180 atomic_set(&smBufAllocCount, 0);
35181 #ifdef CONFIG_CIFS_STATS2
35182- atomic_set(&totBufAllocCount, 0);
35183- atomic_set(&totSmBufAllocCount, 0);
35184+ atomic_set_unchecked(&totBufAllocCount, 0);
35185+ atomic_set_unchecked(&totSmBufAllocCount, 0);
35186 #endif /* CONFIG_CIFS_STATS2 */
35187
35188 atomic_set(&midCount, 0);
35189diff -urNp linux-3.0.3/fs/cifs/cifsglob.h linux-3.0.3/fs/cifs/cifsglob.h
35190--- linux-3.0.3/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
35191+++ linux-3.0.3/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
35192@@ -381,28 +381,28 @@ struct cifs_tcon {
35193 __u16 Flags; /* optional support bits */
35194 enum statusEnum tidStatus;
35195 #ifdef CONFIG_CIFS_STATS
35196- atomic_t num_smbs_sent;
35197- atomic_t num_writes;
35198- atomic_t num_reads;
35199- atomic_t num_flushes;
35200- atomic_t num_oplock_brks;
35201- atomic_t num_opens;
35202- atomic_t num_closes;
35203- atomic_t num_deletes;
35204- atomic_t num_mkdirs;
35205- atomic_t num_posixopens;
35206- atomic_t num_posixmkdirs;
35207- atomic_t num_rmdirs;
35208- atomic_t num_renames;
35209- atomic_t num_t2renames;
35210- atomic_t num_ffirst;
35211- atomic_t num_fnext;
35212- atomic_t num_fclose;
35213- atomic_t num_hardlinks;
35214- atomic_t num_symlinks;
35215- atomic_t num_locks;
35216- atomic_t num_acl_get;
35217- atomic_t num_acl_set;
35218+ atomic_unchecked_t num_smbs_sent;
35219+ atomic_unchecked_t num_writes;
35220+ atomic_unchecked_t num_reads;
35221+ atomic_unchecked_t num_flushes;
35222+ atomic_unchecked_t num_oplock_brks;
35223+ atomic_unchecked_t num_opens;
35224+ atomic_unchecked_t num_closes;
35225+ atomic_unchecked_t num_deletes;
35226+ atomic_unchecked_t num_mkdirs;
35227+ atomic_unchecked_t num_posixopens;
35228+ atomic_unchecked_t num_posixmkdirs;
35229+ atomic_unchecked_t num_rmdirs;
35230+ atomic_unchecked_t num_renames;
35231+ atomic_unchecked_t num_t2renames;
35232+ atomic_unchecked_t num_ffirst;
35233+ atomic_unchecked_t num_fnext;
35234+ atomic_unchecked_t num_fclose;
35235+ atomic_unchecked_t num_hardlinks;
35236+ atomic_unchecked_t num_symlinks;
35237+ atomic_unchecked_t num_locks;
35238+ atomic_unchecked_t num_acl_get;
35239+ atomic_unchecked_t num_acl_set;
35240 #ifdef CONFIG_CIFS_STATS2
35241 unsigned long long time_writes;
35242 unsigned long long time_reads;
35243@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
35244 }
35245
35246 #ifdef CONFIG_CIFS_STATS
35247-#define cifs_stats_inc atomic_inc
35248+#define cifs_stats_inc atomic_inc_unchecked
35249
35250 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
35251 unsigned int bytes)
35252@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
35253 /* Various Debug counters */
35254 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
35255 #ifdef CONFIG_CIFS_STATS2
35256-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
35257-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
35258+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
35259+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
35260 #endif
35261 GLOBAL_EXTERN atomic_t smBufAllocCount;
35262 GLOBAL_EXTERN atomic_t midCount;
35263diff -urNp linux-3.0.3/fs/cifs/link.c linux-3.0.3/fs/cifs/link.c
35264--- linux-3.0.3/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
35265+++ linux-3.0.3/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
35266@@ -587,7 +587,7 @@ symlink_exit:
35267
35268 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35269 {
35270- char *p = nd_get_link(nd);
35271+ const char *p = nd_get_link(nd);
35272 if (!IS_ERR(p))
35273 kfree(p);
35274 }
35275diff -urNp linux-3.0.3/fs/cifs/misc.c linux-3.0.3/fs/cifs/misc.c
35276--- linux-3.0.3/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
35277+++ linux-3.0.3/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
35278@@ -156,7 +156,7 @@ cifs_buf_get(void)
35279 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
35280 atomic_inc(&bufAllocCount);
35281 #ifdef CONFIG_CIFS_STATS2
35282- atomic_inc(&totBufAllocCount);
35283+ atomic_inc_unchecked(&totBufAllocCount);
35284 #endif /* CONFIG_CIFS_STATS2 */
35285 }
35286
35287@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
35288 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
35289 atomic_inc(&smBufAllocCount);
35290 #ifdef CONFIG_CIFS_STATS2
35291- atomic_inc(&totSmBufAllocCount);
35292+ atomic_inc_unchecked(&totSmBufAllocCount);
35293 #endif /* CONFIG_CIFS_STATS2 */
35294
35295 }
35296diff -urNp linux-3.0.3/fs/coda/cache.c linux-3.0.3/fs/coda/cache.c
35297--- linux-3.0.3/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35298+++ linux-3.0.3/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35299@@ -24,7 +24,7 @@
35300 #include "coda_linux.h"
35301 #include "coda_cache.h"
35302
35303-static atomic_t permission_epoch = ATOMIC_INIT(0);
35304+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35305
35306 /* replace or extend an acl cache hit */
35307 void coda_cache_enter(struct inode *inode, int mask)
35308@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35309 struct coda_inode_info *cii = ITOC(inode);
35310
35311 spin_lock(&cii->c_lock);
35312- cii->c_cached_epoch = atomic_read(&permission_epoch);
35313+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35314 if (cii->c_uid != current_fsuid()) {
35315 cii->c_uid = current_fsuid();
35316 cii->c_cached_perm = mask;
35317@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35318 {
35319 struct coda_inode_info *cii = ITOC(inode);
35320 spin_lock(&cii->c_lock);
35321- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35322+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35323 spin_unlock(&cii->c_lock);
35324 }
35325
35326 /* remove all acl caches */
35327 void coda_cache_clear_all(struct super_block *sb)
35328 {
35329- atomic_inc(&permission_epoch);
35330+ atomic_inc_unchecked(&permission_epoch);
35331 }
35332
35333
35334@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35335 spin_lock(&cii->c_lock);
35336 hit = (mask & cii->c_cached_perm) == mask &&
35337 cii->c_uid == current_fsuid() &&
35338- cii->c_cached_epoch == atomic_read(&permission_epoch);
35339+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35340 spin_unlock(&cii->c_lock);
35341
35342 return hit;
35343diff -urNp linux-3.0.3/fs/compat_binfmt_elf.c linux-3.0.3/fs/compat_binfmt_elf.c
35344--- linux-3.0.3/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35345+++ linux-3.0.3/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35346@@ -30,11 +30,13 @@
35347 #undef elf_phdr
35348 #undef elf_shdr
35349 #undef elf_note
35350+#undef elf_dyn
35351 #undef elf_addr_t
35352 #define elfhdr elf32_hdr
35353 #define elf_phdr elf32_phdr
35354 #define elf_shdr elf32_shdr
35355 #define elf_note elf32_note
35356+#define elf_dyn Elf32_Dyn
35357 #define elf_addr_t Elf32_Addr
35358
35359 /*
35360diff -urNp linux-3.0.3/fs/compat.c linux-3.0.3/fs/compat.c
35361--- linux-3.0.3/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35362+++ linux-3.0.3/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35363@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35364 goto out;
35365
35366 ret = -EINVAL;
35367- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35368+ if (nr_segs > UIO_MAXIOV)
35369 goto out;
35370 if (nr_segs > fast_segs) {
35371 ret = -ENOMEM;
35372@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35373
35374 struct compat_readdir_callback {
35375 struct compat_old_linux_dirent __user *dirent;
35376+ struct file * file;
35377 int result;
35378 };
35379
35380@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35381 buf->result = -EOVERFLOW;
35382 return -EOVERFLOW;
35383 }
35384+
35385+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35386+ return 0;
35387+
35388 buf->result++;
35389 dirent = buf->dirent;
35390 if (!access_ok(VERIFY_WRITE, dirent,
35391@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35392
35393 buf.result = 0;
35394 buf.dirent = dirent;
35395+ buf.file = file;
35396
35397 error = vfs_readdir(file, compat_fillonedir, &buf);
35398 if (buf.result)
35399@@ -917,6 +923,7 @@ struct compat_linux_dirent {
35400 struct compat_getdents_callback {
35401 struct compat_linux_dirent __user *current_dir;
35402 struct compat_linux_dirent __user *previous;
35403+ struct file * file;
35404 int count;
35405 int error;
35406 };
35407@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35408 buf->error = -EOVERFLOW;
35409 return -EOVERFLOW;
35410 }
35411+
35412+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35413+ return 0;
35414+
35415 dirent = buf->previous;
35416 if (dirent) {
35417 if (__put_user(offset, &dirent->d_off))
35418@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35419 buf.previous = NULL;
35420 buf.count = count;
35421 buf.error = 0;
35422+ buf.file = file;
35423
35424 error = vfs_readdir(file, compat_filldir, &buf);
35425 if (error >= 0)
35426@@ -1006,6 +1018,7 @@ out:
35427 struct compat_getdents_callback64 {
35428 struct linux_dirent64 __user *current_dir;
35429 struct linux_dirent64 __user *previous;
35430+ struct file * file;
35431 int count;
35432 int error;
35433 };
35434@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35435 buf->error = -EINVAL; /* only used if we fail.. */
35436 if (reclen > buf->count)
35437 return -EINVAL;
35438+
35439+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35440+ return 0;
35441+
35442 dirent = buf->previous;
35443
35444 if (dirent) {
35445@@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35446 buf.previous = NULL;
35447 buf.count = count;
35448 buf.error = 0;
35449+ buf.file = file;
35450
35451 error = vfs_readdir(file, compat_filldir64, &buf);
35452 if (error >= 0)
35453@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35454 struct fdtable *fdt;
35455 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35456
35457+ pax_track_stack();
35458+
35459 if (n < 0)
35460 goto out_nofds;
35461
35462diff -urNp linux-3.0.3/fs/compat_ioctl.c linux-3.0.3/fs/compat_ioctl.c
35463--- linux-3.0.3/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35464+++ linux-3.0.3/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35465@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35466
35467 err = get_user(palp, &up->palette);
35468 err |= get_user(length, &up->length);
35469+ if (err)
35470+ return -EFAULT;
35471
35472 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35473 err = put_user(compat_ptr(palp), &up_native->palette);
35474@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35475 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35476 {
35477 unsigned int a, b;
35478- a = *(unsigned int *)p;
35479- b = *(unsigned int *)q;
35480+ a = *(const unsigned int *)p;
35481+ b = *(const unsigned int *)q;
35482 if (a > b)
35483 return 1;
35484 if (a < b)
35485diff -urNp linux-3.0.3/fs/configfs/dir.c linux-3.0.3/fs/configfs/dir.c
35486--- linux-3.0.3/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35487+++ linux-3.0.3/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35488@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35489 }
35490 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35491 struct configfs_dirent *next;
35492- const char * name;
35493+ const unsigned char * name;
35494+ char d_name[sizeof(next->s_dentry->d_iname)];
35495 int len;
35496 struct inode *inode = NULL;
35497
35498@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35499 continue;
35500
35501 name = configfs_get_name(next);
35502- len = strlen(name);
35503+ if (next->s_dentry && name == next->s_dentry->d_iname) {
35504+ len = next->s_dentry->d_name.len;
35505+ memcpy(d_name, name, len);
35506+ name = d_name;
35507+ } else
35508+ len = strlen(name);
35509
35510 /*
35511 * We'll have a dentry and an inode for
35512diff -urNp linux-3.0.3/fs/dcache.c linux-3.0.3/fs/dcache.c
35513--- linux-3.0.3/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35514+++ linux-3.0.3/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35515@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35516 mempages -= reserve;
35517
35518 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35519- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35520+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35521
35522 dcache_init();
35523 inode_init();
35524diff -urNp linux-3.0.3/fs/ecryptfs/inode.c linux-3.0.3/fs/ecryptfs/inode.c
35525--- linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35526+++ linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35527@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35528 old_fs = get_fs();
35529 set_fs(get_ds());
35530 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35531- (char __user *)lower_buf,
35532+ (__force char __user *)lower_buf,
35533 lower_bufsiz);
35534 set_fs(old_fs);
35535 if (rc < 0)
35536@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35537 }
35538 old_fs = get_fs();
35539 set_fs(get_ds());
35540- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35541+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35542 set_fs(old_fs);
35543 if (rc < 0) {
35544 kfree(buf);
35545@@ -765,7 +765,7 @@ out:
35546 static void
35547 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35548 {
35549- char *buf = nd_get_link(nd);
35550+ const char *buf = nd_get_link(nd);
35551 if (!IS_ERR(buf)) {
35552 /* Free the char* */
35553 kfree(buf);
35554diff -urNp linux-3.0.3/fs/ecryptfs/miscdev.c linux-3.0.3/fs/ecryptfs/miscdev.c
35555--- linux-3.0.3/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35556+++ linux-3.0.3/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35557@@ -328,7 +328,7 @@ check_list:
35558 goto out_unlock_msg_ctx;
35559 i = 5;
35560 if (msg_ctx->msg) {
35561- if (copy_to_user(&buf[i], packet_length, packet_length_size))
35562+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35563 goto out_unlock_msg_ctx;
35564 i += packet_length_size;
35565 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35566diff -urNp linux-3.0.3/fs/exec.c linux-3.0.3/fs/exec.c
35567--- linux-3.0.3/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35568+++ linux-3.0.3/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
35569@@ -55,12 +55,24 @@
35570 #include <linux/pipe_fs_i.h>
35571 #include <linux/oom.h>
35572 #include <linux/compat.h>
35573+#include <linux/random.h>
35574+#include <linux/seq_file.h>
35575+
35576+#ifdef CONFIG_PAX_REFCOUNT
35577+#include <linux/kallsyms.h>
35578+#include <linux/kdebug.h>
35579+#endif
35580
35581 #include <asm/uaccess.h>
35582 #include <asm/mmu_context.h>
35583 #include <asm/tlb.h>
35584 #include "internal.h"
35585
35586+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35587+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35588+EXPORT_SYMBOL(pax_set_initial_flags_func);
35589+#endif
35590+
35591 int core_uses_pid;
35592 char core_pattern[CORENAME_MAX_SIZE] = "core";
35593 unsigned int core_pipe_limit;
35594@@ -70,7 +82,7 @@ struct core_name {
35595 char *corename;
35596 int used, size;
35597 };
35598-static atomic_t call_count = ATOMIC_INIT(1);
35599+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35600
35601 /* The maximal length of core_pattern is also specified in sysctl.c */
35602
35603@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35604 char *tmp = getname(library);
35605 int error = PTR_ERR(tmp);
35606 static const struct open_flags uselib_flags = {
35607- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35608+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35609 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35610 .intent = LOOKUP_OPEN
35611 };
35612@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35613 int write)
35614 {
35615 struct page *page;
35616- int ret;
35617
35618-#ifdef CONFIG_STACK_GROWSUP
35619- if (write) {
35620- ret = expand_downwards(bprm->vma, pos);
35621- if (ret < 0)
35622- return NULL;
35623- }
35624-#endif
35625- ret = get_user_pages(current, bprm->mm, pos,
35626- 1, write, 1, &page, NULL);
35627- if (ret <= 0)
35628+ if (0 > expand_downwards(bprm->vma, pos))
35629+ return NULL;
35630+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35631 return NULL;
35632
35633 if (write) {
35634@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35635 vma->vm_end = STACK_TOP_MAX;
35636 vma->vm_start = vma->vm_end - PAGE_SIZE;
35637 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35638+
35639+#ifdef CONFIG_PAX_SEGMEXEC
35640+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35641+#endif
35642+
35643 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35644 INIT_LIST_HEAD(&vma->anon_vma_chain);
35645
35646@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35647 mm->stack_vm = mm->total_vm = 1;
35648 up_write(&mm->mmap_sem);
35649 bprm->p = vma->vm_end - sizeof(void *);
35650+
35651+#ifdef CONFIG_PAX_RANDUSTACK
35652+ if (randomize_va_space)
35653+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35654+#endif
35655+
35656 return 0;
35657 err:
35658 up_write(&mm->mmap_sem);
35659@@ -403,19 +418,7 @@ err:
35660 return err;
35661 }
35662
35663-struct user_arg_ptr {
35664-#ifdef CONFIG_COMPAT
35665- bool is_compat;
35666-#endif
35667- union {
35668- const char __user *const __user *native;
35669-#ifdef CONFIG_COMPAT
35670- compat_uptr_t __user *compat;
35671-#endif
35672- } ptr;
35673-};
35674-
35675-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35676+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35677 {
35678 const char __user *native;
35679
35680@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35681 int r;
35682 mm_segment_t oldfs = get_fs();
35683 struct user_arg_ptr argv = {
35684- .ptr.native = (const char __user *const __user *)__argv,
35685+ .ptr.native = (__force const char __user *const __user *)__argv,
35686 };
35687
35688 set_fs(KERNEL_DS);
35689@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35690 unsigned long new_end = old_end - shift;
35691 struct mmu_gather tlb;
35692
35693- BUG_ON(new_start > new_end);
35694+ if (new_start >= new_end || new_start < mmap_min_addr)
35695+ return -ENOMEM;
35696
35697 /*
35698 * ensure there are no vmas between where we want to go
35699@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35700 if (vma != find_vma(mm, new_start))
35701 return -EFAULT;
35702
35703+#ifdef CONFIG_PAX_SEGMEXEC
35704+ BUG_ON(pax_find_mirror_vma(vma));
35705+#endif
35706+
35707 /*
35708 * cover the whole range: [new_start, old_end)
35709 */
35710@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35711 stack_top = arch_align_stack(stack_top);
35712 stack_top = PAGE_ALIGN(stack_top);
35713
35714- if (unlikely(stack_top < mmap_min_addr) ||
35715- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35716- return -ENOMEM;
35717-
35718 stack_shift = vma->vm_end - stack_top;
35719
35720 bprm->p -= stack_shift;
35721@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35722 bprm->exec -= stack_shift;
35723
35724 down_write(&mm->mmap_sem);
35725+
35726+ /* Move stack pages down in memory. */
35727+ if (stack_shift) {
35728+ ret = shift_arg_pages(vma, stack_shift);
35729+ if (ret)
35730+ goto out_unlock;
35731+ }
35732+
35733 vm_flags = VM_STACK_FLAGS;
35734
35735+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35736+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35737+ vm_flags &= ~VM_EXEC;
35738+
35739+#ifdef CONFIG_PAX_MPROTECT
35740+ if (mm->pax_flags & MF_PAX_MPROTECT)
35741+ vm_flags &= ~VM_MAYEXEC;
35742+#endif
35743+
35744+ }
35745+#endif
35746+
35747 /*
35748 * Adjust stack execute permissions; explicitly enable for
35749 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35750@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35751 goto out_unlock;
35752 BUG_ON(prev != vma);
35753
35754- /* Move stack pages down in memory. */
35755- if (stack_shift) {
35756- ret = shift_arg_pages(vma, stack_shift);
35757- if (ret)
35758- goto out_unlock;
35759- }
35760-
35761 /* mprotect_fixup is overkill to remove the temporary stack flags */
35762 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35763
35764@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35765 struct file *file;
35766 int err;
35767 static const struct open_flags open_exec_flags = {
35768- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35769+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35770 .acc_mode = MAY_EXEC | MAY_OPEN,
35771 .intent = LOOKUP_OPEN
35772 };
35773@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35774 old_fs = get_fs();
35775 set_fs(get_ds());
35776 /* The cast to a user pointer is valid due to the set_fs() */
35777- result = vfs_read(file, (void __user *)addr, count, &pos);
35778+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
35779 set_fs(old_fs);
35780 return result;
35781 }
35782@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35783 }
35784 rcu_read_unlock();
35785
35786- if (p->fs->users > n_fs) {
35787+ if (atomic_read(&p->fs->users) > n_fs) {
35788 bprm->unsafe |= LSM_UNSAFE_SHARE;
35789 } else {
35790 res = -EAGAIN;
35791@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
35792 struct user_arg_ptr envp,
35793 struct pt_regs *regs)
35794 {
35795+#ifdef CONFIG_GRKERNSEC
35796+ struct file *old_exec_file;
35797+ struct acl_subject_label *old_acl;
35798+ struct rlimit old_rlim[RLIM_NLIMITS];
35799+#endif
35800 struct linux_binprm *bprm;
35801 struct file *file;
35802 struct files_struct *displaced;
35803 bool clear_in_exec;
35804 int retval;
35805+ const struct cred *cred = current_cred();
35806+
35807+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35808+
35809+ /*
35810+ * We move the actual failure in case of RLIMIT_NPROC excess from
35811+ * set*uid() to execve() because too many poorly written programs
35812+ * don't check setuid() return code. Here we additionally recheck
35813+ * whether NPROC limit is still exceeded.
35814+ */
35815+ if ((current->flags & PF_NPROC_EXCEEDED) &&
35816+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
35817+ retval = -EAGAIN;
35818+ goto out_ret;
35819+ }
35820+
35821+ /* We're below the limit (still or again), so we don't want to make
35822+ * further execve() calls fail. */
35823+ current->flags &= ~PF_NPROC_EXCEEDED;
35824
35825 retval = unshare_files(&displaced);
35826 if (retval)
35827@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
35828 bprm->filename = filename;
35829 bprm->interp = filename;
35830
35831+ if (gr_process_user_ban()) {
35832+ retval = -EPERM;
35833+ goto out_file;
35834+ }
35835+
35836+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35837+ retval = -EACCES;
35838+ goto out_file;
35839+ }
35840+
35841 retval = bprm_mm_init(bprm);
35842 if (retval)
35843 goto out_file;
35844@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
35845 if (retval < 0)
35846 goto out;
35847
35848+ if (!gr_tpe_allow(file)) {
35849+ retval = -EACCES;
35850+ goto out;
35851+ }
35852+
35853+ if (gr_check_crash_exec(file)) {
35854+ retval = -EACCES;
35855+ goto out;
35856+ }
35857+
35858+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35859+
35860+ gr_handle_exec_args(bprm, argv);
35861+
35862+#ifdef CONFIG_GRKERNSEC
35863+ old_acl = current->acl;
35864+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35865+ old_exec_file = current->exec_file;
35866+ get_file(file);
35867+ current->exec_file = file;
35868+#endif
35869+
35870+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35871+ bprm->unsafe & LSM_UNSAFE_SHARE);
35872+ if (retval < 0)
35873+ goto out_fail;
35874+
35875 retval = search_binary_handler(bprm,regs);
35876 if (retval < 0)
35877- goto out;
35878+ goto out_fail;
35879+#ifdef CONFIG_GRKERNSEC
35880+ if (old_exec_file)
35881+ fput(old_exec_file);
35882+#endif
35883
35884 /* execve succeeded */
35885 current->fs->in_exec = 0;
35886@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
35887 put_files_struct(displaced);
35888 return retval;
35889
35890+out_fail:
35891+#ifdef CONFIG_GRKERNSEC
35892+ current->acl = old_acl;
35893+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35894+ fput(current->exec_file);
35895+ current->exec_file = old_exec_file;
35896+#endif
35897+
35898 out:
35899 if (bprm->mm) {
35900 acct_arg_size(bprm, 0);
35901@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
35902 {
35903 char *old_corename = cn->corename;
35904
35905- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35906+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35907 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35908
35909 if (!cn->corename) {
35910@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
35911 int pid_in_pattern = 0;
35912 int err = 0;
35913
35914- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35915+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35916 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35917 cn->used = 0;
35918
35919@@ -1758,6 +1848,219 @@ out:
35920 return ispipe;
35921 }
35922
35923+int pax_check_flags(unsigned long *flags)
35924+{
35925+ int retval = 0;
35926+
35927+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35928+ if (*flags & MF_PAX_SEGMEXEC)
35929+ {
35930+ *flags &= ~MF_PAX_SEGMEXEC;
35931+ retval = -EINVAL;
35932+ }
35933+#endif
35934+
35935+ if ((*flags & MF_PAX_PAGEEXEC)
35936+
35937+#ifdef CONFIG_PAX_PAGEEXEC
35938+ && (*flags & MF_PAX_SEGMEXEC)
35939+#endif
35940+
35941+ )
35942+ {
35943+ *flags &= ~MF_PAX_PAGEEXEC;
35944+ retval = -EINVAL;
35945+ }
35946+
35947+ if ((*flags & MF_PAX_MPROTECT)
35948+
35949+#ifdef CONFIG_PAX_MPROTECT
35950+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35951+#endif
35952+
35953+ )
35954+ {
35955+ *flags &= ~MF_PAX_MPROTECT;
35956+ retval = -EINVAL;
35957+ }
35958+
35959+ if ((*flags & MF_PAX_EMUTRAMP)
35960+
35961+#ifdef CONFIG_PAX_EMUTRAMP
35962+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35963+#endif
35964+
35965+ )
35966+ {
35967+ *flags &= ~MF_PAX_EMUTRAMP;
35968+ retval = -EINVAL;
35969+ }
35970+
35971+ return retval;
35972+}
35973+
35974+EXPORT_SYMBOL(pax_check_flags);
35975+
35976+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35977+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35978+{
35979+ struct task_struct *tsk = current;
35980+ struct mm_struct *mm = current->mm;
35981+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35982+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35983+ char *path_exec = NULL;
35984+ char *path_fault = NULL;
35985+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
35986+
35987+ if (buffer_exec && buffer_fault) {
35988+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35989+
35990+ down_read(&mm->mmap_sem);
35991+ vma = mm->mmap;
35992+ while (vma && (!vma_exec || !vma_fault)) {
35993+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35994+ vma_exec = vma;
35995+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35996+ vma_fault = vma;
35997+ vma = vma->vm_next;
35998+ }
35999+ if (vma_exec) {
36000+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36001+ if (IS_ERR(path_exec))
36002+ path_exec = "<path too long>";
36003+ else {
36004+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36005+ if (path_exec) {
36006+ *path_exec = 0;
36007+ path_exec = buffer_exec;
36008+ } else
36009+ path_exec = "<path too long>";
36010+ }
36011+ }
36012+ if (vma_fault) {
36013+ start = vma_fault->vm_start;
36014+ end = vma_fault->vm_end;
36015+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36016+ if (vma_fault->vm_file) {
36017+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36018+ if (IS_ERR(path_fault))
36019+ path_fault = "<path too long>";
36020+ else {
36021+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36022+ if (path_fault) {
36023+ *path_fault = 0;
36024+ path_fault = buffer_fault;
36025+ } else
36026+ path_fault = "<path too long>";
36027+ }
36028+ } else
36029+ path_fault = "<anonymous mapping>";
36030+ }
36031+ up_read(&mm->mmap_sem);
36032+ }
36033+ if (tsk->signal->curr_ip)
36034+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36035+ else
36036+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36037+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36038+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36039+ task_uid(tsk), task_euid(tsk), pc, sp);
36040+ free_page((unsigned long)buffer_exec);
36041+ free_page((unsigned long)buffer_fault);
36042+ pax_report_insns(pc, sp);
36043+ do_coredump(SIGKILL, SIGKILL, regs);
36044+}
36045+#endif
36046+
36047+#ifdef CONFIG_PAX_REFCOUNT
36048+void pax_report_refcount_overflow(struct pt_regs *regs)
36049+{
36050+ if (current->signal->curr_ip)
36051+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36052+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36053+ else
36054+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36055+ current->comm, task_pid_nr(current), current_uid(), current_euid());
36056+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36057+ show_regs(regs);
36058+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36059+}
36060+#endif
36061+
36062+#ifdef CONFIG_PAX_USERCOPY
36063+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36064+int object_is_on_stack(const void *obj, unsigned long len)
36065+{
36066+ const void * const stack = task_stack_page(current);
36067+ const void * const stackend = stack + THREAD_SIZE;
36068+
36069+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36070+ const void *frame = NULL;
36071+ const void *oldframe;
36072+#endif
36073+
36074+ if (obj + len < obj)
36075+ return -1;
36076+
36077+ if (obj + len <= stack || stackend <= obj)
36078+ return 0;
36079+
36080+ if (obj < stack || stackend < obj + len)
36081+ return -1;
36082+
36083+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36084+ oldframe = __builtin_frame_address(1);
36085+ if (oldframe)
36086+ frame = __builtin_frame_address(2);
36087+ /*
36088+ low ----------------------------------------------> high
36089+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
36090+ ^----------------^
36091+ allow copies only within here
36092+ */
36093+ while (stack <= frame && frame < stackend) {
36094+ /* if obj + len extends past the last frame, this
36095+ check won't pass and the next frame will be 0,
36096+ causing us to bail out and correctly report
36097+ the copy as invalid
36098+ */
36099+ if (obj + len <= frame)
36100+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36101+ oldframe = frame;
36102+ frame = *(const void * const *)frame;
36103+ }
36104+ return -1;
36105+#else
36106+ return 1;
36107+#endif
36108+}
36109+
36110+
36111+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36112+{
36113+ if (current->signal->curr_ip)
36114+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36115+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36116+ else
36117+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36118+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36119+ dump_stack();
36120+ gr_handle_kernel_exploit();
36121+ do_group_exit(SIGKILL);
36122+}
36123+#endif
36124+
36125+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36126+void pax_track_stack(void)
36127+{
36128+ unsigned long sp = (unsigned long)&sp;
36129+ if (sp < current_thread_info()->lowest_stack &&
36130+ sp > (unsigned long)task_stack_page(current))
36131+ current_thread_info()->lowest_stack = sp;
36132+}
36133+EXPORT_SYMBOL(pax_track_stack);
36134+#endif
36135+
36136 static int zap_process(struct task_struct *start, int exit_code)
36137 {
36138 struct task_struct *t;
36139@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
36140 pipe = file->f_path.dentry->d_inode->i_pipe;
36141
36142 pipe_lock(pipe);
36143- pipe->readers++;
36144- pipe->writers--;
36145+ atomic_inc(&pipe->readers);
36146+ atomic_dec(&pipe->writers);
36147
36148- while ((pipe->readers > 1) && (!signal_pending(current))) {
36149+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36150 wake_up_interruptible_sync(&pipe->wait);
36151 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36152 pipe_wait(pipe);
36153 }
36154
36155- pipe->readers--;
36156- pipe->writers++;
36157+ atomic_dec(&pipe->readers);
36158+ atomic_inc(&pipe->writers);
36159 pipe_unlock(pipe);
36160
36161 }
36162@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
36163 int retval = 0;
36164 int flag = 0;
36165 int ispipe;
36166- static atomic_t core_dump_count = ATOMIC_INIT(0);
36167+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36168 struct coredump_params cprm = {
36169 .signr = signr,
36170 .regs = regs,
36171@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
36172
36173 audit_core_dumps(signr);
36174
36175+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36176+ gr_handle_brute_attach(current, cprm.mm_flags);
36177+
36178 binfmt = mm->binfmt;
36179 if (!binfmt || !binfmt->core_dump)
36180 goto fail;
36181@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
36182 goto fail_corename;
36183 }
36184
36185+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36186+
36187 if (ispipe) {
36188 int dump_count;
36189 char **helper_argv;
36190@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
36191 }
36192 cprm.limit = RLIM_INFINITY;
36193
36194- dump_count = atomic_inc_return(&core_dump_count);
36195+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
36196 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36197 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36198 task_tgid_vnr(current), current->comm);
36199@@ -2192,7 +2500,7 @@ close_fail:
36200 filp_close(cprm.file, NULL);
36201 fail_dropcount:
36202 if (ispipe)
36203- atomic_dec(&core_dump_count);
36204+ atomic_dec_unchecked(&core_dump_count);
36205 fail_unlock:
36206 kfree(cn.corename);
36207 fail_corename:
36208diff -urNp linux-3.0.3/fs/ext2/balloc.c linux-3.0.3/fs/ext2/balloc.c
36209--- linux-3.0.3/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
36210+++ linux-3.0.3/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
36211@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36212
36213 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36214 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36215- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36216+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36217 sbi->s_resuid != current_fsuid() &&
36218 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36219 return 0;
36220diff -urNp linux-3.0.3/fs/ext3/balloc.c linux-3.0.3/fs/ext3/balloc.c
36221--- linux-3.0.3/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
36222+++ linux-3.0.3/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
36223@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36224
36225 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36226 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36227- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36228+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36229 sbi->s_resuid != current_fsuid() &&
36230 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36231 return 0;
36232diff -urNp linux-3.0.3/fs/ext4/balloc.c linux-3.0.3/fs/ext4/balloc.c
36233--- linux-3.0.3/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
36234+++ linux-3.0.3/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
36235@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
36236 /* Hm, nope. Are (enough) root reserved blocks available? */
36237 if (sbi->s_resuid == current_fsuid() ||
36238 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36239- capable(CAP_SYS_RESOURCE) ||
36240- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
36241+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
36242+ capable_nolog(CAP_SYS_RESOURCE)) {
36243
36244 if (free_blocks >= (nblocks + dirty_blocks))
36245 return 1;
36246diff -urNp linux-3.0.3/fs/ext4/ext4.h linux-3.0.3/fs/ext4/ext4.h
36247--- linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
36248+++ linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
36249@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
36250 unsigned long s_mb_last_start;
36251
36252 /* stats for buddy allocator */
36253- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36254- atomic_t s_bal_success; /* we found long enough chunks */
36255- atomic_t s_bal_allocated; /* in blocks */
36256- atomic_t s_bal_ex_scanned; /* total extents scanned */
36257- atomic_t s_bal_goals; /* goal hits */
36258- atomic_t s_bal_breaks; /* too long searches */
36259- atomic_t s_bal_2orders; /* 2^order hits */
36260+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36261+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36262+ atomic_unchecked_t s_bal_allocated; /* in blocks */
36263+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36264+ atomic_unchecked_t s_bal_goals; /* goal hits */
36265+ atomic_unchecked_t s_bal_breaks; /* too long searches */
36266+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36267 spinlock_t s_bal_lock;
36268 unsigned long s_mb_buddies_generated;
36269 unsigned long long s_mb_generation_time;
36270- atomic_t s_mb_lost_chunks;
36271- atomic_t s_mb_preallocated;
36272- atomic_t s_mb_discarded;
36273+ atomic_unchecked_t s_mb_lost_chunks;
36274+ atomic_unchecked_t s_mb_preallocated;
36275+ atomic_unchecked_t s_mb_discarded;
36276 atomic_t s_lock_busy;
36277
36278 /* locality groups */
36279diff -urNp linux-3.0.3/fs/ext4/mballoc.c linux-3.0.3/fs/ext4/mballoc.c
36280--- linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
36281+++ linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
36282@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
36283 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36284
36285 if (EXT4_SB(sb)->s_mb_stats)
36286- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36287+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36288
36289 break;
36290 }
36291@@ -2087,7 +2087,7 @@ repeat:
36292 ac->ac_status = AC_STATUS_CONTINUE;
36293 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36294 cr = 3;
36295- atomic_inc(&sbi->s_mb_lost_chunks);
36296+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36297 goto repeat;
36298 }
36299 }
36300@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
36301 ext4_grpblk_t counters[16];
36302 } sg;
36303
36304+ pax_track_stack();
36305+
36306 group--;
36307 if (group == 0)
36308 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36309@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36310 if (sbi->s_mb_stats) {
36311 printk(KERN_INFO
36312 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36313- atomic_read(&sbi->s_bal_allocated),
36314- atomic_read(&sbi->s_bal_reqs),
36315- atomic_read(&sbi->s_bal_success));
36316+ atomic_read_unchecked(&sbi->s_bal_allocated),
36317+ atomic_read_unchecked(&sbi->s_bal_reqs),
36318+ atomic_read_unchecked(&sbi->s_bal_success));
36319 printk(KERN_INFO
36320 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36321 "%u 2^N hits, %u breaks, %u lost\n",
36322- atomic_read(&sbi->s_bal_ex_scanned),
36323- atomic_read(&sbi->s_bal_goals),
36324- atomic_read(&sbi->s_bal_2orders),
36325- atomic_read(&sbi->s_bal_breaks),
36326- atomic_read(&sbi->s_mb_lost_chunks));
36327+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36328+ atomic_read_unchecked(&sbi->s_bal_goals),
36329+ atomic_read_unchecked(&sbi->s_bal_2orders),
36330+ atomic_read_unchecked(&sbi->s_bal_breaks),
36331+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36332 printk(KERN_INFO
36333 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36334 sbi->s_mb_buddies_generated++,
36335 sbi->s_mb_generation_time);
36336 printk(KERN_INFO
36337 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36338- atomic_read(&sbi->s_mb_preallocated),
36339- atomic_read(&sbi->s_mb_discarded));
36340+ atomic_read_unchecked(&sbi->s_mb_preallocated),
36341+ atomic_read_unchecked(&sbi->s_mb_discarded));
36342 }
36343
36344 free_percpu(sbi->s_locality_groups);
36345@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36346 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36347
36348 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36349- atomic_inc(&sbi->s_bal_reqs);
36350- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36351+ atomic_inc_unchecked(&sbi->s_bal_reqs);
36352+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36353 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36354- atomic_inc(&sbi->s_bal_success);
36355- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36356+ atomic_inc_unchecked(&sbi->s_bal_success);
36357+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36358 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36359 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36360- atomic_inc(&sbi->s_bal_goals);
36361+ atomic_inc_unchecked(&sbi->s_bal_goals);
36362 if (ac->ac_found > sbi->s_mb_max_to_scan)
36363- atomic_inc(&sbi->s_bal_breaks);
36364+ atomic_inc_unchecked(&sbi->s_bal_breaks);
36365 }
36366
36367 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36368@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36369 trace_ext4_mb_new_inode_pa(ac, pa);
36370
36371 ext4_mb_use_inode_pa(ac, pa);
36372- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36373+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36374
36375 ei = EXT4_I(ac->ac_inode);
36376 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36377@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36378 trace_ext4_mb_new_group_pa(ac, pa);
36379
36380 ext4_mb_use_group_pa(ac, pa);
36381- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36382+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36383
36384 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36385 lg = ac->ac_lg;
36386@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36387 * from the bitmap and continue.
36388 */
36389 }
36390- atomic_add(free, &sbi->s_mb_discarded);
36391+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
36392
36393 return err;
36394 }
36395@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36396 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36397 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36398 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36399- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36400+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36401 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36402
36403 return 0;
36404diff -urNp linux-3.0.3/fs/fcntl.c linux-3.0.3/fs/fcntl.c
36405--- linux-3.0.3/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36406+++ linux-3.0.3/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36407@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36408 if (err)
36409 return err;
36410
36411+ if (gr_handle_chroot_fowner(pid, type))
36412+ return -ENOENT;
36413+ if (gr_check_protected_task_fowner(pid, type))
36414+ return -EACCES;
36415+
36416 f_modown(filp, pid, type, force);
36417 return 0;
36418 }
36419@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36420 switch (cmd) {
36421 case F_DUPFD:
36422 case F_DUPFD_CLOEXEC:
36423+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36424 if (arg >= rlimit(RLIMIT_NOFILE))
36425 break;
36426 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36427@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36428 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36429 * is defined as O_NONBLOCK on some platforms and not on others.
36430 */
36431- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36432+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36433 O_RDONLY | O_WRONLY | O_RDWR |
36434 O_CREAT | O_EXCL | O_NOCTTY |
36435 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36436 __O_SYNC | O_DSYNC | FASYNC |
36437 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36438 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36439- __FMODE_EXEC | O_PATH
36440+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
36441 ));
36442
36443 fasync_cache = kmem_cache_create("fasync_cache",
36444diff -urNp linux-3.0.3/fs/fifo.c linux-3.0.3/fs/fifo.c
36445--- linux-3.0.3/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36446+++ linux-3.0.3/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36447@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36448 */
36449 filp->f_op = &read_pipefifo_fops;
36450 pipe->r_counter++;
36451- if (pipe->readers++ == 0)
36452+ if (atomic_inc_return(&pipe->readers) == 1)
36453 wake_up_partner(inode);
36454
36455- if (!pipe->writers) {
36456+ if (!atomic_read(&pipe->writers)) {
36457 if ((filp->f_flags & O_NONBLOCK)) {
36458 /* suppress POLLHUP until we have
36459 * seen a writer */
36460@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36461 * errno=ENXIO when there is no process reading the FIFO.
36462 */
36463 ret = -ENXIO;
36464- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36465+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36466 goto err;
36467
36468 filp->f_op = &write_pipefifo_fops;
36469 pipe->w_counter++;
36470- if (!pipe->writers++)
36471+ if (atomic_inc_return(&pipe->writers) == 1)
36472 wake_up_partner(inode);
36473
36474- if (!pipe->readers) {
36475+ if (!atomic_read(&pipe->readers)) {
36476 wait_for_partner(inode, &pipe->r_counter);
36477 if (signal_pending(current))
36478 goto err_wr;
36479@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36480 */
36481 filp->f_op = &rdwr_pipefifo_fops;
36482
36483- pipe->readers++;
36484- pipe->writers++;
36485+ atomic_inc(&pipe->readers);
36486+ atomic_inc(&pipe->writers);
36487 pipe->r_counter++;
36488 pipe->w_counter++;
36489- if (pipe->readers == 1 || pipe->writers == 1)
36490+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36491 wake_up_partner(inode);
36492 break;
36493
36494@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36495 return 0;
36496
36497 err_rd:
36498- if (!--pipe->readers)
36499+ if (atomic_dec_and_test(&pipe->readers))
36500 wake_up_interruptible(&pipe->wait);
36501 ret = -ERESTARTSYS;
36502 goto err;
36503
36504 err_wr:
36505- if (!--pipe->writers)
36506+ if (atomic_dec_and_test(&pipe->writers))
36507 wake_up_interruptible(&pipe->wait);
36508 ret = -ERESTARTSYS;
36509 goto err;
36510
36511 err:
36512- if (!pipe->readers && !pipe->writers)
36513+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36514 free_pipe_info(inode);
36515
36516 err_nocleanup:
36517diff -urNp linux-3.0.3/fs/file.c linux-3.0.3/fs/file.c
36518--- linux-3.0.3/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36519+++ linux-3.0.3/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36520@@ -15,6 +15,7 @@
36521 #include <linux/slab.h>
36522 #include <linux/vmalloc.h>
36523 #include <linux/file.h>
36524+#include <linux/security.h>
36525 #include <linux/fdtable.h>
36526 #include <linux/bitops.h>
36527 #include <linux/interrupt.h>
36528@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36529 * N.B. For clone tasks sharing a files structure, this test
36530 * will limit the total number of files that can be opened.
36531 */
36532+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36533 if (nr >= rlimit(RLIMIT_NOFILE))
36534 return -EMFILE;
36535
36536diff -urNp linux-3.0.3/fs/filesystems.c linux-3.0.3/fs/filesystems.c
36537--- linux-3.0.3/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36538+++ linux-3.0.3/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36539@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36540 int len = dot ? dot - name : strlen(name);
36541
36542 fs = __get_fs_type(name, len);
36543+
36544+#ifdef CONFIG_GRKERNSEC_MODHARDEN
36545+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36546+#else
36547 if (!fs && (request_module("%.*s", len, name) == 0))
36548+#endif
36549 fs = __get_fs_type(name, len);
36550
36551 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36552diff -urNp linux-3.0.3/fs/fscache/cookie.c linux-3.0.3/fs/fscache/cookie.c
36553--- linux-3.0.3/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36554+++ linux-3.0.3/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36555@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36556 parent ? (char *) parent->def->name : "<no-parent>",
36557 def->name, netfs_data);
36558
36559- fscache_stat(&fscache_n_acquires);
36560+ fscache_stat_unchecked(&fscache_n_acquires);
36561
36562 /* if there's no parent cookie, then we don't create one here either */
36563 if (!parent) {
36564- fscache_stat(&fscache_n_acquires_null);
36565+ fscache_stat_unchecked(&fscache_n_acquires_null);
36566 _leave(" [no parent]");
36567 return NULL;
36568 }
36569@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36570 /* allocate and initialise a cookie */
36571 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36572 if (!cookie) {
36573- fscache_stat(&fscache_n_acquires_oom);
36574+ fscache_stat_unchecked(&fscache_n_acquires_oom);
36575 _leave(" [ENOMEM]");
36576 return NULL;
36577 }
36578@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36579
36580 switch (cookie->def->type) {
36581 case FSCACHE_COOKIE_TYPE_INDEX:
36582- fscache_stat(&fscache_n_cookie_index);
36583+ fscache_stat_unchecked(&fscache_n_cookie_index);
36584 break;
36585 case FSCACHE_COOKIE_TYPE_DATAFILE:
36586- fscache_stat(&fscache_n_cookie_data);
36587+ fscache_stat_unchecked(&fscache_n_cookie_data);
36588 break;
36589 default:
36590- fscache_stat(&fscache_n_cookie_special);
36591+ fscache_stat_unchecked(&fscache_n_cookie_special);
36592 break;
36593 }
36594
36595@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36596 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36597 atomic_dec(&parent->n_children);
36598 __fscache_cookie_put(cookie);
36599- fscache_stat(&fscache_n_acquires_nobufs);
36600+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36601 _leave(" = NULL");
36602 return NULL;
36603 }
36604 }
36605
36606- fscache_stat(&fscache_n_acquires_ok);
36607+ fscache_stat_unchecked(&fscache_n_acquires_ok);
36608 _leave(" = %p", cookie);
36609 return cookie;
36610 }
36611@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36612 cache = fscache_select_cache_for_object(cookie->parent);
36613 if (!cache) {
36614 up_read(&fscache_addremove_sem);
36615- fscache_stat(&fscache_n_acquires_no_cache);
36616+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36617 _leave(" = -ENOMEDIUM [no cache]");
36618 return -ENOMEDIUM;
36619 }
36620@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36621 object = cache->ops->alloc_object(cache, cookie);
36622 fscache_stat_d(&fscache_n_cop_alloc_object);
36623 if (IS_ERR(object)) {
36624- fscache_stat(&fscache_n_object_no_alloc);
36625+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
36626 ret = PTR_ERR(object);
36627 goto error;
36628 }
36629
36630- fscache_stat(&fscache_n_object_alloc);
36631+ fscache_stat_unchecked(&fscache_n_object_alloc);
36632
36633 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36634
36635@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36636 struct fscache_object *object;
36637 struct hlist_node *_p;
36638
36639- fscache_stat(&fscache_n_updates);
36640+ fscache_stat_unchecked(&fscache_n_updates);
36641
36642 if (!cookie) {
36643- fscache_stat(&fscache_n_updates_null);
36644+ fscache_stat_unchecked(&fscache_n_updates_null);
36645 _leave(" [no cookie]");
36646 return;
36647 }
36648@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36649 struct fscache_object *object;
36650 unsigned long event;
36651
36652- fscache_stat(&fscache_n_relinquishes);
36653+ fscache_stat_unchecked(&fscache_n_relinquishes);
36654 if (retire)
36655- fscache_stat(&fscache_n_relinquishes_retire);
36656+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36657
36658 if (!cookie) {
36659- fscache_stat(&fscache_n_relinquishes_null);
36660+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
36661 _leave(" [no cookie]");
36662 return;
36663 }
36664@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36665
36666 /* wait for the cookie to finish being instantiated (or to fail) */
36667 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36668- fscache_stat(&fscache_n_relinquishes_waitcrt);
36669+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36670 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36671 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36672 }
36673diff -urNp linux-3.0.3/fs/fscache/internal.h linux-3.0.3/fs/fscache/internal.h
36674--- linux-3.0.3/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36675+++ linux-3.0.3/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36676@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36677 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36678 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36679
36680-extern atomic_t fscache_n_op_pend;
36681-extern atomic_t fscache_n_op_run;
36682-extern atomic_t fscache_n_op_enqueue;
36683-extern atomic_t fscache_n_op_deferred_release;
36684-extern atomic_t fscache_n_op_release;
36685-extern atomic_t fscache_n_op_gc;
36686-extern atomic_t fscache_n_op_cancelled;
36687-extern atomic_t fscache_n_op_rejected;
36688-
36689-extern atomic_t fscache_n_attr_changed;
36690-extern atomic_t fscache_n_attr_changed_ok;
36691-extern atomic_t fscache_n_attr_changed_nobufs;
36692-extern atomic_t fscache_n_attr_changed_nomem;
36693-extern atomic_t fscache_n_attr_changed_calls;
36694-
36695-extern atomic_t fscache_n_allocs;
36696-extern atomic_t fscache_n_allocs_ok;
36697-extern atomic_t fscache_n_allocs_wait;
36698-extern atomic_t fscache_n_allocs_nobufs;
36699-extern atomic_t fscache_n_allocs_intr;
36700-extern atomic_t fscache_n_allocs_object_dead;
36701-extern atomic_t fscache_n_alloc_ops;
36702-extern atomic_t fscache_n_alloc_op_waits;
36703-
36704-extern atomic_t fscache_n_retrievals;
36705-extern atomic_t fscache_n_retrievals_ok;
36706-extern atomic_t fscache_n_retrievals_wait;
36707-extern atomic_t fscache_n_retrievals_nodata;
36708-extern atomic_t fscache_n_retrievals_nobufs;
36709-extern atomic_t fscache_n_retrievals_intr;
36710-extern atomic_t fscache_n_retrievals_nomem;
36711-extern atomic_t fscache_n_retrievals_object_dead;
36712-extern atomic_t fscache_n_retrieval_ops;
36713-extern atomic_t fscache_n_retrieval_op_waits;
36714-
36715-extern atomic_t fscache_n_stores;
36716-extern atomic_t fscache_n_stores_ok;
36717-extern atomic_t fscache_n_stores_again;
36718-extern atomic_t fscache_n_stores_nobufs;
36719-extern atomic_t fscache_n_stores_oom;
36720-extern atomic_t fscache_n_store_ops;
36721-extern atomic_t fscache_n_store_calls;
36722-extern atomic_t fscache_n_store_pages;
36723-extern atomic_t fscache_n_store_radix_deletes;
36724-extern atomic_t fscache_n_store_pages_over_limit;
36725-
36726-extern atomic_t fscache_n_store_vmscan_not_storing;
36727-extern atomic_t fscache_n_store_vmscan_gone;
36728-extern atomic_t fscache_n_store_vmscan_busy;
36729-extern atomic_t fscache_n_store_vmscan_cancelled;
36730-
36731-extern atomic_t fscache_n_marks;
36732-extern atomic_t fscache_n_uncaches;
36733-
36734-extern atomic_t fscache_n_acquires;
36735-extern atomic_t fscache_n_acquires_null;
36736-extern atomic_t fscache_n_acquires_no_cache;
36737-extern atomic_t fscache_n_acquires_ok;
36738-extern atomic_t fscache_n_acquires_nobufs;
36739-extern atomic_t fscache_n_acquires_oom;
36740-
36741-extern atomic_t fscache_n_updates;
36742-extern atomic_t fscache_n_updates_null;
36743-extern atomic_t fscache_n_updates_run;
36744-
36745-extern atomic_t fscache_n_relinquishes;
36746-extern atomic_t fscache_n_relinquishes_null;
36747-extern atomic_t fscache_n_relinquishes_waitcrt;
36748-extern atomic_t fscache_n_relinquishes_retire;
36749-
36750-extern atomic_t fscache_n_cookie_index;
36751-extern atomic_t fscache_n_cookie_data;
36752-extern atomic_t fscache_n_cookie_special;
36753-
36754-extern atomic_t fscache_n_object_alloc;
36755-extern atomic_t fscache_n_object_no_alloc;
36756-extern atomic_t fscache_n_object_lookups;
36757-extern atomic_t fscache_n_object_lookups_negative;
36758-extern atomic_t fscache_n_object_lookups_positive;
36759-extern atomic_t fscache_n_object_lookups_timed_out;
36760-extern atomic_t fscache_n_object_created;
36761-extern atomic_t fscache_n_object_avail;
36762-extern atomic_t fscache_n_object_dead;
36763-
36764-extern atomic_t fscache_n_checkaux_none;
36765-extern atomic_t fscache_n_checkaux_okay;
36766-extern atomic_t fscache_n_checkaux_update;
36767-extern atomic_t fscache_n_checkaux_obsolete;
36768+extern atomic_unchecked_t fscache_n_op_pend;
36769+extern atomic_unchecked_t fscache_n_op_run;
36770+extern atomic_unchecked_t fscache_n_op_enqueue;
36771+extern atomic_unchecked_t fscache_n_op_deferred_release;
36772+extern atomic_unchecked_t fscache_n_op_release;
36773+extern atomic_unchecked_t fscache_n_op_gc;
36774+extern atomic_unchecked_t fscache_n_op_cancelled;
36775+extern atomic_unchecked_t fscache_n_op_rejected;
36776+
36777+extern atomic_unchecked_t fscache_n_attr_changed;
36778+extern atomic_unchecked_t fscache_n_attr_changed_ok;
36779+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36780+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36781+extern atomic_unchecked_t fscache_n_attr_changed_calls;
36782+
36783+extern atomic_unchecked_t fscache_n_allocs;
36784+extern atomic_unchecked_t fscache_n_allocs_ok;
36785+extern atomic_unchecked_t fscache_n_allocs_wait;
36786+extern atomic_unchecked_t fscache_n_allocs_nobufs;
36787+extern atomic_unchecked_t fscache_n_allocs_intr;
36788+extern atomic_unchecked_t fscache_n_allocs_object_dead;
36789+extern atomic_unchecked_t fscache_n_alloc_ops;
36790+extern atomic_unchecked_t fscache_n_alloc_op_waits;
36791+
36792+extern atomic_unchecked_t fscache_n_retrievals;
36793+extern atomic_unchecked_t fscache_n_retrievals_ok;
36794+extern atomic_unchecked_t fscache_n_retrievals_wait;
36795+extern atomic_unchecked_t fscache_n_retrievals_nodata;
36796+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36797+extern atomic_unchecked_t fscache_n_retrievals_intr;
36798+extern atomic_unchecked_t fscache_n_retrievals_nomem;
36799+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36800+extern atomic_unchecked_t fscache_n_retrieval_ops;
36801+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36802+
36803+extern atomic_unchecked_t fscache_n_stores;
36804+extern atomic_unchecked_t fscache_n_stores_ok;
36805+extern atomic_unchecked_t fscache_n_stores_again;
36806+extern atomic_unchecked_t fscache_n_stores_nobufs;
36807+extern atomic_unchecked_t fscache_n_stores_oom;
36808+extern atomic_unchecked_t fscache_n_store_ops;
36809+extern atomic_unchecked_t fscache_n_store_calls;
36810+extern atomic_unchecked_t fscache_n_store_pages;
36811+extern atomic_unchecked_t fscache_n_store_radix_deletes;
36812+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36813+
36814+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36815+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36816+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36817+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36818+
36819+extern atomic_unchecked_t fscache_n_marks;
36820+extern atomic_unchecked_t fscache_n_uncaches;
36821+
36822+extern atomic_unchecked_t fscache_n_acquires;
36823+extern atomic_unchecked_t fscache_n_acquires_null;
36824+extern atomic_unchecked_t fscache_n_acquires_no_cache;
36825+extern atomic_unchecked_t fscache_n_acquires_ok;
36826+extern atomic_unchecked_t fscache_n_acquires_nobufs;
36827+extern atomic_unchecked_t fscache_n_acquires_oom;
36828+
36829+extern atomic_unchecked_t fscache_n_updates;
36830+extern atomic_unchecked_t fscache_n_updates_null;
36831+extern atomic_unchecked_t fscache_n_updates_run;
36832+
36833+extern atomic_unchecked_t fscache_n_relinquishes;
36834+extern atomic_unchecked_t fscache_n_relinquishes_null;
36835+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36836+extern atomic_unchecked_t fscache_n_relinquishes_retire;
36837+
36838+extern atomic_unchecked_t fscache_n_cookie_index;
36839+extern atomic_unchecked_t fscache_n_cookie_data;
36840+extern atomic_unchecked_t fscache_n_cookie_special;
36841+
36842+extern atomic_unchecked_t fscache_n_object_alloc;
36843+extern atomic_unchecked_t fscache_n_object_no_alloc;
36844+extern atomic_unchecked_t fscache_n_object_lookups;
36845+extern atomic_unchecked_t fscache_n_object_lookups_negative;
36846+extern atomic_unchecked_t fscache_n_object_lookups_positive;
36847+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36848+extern atomic_unchecked_t fscache_n_object_created;
36849+extern atomic_unchecked_t fscache_n_object_avail;
36850+extern atomic_unchecked_t fscache_n_object_dead;
36851+
36852+extern atomic_unchecked_t fscache_n_checkaux_none;
36853+extern atomic_unchecked_t fscache_n_checkaux_okay;
36854+extern atomic_unchecked_t fscache_n_checkaux_update;
36855+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36856
36857 extern atomic_t fscache_n_cop_alloc_object;
36858 extern atomic_t fscache_n_cop_lookup_object;
36859@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36860 atomic_inc(stat);
36861 }
36862
36863+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36864+{
36865+ atomic_inc_unchecked(stat);
36866+}
36867+
36868 static inline void fscache_stat_d(atomic_t *stat)
36869 {
36870 atomic_dec(stat);
36871@@ -267,6 +272,7 @@ extern const struct file_operations fsca
36872
36873 #define __fscache_stat(stat) (NULL)
36874 #define fscache_stat(stat) do {} while (0)
36875+#define fscache_stat_unchecked(stat) do {} while (0)
36876 #define fscache_stat_d(stat) do {} while (0)
36877 #endif
36878
36879diff -urNp linux-3.0.3/fs/fscache/object.c linux-3.0.3/fs/fscache/object.c
36880--- linux-3.0.3/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36881+++ linux-3.0.3/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36882@@ -128,7 +128,7 @@ static void fscache_object_state_machine
36883 /* update the object metadata on disk */
36884 case FSCACHE_OBJECT_UPDATING:
36885 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36886- fscache_stat(&fscache_n_updates_run);
36887+ fscache_stat_unchecked(&fscache_n_updates_run);
36888 fscache_stat(&fscache_n_cop_update_object);
36889 object->cache->ops->update_object(object);
36890 fscache_stat_d(&fscache_n_cop_update_object);
36891@@ -217,7 +217,7 @@ static void fscache_object_state_machine
36892 spin_lock(&object->lock);
36893 object->state = FSCACHE_OBJECT_DEAD;
36894 spin_unlock(&object->lock);
36895- fscache_stat(&fscache_n_object_dead);
36896+ fscache_stat_unchecked(&fscache_n_object_dead);
36897 goto terminal_transit;
36898
36899 /* handle the parent cache of this object being withdrawn from
36900@@ -232,7 +232,7 @@ static void fscache_object_state_machine
36901 spin_lock(&object->lock);
36902 object->state = FSCACHE_OBJECT_DEAD;
36903 spin_unlock(&object->lock);
36904- fscache_stat(&fscache_n_object_dead);
36905+ fscache_stat_unchecked(&fscache_n_object_dead);
36906 goto terminal_transit;
36907
36908 /* complain about the object being woken up once it is
36909@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36910 parent->cookie->def->name, cookie->def->name,
36911 object->cache->tag->name);
36912
36913- fscache_stat(&fscache_n_object_lookups);
36914+ fscache_stat_unchecked(&fscache_n_object_lookups);
36915 fscache_stat(&fscache_n_cop_lookup_object);
36916 ret = object->cache->ops->lookup_object(object);
36917 fscache_stat_d(&fscache_n_cop_lookup_object);
36918@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36919 if (ret == -ETIMEDOUT) {
36920 /* probably stuck behind another object, so move this one to
36921 * the back of the queue */
36922- fscache_stat(&fscache_n_object_lookups_timed_out);
36923+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36924 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36925 }
36926
36927@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36928
36929 spin_lock(&object->lock);
36930 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36931- fscache_stat(&fscache_n_object_lookups_negative);
36932+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36933
36934 /* transit here to allow write requests to begin stacking up
36935 * and read requests to begin returning ENODATA */
36936@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36937 * result, in which case there may be data available */
36938 spin_lock(&object->lock);
36939 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36940- fscache_stat(&fscache_n_object_lookups_positive);
36941+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36942
36943 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36944
36945@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36946 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36947 } else {
36948 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36949- fscache_stat(&fscache_n_object_created);
36950+ fscache_stat_unchecked(&fscache_n_object_created);
36951
36952 object->state = FSCACHE_OBJECT_AVAILABLE;
36953 spin_unlock(&object->lock);
36954@@ -602,7 +602,7 @@ static void fscache_object_available(str
36955 fscache_enqueue_dependents(object);
36956
36957 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36958- fscache_stat(&fscache_n_object_avail);
36959+ fscache_stat_unchecked(&fscache_n_object_avail);
36960
36961 _leave("");
36962 }
36963@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36964 enum fscache_checkaux result;
36965
36966 if (!object->cookie->def->check_aux) {
36967- fscache_stat(&fscache_n_checkaux_none);
36968+ fscache_stat_unchecked(&fscache_n_checkaux_none);
36969 return FSCACHE_CHECKAUX_OKAY;
36970 }
36971
36972@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36973 switch (result) {
36974 /* entry okay as is */
36975 case FSCACHE_CHECKAUX_OKAY:
36976- fscache_stat(&fscache_n_checkaux_okay);
36977+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
36978 break;
36979
36980 /* entry requires update */
36981 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36982- fscache_stat(&fscache_n_checkaux_update);
36983+ fscache_stat_unchecked(&fscache_n_checkaux_update);
36984 break;
36985
36986 /* entry requires deletion */
36987 case FSCACHE_CHECKAUX_OBSOLETE:
36988- fscache_stat(&fscache_n_checkaux_obsolete);
36989+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36990 break;
36991
36992 default:
36993diff -urNp linux-3.0.3/fs/fscache/operation.c linux-3.0.3/fs/fscache/operation.c
36994--- linux-3.0.3/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
36995+++ linux-3.0.3/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
36996@@ -17,7 +17,7 @@
36997 #include <linux/slab.h>
36998 #include "internal.h"
36999
37000-atomic_t fscache_op_debug_id;
37001+atomic_unchecked_t fscache_op_debug_id;
37002 EXPORT_SYMBOL(fscache_op_debug_id);
37003
37004 /**
37005@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
37006 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37007 ASSERTCMP(atomic_read(&op->usage), >, 0);
37008
37009- fscache_stat(&fscache_n_op_enqueue);
37010+ fscache_stat_unchecked(&fscache_n_op_enqueue);
37011 switch (op->flags & FSCACHE_OP_TYPE) {
37012 case FSCACHE_OP_ASYNC:
37013 _debug("queue async");
37014@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
37015 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37016 if (op->processor)
37017 fscache_enqueue_operation(op);
37018- fscache_stat(&fscache_n_op_run);
37019+ fscache_stat_unchecked(&fscache_n_op_run);
37020 }
37021
37022 /*
37023@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
37024 if (object->n_ops > 1) {
37025 atomic_inc(&op->usage);
37026 list_add_tail(&op->pend_link, &object->pending_ops);
37027- fscache_stat(&fscache_n_op_pend);
37028+ fscache_stat_unchecked(&fscache_n_op_pend);
37029 } else if (!list_empty(&object->pending_ops)) {
37030 atomic_inc(&op->usage);
37031 list_add_tail(&op->pend_link, &object->pending_ops);
37032- fscache_stat(&fscache_n_op_pend);
37033+ fscache_stat_unchecked(&fscache_n_op_pend);
37034 fscache_start_operations(object);
37035 } else {
37036 ASSERTCMP(object->n_in_progress, ==, 0);
37037@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
37038 object->n_exclusive++; /* reads and writes must wait */
37039 atomic_inc(&op->usage);
37040 list_add_tail(&op->pend_link, &object->pending_ops);
37041- fscache_stat(&fscache_n_op_pend);
37042+ fscache_stat_unchecked(&fscache_n_op_pend);
37043 ret = 0;
37044 } else {
37045 /* not allowed to submit ops in any other state */
37046@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
37047 if (object->n_exclusive > 0) {
37048 atomic_inc(&op->usage);
37049 list_add_tail(&op->pend_link, &object->pending_ops);
37050- fscache_stat(&fscache_n_op_pend);
37051+ fscache_stat_unchecked(&fscache_n_op_pend);
37052 } else if (!list_empty(&object->pending_ops)) {
37053 atomic_inc(&op->usage);
37054 list_add_tail(&op->pend_link, &object->pending_ops);
37055- fscache_stat(&fscache_n_op_pend);
37056+ fscache_stat_unchecked(&fscache_n_op_pend);
37057 fscache_start_operations(object);
37058 } else {
37059 ASSERTCMP(object->n_exclusive, ==, 0);
37060@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
37061 object->n_ops++;
37062 atomic_inc(&op->usage);
37063 list_add_tail(&op->pend_link, &object->pending_ops);
37064- fscache_stat(&fscache_n_op_pend);
37065+ fscache_stat_unchecked(&fscache_n_op_pend);
37066 ret = 0;
37067 } else if (object->state == FSCACHE_OBJECT_DYING ||
37068 object->state == FSCACHE_OBJECT_LC_DYING ||
37069 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37070- fscache_stat(&fscache_n_op_rejected);
37071+ fscache_stat_unchecked(&fscache_n_op_rejected);
37072 ret = -ENOBUFS;
37073 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37074 fscache_report_unexpected_submission(object, op, ostate);
37075@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
37076
37077 ret = -EBUSY;
37078 if (!list_empty(&op->pend_link)) {
37079- fscache_stat(&fscache_n_op_cancelled);
37080+ fscache_stat_unchecked(&fscache_n_op_cancelled);
37081 list_del_init(&op->pend_link);
37082 object->n_ops--;
37083 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37084@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
37085 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37086 BUG();
37087
37088- fscache_stat(&fscache_n_op_release);
37089+ fscache_stat_unchecked(&fscache_n_op_release);
37090
37091 if (op->release) {
37092 op->release(op);
37093@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
37094 * lock, and defer it otherwise */
37095 if (!spin_trylock(&object->lock)) {
37096 _debug("defer put");
37097- fscache_stat(&fscache_n_op_deferred_release);
37098+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
37099
37100 cache = object->cache;
37101 spin_lock(&cache->op_gc_list_lock);
37102@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
37103
37104 _debug("GC DEFERRED REL OBJ%x OP%x",
37105 object->debug_id, op->debug_id);
37106- fscache_stat(&fscache_n_op_gc);
37107+ fscache_stat_unchecked(&fscache_n_op_gc);
37108
37109 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37110
37111diff -urNp linux-3.0.3/fs/fscache/page.c linux-3.0.3/fs/fscache/page.c
37112--- linux-3.0.3/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
37113+++ linux-3.0.3/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
37114@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37115 val = radix_tree_lookup(&cookie->stores, page->index);
37116 if (!val) {
37117 rcu_read_unlock();
37118- fscache_stat(&fscache_n_store_vmscan_not_storing);
37119+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37120 __fscache_uncache_page(cookie, page);
37121 return true;
37122 }
37123@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37124 spin_unlock(&cookie->stores_lock);
37125
37126 if (xpage) {
37127- fscache_stat(&fscache_n_store_vmscan_cancelled);
37128- fscache_stat(&fscache_n_store_radix_deletes);
37129+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37130+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37131 ASSERTCMP(xpage, ==, page);
37132 } else {
37133- fscache_stat(&fscache_n_store_vmscan_gone);
37134+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37135 }
37136
37137 wake_up_bit(&cookie->flags, 0);
37138@@ -107,7 +107,7 @@ page_busy:
37139 /* we might want to wait here, but that could deadlock the allocator as
37140 * the work threads writing to the cache may all end up sleeping
37141 * on memory allocation */
37142- fscache_stat(&fscache_n_store_vmscan_busy);
37143+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37144 return false;
37145 }
37146 EXPORT_SYMBOL(__fscache_maybe_release_page);
37147@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37148 FSCACHE_COOKIE_STORING_TAG);
37149 if (!radix_tree_tag_get(&cookie->stores, page->index,
37150 FSCACHE_COOKIE_PENDING_TAG)) {
37151- fscache_stat(&fscache_n_store_radix_deletes);
37152+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37153 xpage = radix_tree_delete(&cookie->stores, page->index);
37154 }
37155 spin_unlock(&cookie->stores_lock);
37156@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37157
37158 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37159
37160- fscache_stat(&fscache_n_attr_changed_calls);
37161+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37162
37163 if (fscache_object_is_active(object)) {
37164 fscache_stat(&fscache_n_cop_attr_changed);
37165@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
37166
37167 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37168
37169- fscache_stat(&fscache_n_attr_changed);
37170+ fscache_stat_unchecked(&fscache_n_attr_changed);
37171
37172 op = kzalloc(sizeof(*op), GFP_KERNEL);
37173 if (!op) {
37174- fscache_stat(&fscache_n_attr_changed_nomem);
37175+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37176 _leave(" = -ENOMEM");
37177 return -ENOMEM;
37178 }
37179@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
37180 if (fscache_submit_exclusive_op(object, op) < 0)
37181 goto nobufs;
37182 spin_unlock(&cookie->lock);
37183- fscache_stat(&fscache_n_attr_changed_ok);
37184+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37185 fscache_put_operation(op);
37186 _leave(" = 0");
37187 return 0;
37188@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
37189 nobufs:
37190 spin_unlock(&cookie->lock);
37191 kfree(op);
37192- fscache_stat(&fscache_n_attr_changed_nobufs);
37193+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37194 _leave(" = %d", -ENOBUFS);
37195 return -ENOBUFS;
37196 }
37197@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
37198 /* allocate a retrieval operation and attempt to submit it */
37199 op = kzalloc(sizeof(*op), GFP_NOIO);
37200 if (!op) {
37201- fscache_stat(&fscache_n_retrievals_nomem);
37202+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37203 return NULL;
37204 }
37205
37206@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
37207 return 0;
37208 }
37209
37210- fscache_stat(&fscache_n_retrievals_wait);
37211+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
37212
37213 jif = jiffies;
37214 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37215 fscache_wait_bit_interruptible,
37216 TASK_INTERRUPTIBLE) != 0) {
37217- fscache_stat(&fscache_n_retrievals_intr);
37218+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37219 _leave(" = -ERESTARTSYS");
37220 return -ERESTARTSYS;
37221 }
37222@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
37223 */
37224 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37225 struct fscache_retrieval *op,
37226- atomic_t *stat_op_waits,
37227- atomic_t *stat_object_dead)
37228+ atomic_unchecked_t *stat_op_waits,
37229+ atomic_unchecked_t *stat_object_dead)
37230 {
37231 int ret;
37232
37233@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
37234 goto check_if_dead;
37235
37236 _debug(">>> WT");
37237- fscache_stat(stat_op_waits);
37238+ fscache_stat_unchecked(stat_op_waits);
37239 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37240 fscache_wait_bit_interruptible,
37241 TASK_INTERRUPTIBLE) < 0) {
37242@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
37243
37244 check_if_dead:
37245 if (unlikely(fscache_object_is_dead(object))) {
37246- fscache_stat(stat_object_dead);
37247+ fscache_stat_unchecked(stat_object_dead);
37248 return -ENOBUFS;
37249 }
37250 return 0;
37251@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
37252
37253 _enter("%p,%p,,,", cookie, page);
37254
37255- fscache_stat(&fscache_n_retrievals);
37256+ fscache_stat_unchecked(&fscache_n_retrievals);
37257
37258 if (hlist_empty(&cookie->backing_objects))
37259 goto nobufs;
37260@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
37261 goto nobufs_unlock;
37262 spin_unlock(&cookie->lock);
37263
37264- fscache_stat(&fscache_n_retrieval_ops);
37265+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37266
37267 /* pin the netfs read context in case we need to do the actual netfs
37268 * read because we've encountered a cache read failure */
37269@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
37270
37271 error:
37272 if (ret == -ENOMEM)
37273- fscache_stat(&fscache_n_retrievals_nomem);
37274+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37275 else if (ret == -ERESTARTSYS)
37276- fscache_stat(&fscache_n_retrievals_intr);
37277+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37278 else if (ret == -ENODATA)
37279- fscache_stat(&fscache_n_retrievals_nodata);
37280+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37281 else if (ret < 0)
37282- fscache_stat(&fscache_n_retrievals_nobufs);
37283+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37284 else
37285- fscache_stat(&fscache_n_retrievals_ok);
37286+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37287
37288 fscache_put_retrieval(op);
37289 _leave(" = %d", ret);
37290@@ -429,7 +429,7 @@ nobufs_unlock:
37291 spin_unlock(&cookie->lock);
37292 kfree(op);
37293 nobufs:
37294- fscache_stat(&fscache_n_retrievals_nobufs);
37295+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37296 _leave(" = -ENOBUFS");
37297 return -ENOBUFS;
37298 }
37299@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
37300
37301 _enter("%p,,%d,,,", cookie, *nr_pages);
37302
37303- fscache_stat(&fscache_n_retrievals);
37304+ fscache_stat_unchecked(&fscache_n_retrievals);
37305
37306 if (hlist_empty(&cookie->backing_objects))
37307 goto nobufs;
37308@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
37309 goto nobufs_unlock;
37310 spin_unlock(&cookie->lock);
37311
37312- fscache_stat(&fscache_n_retrieval_ops);
37313+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
37314
37315 /* pin the netfs read context in case we need to do the actual netfs
37316 * read because we've encountered a cache read failure */
37317@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37318
37319 error:
37320 if (ret == -ENOMEM)
37321- fscache_stat(&fscache_n_retrievals_nomem);
37322+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37323 else if (ret == -ERESTARTSYS)
37324- fscache_stat(&fscache_n_retrievals_intr);
37325+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
37326 else if (ret == -ENODATA)
37327- fscache_stat(&fscache_n_retrievals_nodata);
37328+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37329 else if (ret < 0)
37330- fscache_stat(&fscache_n_retrievals_nobufs);
37331+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37332 else
37333- fscache_stat(&fscache_n_retrievals_ok);
37334+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
37335
37336 fscache_put_retrieval(op);
37337 _leave(" = %d", ret);
37338@@ -545,7 +545,7 @@ nobufs_unlock:
37339 spin_unlock(&cookie->lock);
37340 kfree(op);
37341 nobufs:
37342- fscache_stat(&fscache_n_retrievals_nobufs);
37343+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37344 _leave(" = -ENOBUFS");
37345 return -ENOBUFS;
37346 }
37347@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37348
37349 _enter("%p,%p,,,", cookie, page);
37350
37351- fscache_stat(&fscache_n_allocs);
37352+ fscache_stat_unchecked(&fscache_n_allocs);
37353
37354 if (hlist_empty(&cookie->backing_objects))
37355 goto nobufs;
37356@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37357 goto nobufs_unlock;
37358 spin_unlock(&cookie->lock);
37359
37360- fscache_stat(&fscache_n_alloc_ops);
37361+ fscache_stat_unchecked(&fscache_n_alloc_ops);
37362
37363 ret = fscache_wait_for_retrieval_activation(
37364 object, op,
37365@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37366
37367 error:
37368 if (ret == -ERESTARTSYS)
37369- fscache_stat(&fscache_n_allocs_intr);
37370+ fscache_stat_unchecked(&fscache_n_allocs_intr);
37371 else if (ret < 0)
37372- fscache_stat(&fscache_n_allocs_nobufs);
37373+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37374 else
37375- fscache_stat(&fscache_n_allocs_ok);
37376+ fscache_stat_unchecked(&fscache_n_allocs_ok);
37377
37378 fscache_put_retrieval(op);
37379 _leave(" = %d", ret);
37380@@ -625,7 +625,7 @@ nobufs_unlock:
37381 spin_unlock(&cookie->lock);
37382 kfree(op);
37383 nobufs:
37384- fscache_stat(&fscache_n_allocs_nobufs);
37385+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37386 _leave(" = -ENOBUFS");
37387 return -ENOBUFS;
37388 }
37389@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37390
37391 spin_lock(&cookie->stores_lock);
37392
37393- fscache_stat(&fscache_n_store_calls);
37394+ fscache_stat_unchecked(&fscache_n_store_calls);
37395
37396 /* find a page to store */
37397 page = NULL;
37398@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37399 page = results[0];
37400 _debug("gang %d [%lx]", n, page->index);
37401 if (page->index > op->store_limit) {
37402- fscache_stat(&fscache_n_store_pages_over_limit);
37403+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37404 goto superseded;
37405 }
37406
37407@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37408 spin_unlock(&cookie->stores_lock);
37409 spin_unlock(&object->lock);
37410
37411- fscache_stat(&fscache_n_store_pages);
37412+ fscache_stat_unchecked(&fscache_n_store_pages);
37413 fscache_stat(&fscache_n_cop_write_page);
37414 ret = object->cache->ops->write_page(op, page);
37415 fscache_stat_d(&fscache_n_cop_write_page);
37416@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37417 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37418 ASSERT(PageFsCache(page));
37419
37420- fscache_stat(&fscache_n_stores);
37421+ fscache_stat_unchecked(&fscache_n_stores);
37422
37423 op = kzalloc(sizeof(*op), GFP_NOIO);
37424 if (!op)
37425@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37426 spin_unlock(&cookie->stores_lock);
37427 spin_unlock(&object->lock);
37428
37429- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37430+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37431 op->store_limit = object->store_limit;
37432
37433 if (fscache_submit_op(object, &op->op) < 0)
37434@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37435
37436 spin_unlock(&cookie->lock);
37437 radix_tree_preload_end();
37438- fscache_stat(&fscache_n_store_ops);
37439- fscache_stat(&fscache_n_stores_ok);
37440+ fscache_stat_unchecked(&fscache_n_store_ops);
37441+ fscache_stat_unchecked(&fscache_n_stores_ok);
37442
37443 /* the work queue now carries its own ref on the object */
37444 fscache_put_operation(&op->op);
37445@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37446 return 0;
37447
37448 already_queued:
37449- fscache_stat(&fscache_n_stores_again);
37450+ fscache_stat_unchecked(&fscache_n_stores_again);
37451 already_pending:
37452 spin_unlock(&cookie->stores_lock);
37453 spin_unlock(&object->lock);
37454 spin_unlock(&cookie->lock);
37455 radix_tree_preload_end();
37456 kfree(op);
37457- fscache_stat(&fscache_n_stores_ok);
37458+ fscache_stat_unchecked(&fscache_n_stores_ok);
37459 _leave(" = 0");
37460 return 0;
37461
37462@@ -851,14 +851,14 @@ nobufs:
37463 spin_unlock(&cookie->lock);
37464 radix_tree_preload_end();
37465 kfree(op);
37466- fscache_stat(&fscache_n_stores_nobufs);
37467+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
37468 _leave(" = -ENOBUFS");
37469 return -ENOBUFS;
37470
37471 nomem_free:
37472 kfree(op);
37473 nomem:
37474- fscache_stat(&fscache_n_stores_oom);
37475+ fscache_stat_unchecked(&fscache_n_stores_oom);
37476 _leave(" = -ENOMEM");
37477 return -ENOMEM;
37478 }
37479@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37480 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37481 ASSERTCMP(page, !=, NULL);
37482
37483- fscache_stat(&fscache_n_uncaches);
37484+ fscache_stat_unchecked(&fscache_n_uncaches);
37485
37486 /* cache withdrawal may beat us to it */
37487 if (!PageFsCache(page))
37488@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37489 unsigned long loop;
37490
37491 #ifdef CONFIG_FSCACHE_STATS
37492- atomic_add(pagevec->nr, &fscache_n_marks);
37493+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37494 #endif
37495
37496 for (loop = 0; loop < pagevec->nr; loop++) {
37497diff -urNp linux-3.0.3/fs/fscache/stats.c linux-3.0.3/fs/fscache/stats.c
37498--- linux-3.0.3/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37499+++ linux-3.0.3/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37500@@ -18,95 +18,95 @@
37501 /*
37502 * operation counters
37503 */
37504-atomic_t fscache_n_op_pend;
37505-atomic_t fscache_n_op_run;
37506-atomic_t fscache_n_op_enqueue;
37507-atomic_t fscache_n_op_requeue;
37508-atomic_t fscache_n_op_deferred_release;
37509-atomic_t fscache_n_op_release;
37510-atomic_t fscache_n_op_gc;
37511-atomic_t fscache_n_op_cancelled;
37512-atomic_t fscache_n_op_rejected;
37513-
37514-atomic_t fscache_n_attr_changed;
37515-atomic_t fscache_n_attr_changed_ok;
37516-atomic_t fscache_n_attr_changed_nobufs;
37517-atomic_t fscache_n_attr_changed_nomem;
37518-atomic_t fscache_n_attr_changed_calls;
37519-
37520-atomic_t fscache_n_allocs;
37521-atomic_t fscache_n_allocs_ok;
37522-atomic_t fscache_n_allocs_wait;
37523-atomic_t fscache_n_allocs_nobufs;
37524-atomic_t fscache_n_allocs_intr;
37525-atomic_t fscache_n_allocs_object_dead;
37526-atomic_t fscache_n_alloc_ops;
37527-atomic_t fscache_n_alloc_op_waits;
37528-
37529-atomic_t fscache_n_retrievals;
37530-atomic_t fscache_n_retrievals_ok;
37531-atomic_t fscache_n_retrievals_wait;
37532-atomic_t fscache_n_retrievals_nodata;
37533-atomic_t fscache_n_retrievals_nobufs;
37534-atomic_t fscache_n_retrievals_intr;
37535-atomic_t fscache_n_retrievals_nomem;
37536-atomic_t fscache_n_retrievals_object_dead;
37537-atomic_t fscache_n_retrieval_ops;
37538-atomic_t fscache_n_retrieval_op_waits;
37539-
37540-atomic_t fscache_n_stores;
37541-atomic_t fscache_n_stores_ok;
37542-atomic_t fscache_n_stores_again;
37543-atomic_t fscache_n_stores_nobufs;
37544-atomic_t fscache_n_stores_oom;
37545-atomic_t fscache_n_store_ops;
37546-atomic_t fscache_n_store_calls;
37547-atomic_t fscache_n_store_pages;
37548-atomic_t fscache_n_store_radix_deletes;
37549-atomic_t fscache_n_store_pages_over_limit;
37550-
37551-atomic_t fscache_n_store_vmscan_not_storing;
37552-atomic_t fscache_n_store_vmscan_gone;
37553-atomic_t fscache_n_store_vmscan_busy;
37554-atomic_t fscache_n_store_vmscan_cancelled;
37555-
37556-atomic_t fscache_n_marks;
37557-atomic_t fscache_n_uncaches;
37558-
37559-atomic_t fscache_n_acquires;
37560-atomic_t fscache_n_acquires_null;
37561-atomic_t fscache_n_acquires_no_cache;
37562-atomic_t fscache_n_acquires_ok;
37563-atomic_t fscache_n_acquires_nobufs;
37564-atomic_t fscache_n_acquires_oom;
37565-
37566-atomic_t fscache_n_updates;
37567-atomic_t fscache_n_updates_null;
37568-atomic_t fscache_n_updates_run;
37569-
37570-atomic_t fscache_n_relinquishes;
37571-atomic_t fscache_n_relinquishes_null;
37572-atomic_t fscache_n_relinquishes_waitcrt;
37573-atomic_t fscache_n_relinquishes_retire;
37574-
37575-atomic_t fscache_n_cookie_index;
37576-atomic_t fscache_n_cookie_data;
37577-atomic_t fscache_n_cookie_special;
37578-
37579-atomic_t fscache_n_object_alloc;
37580-atomic_t fscache_n_object_no_alloc;
37581-atomic_t fscache_n_object_lookups;
37582-atomic_t fscache_n_object_lookups_negative;
37583-atomic_t fscache_n_object_lookups_positive;
37584-atomic_t fscache_n_object_lookups_timed_out;
37585-atomic_t fscache_n_object_created;
37586-atomic_t fscache_n_object_avail;
37587-atomic_t fscache_n_object_dead;
37588-
37589-atomic_t fscache_n_checkaux_none;
37590-atomic_t fscache_n_checkaux_okay;
37591-atomic_t fscache_n_checkaux_update;
37592-atomic_t fscache_n_checkaux_obsolete;
37593+atomic_unchecked_t fscache_n_op_pend;
37594+atomic_unchecked_t fscache_n_op_run;
37595+atomic_unchecked_t fscache_n_op_enqueue;
37596+atomic_unchecked_t fscache_n_op_requeue;
37597+atomic_unchecked_t fscache_n_op_deferred_release;
37598+atomic_unchecked_t fscache_n_op_release;
37599+atomic_unchecked_t fscache_n_op_gc;
37600+atomic_unchecked_t fscache_n_op_cancelled;
37601+atomic_unchecked_t fscache_n_op_rejected;
37602+
37603+atomic_unchecked_t fscache_n_attr_changed;
37604+atomic_unchecked_t fscache_n_attr_changed_ok;
37605+atomic_unchecked_t fscache_n_attr_changed_nobufs;
37606+atomic_unchecked_t fscache_n_attr_changed_nomem;
37607+atomic_unchecked_t fscache_n_attr_changed_calls;
37608+
37609+atomic_unchecked_t fscache_n_allocs;
37610+atomic_unchecked_t fscache_n_allocs_ok;
37611+atomic_unchecked_t fscache_n_allocs_wait;
37612+atomic_unchecked_t fscache_n_allocs_nobufs;
37613+atomic_unchecked_t fscache_n_allocs_intr;
37614+atomic_unchecked_t fscache_n_allocs_object_dead;
37615+atomic_unchecked_t fscache_n_alloc_ops;
37616+atomic_unchecked_t fscache_n_alloc_op_waits;
37617+
37618+atomic_unchecked_t fscache_n_retrievals;
37619+atomic_unchecked_t fscache_n_retrievals_ok;
37620+atomic_unchecked_t fscache_n_retrievals_wait;
37621+atomic_unchecked_t fscache_n_retrievals_nodata;
37622+atomic_unchecked_t fscache_n_retrievals_nobufs;
37623+atomic_unchecked_t fscache_n_retrievals_intr;
37624+atomic_unchecked_t fscache_n_retrievals_nomem;
37625+atomic_unchecked_t fscache_n_retrievals_object_dead;
37626+atomic_unchecked_t fscache_n_retrieval_ops;
37627+atomic_unchecked_t fscache_n_retrieval_op_waits;
37628+
37629+atomic_unchecked_t fscache_n_stores;
37630+atomic_unchecked_t fscache_n_stores_ok;
37631+atomic_unchecked_t fscache_n_stores_again;
37632+atomic_unchecked_t fscache_n_stores_nobufs;
37633+atomic_unchecked_t fscache_n_stores_oom;
37634+atomic_unchecked_t fscache_n_store_ops;
37635+atomic_unchecked_t fscache_n_store_calls;
37636+atomic_unchecked_t fscache_n_store_pages;
37637+atomic_unchecked_t fscache_n_store_radix_deletes;
37638+atomic_unchecked_t fscache_n_store_pages_over_limit;
37639+
37640+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37641+atomic_unchecked_t fscache_n_store_vmscan_gone;
37642+atomic_unchecked_t fscache_n_store_vmscan_busy;
37643+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37644+
37645+atomic_unchecked_t fscache_n_marks;
37646+atomic_unchecked_t fscache_n_uncaches;
37647+
37648+atomic_unchecked_t fscache_n_acquires;
37649+atomic_unchecked_t fscache_n_acquires_null;
37650+atomic_unchecked_t fscache_n_acquires_no_cache;
37651+atomic_unchecked_t fscache_n_acquires_ok;
37652+atomic_unchecked_t fscache_n_acquires_nobufs;
37653+atomic_unchecked_t fscache_n_acquires_oom;
37654+
37655+atomic_unchecked_t fscache_n_updates;
37656+atomic_unchecked_t fscache_n_updates_null;
37657+atomic_unchecked_t fscache_n_updates_run;
37658+
37659+atomic_unchecked_t fscache_n_relinquishes;
37660+atomic_unchecked_t fscache_n_relinquishes_null;
37661+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37662+atomic_unchecked_t fscache_n_relinquishes_retire;
37663+
37664+atomic_unchecked_t fscache_n_cookie_index;
37665+atomic_unchecked_t fscache_n_cookie_data;
37666+atomic_unchecked_t fscache_n_cookie_special;
37667+
37668+atomic_unchecked_t fscache_n_object_alloc;
37669+atomic_unchecked_t fscache_n_object_no_alloc;
37670+atomic_unchecked_t fscache_n_object_lookups;
37671+atomic_unchecked_t fscache_n_object_lookups_negative;
37672+atomic_unchecked_t fscache_n_object_lookups_positive;
37673+atomic_unchecked_t fscache_n_object_lookups_timed_out;
37674+atomic_unchecked_t fscache_n_object_created;
37675+atomic_unchecked_t fscache_n_object_avail;
37676+atomic_unchecked_t fscache_n_object_dead;
37677+
37678+atomic_unchecked_t fscache_n_checkaux_none;
37679+atomic_unchecked_t fscache_n_checkaux_okay;
37680+atomic_unchecked_t fscache_n_checkaux_update;
37681+atomic_unchecked_t fscache_n_checkaux_obsolete;
37682
37683 atomic_t fscache_n_cop_alloc_object;
37684 atomic_t fscache_n_cop_lookup_object;
37685@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37686 seq_puts(m, "FS-Cache statistics\n");
37687
37688 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37689- atomic_read(&fscache_n_cookie_index),
37690- atomic_read(&fscache_n_cookie_data),
37691- atomic_read(&fscache_n_cookie_special));
37692+ atomic_read_unchecked(&fscache_n_cookie_index),
37693+ atomic_read_unchecked(&fscache_n_cookie_data),
37694+ atomic_read_unchecked(&fscache_n_cookie_special));
37695
37696 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37697- atomic_read(&fscache_n_object_alloc),
37698- atomic_read(&fscache_n_object_no_alloc),
37699- atomic_read(&fscache_n_object_avail),
37700- atomic_read(&fscache_n_object_dead));
37701+ atomic_read_unchecked(&fscache_n_object_alloc),
37702+ atomic_read_unchecked(&fscache_n_object_no_alloc),
37703+ atomic_read_unchecked(&fscache_n_object_avail),
37704+ atomic_read_unchecked(&fscache_n_object_dead));
37705 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37706- atomic_read(&fscache_n_checkaux_none),
37707- atomic_read(&fscache_n_checkaux_okay),
37708- atomic_read(&fscache_n_checkaux_update),
37709- atomic_read(&fscache_n_checkaux_obsolete));
37710+ atomic_read_unchecked(&fscache_n_checkaux_none),
37711+ atomic_read_unchecked(&fscache_n_checkaux_okay),
37712+ atomic_read_unchecked(&fscache_n_checkaux_update),
37713+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37714
37715 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37716- atomic_read(&fscache_n_marks),
37717- atomic_read(&fscache_n_uncaches));
37718+ atomic_read_unchecked(&fscache_n_marks),
37719+ atomic_read_unchecked(&fscache_n_uncaches));
37720
37721 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37722 " oom=%u\n",
37723- atomic_read(&fscache_n_acquires),
37724- atomic_read(&fscache_n_acquires_null),
37725- atomic_read(&fscache_n_acquires_no_cache),
37726- atomic_read(&fscache_n_acquires_ok),
37727- atomic_read(&fscache_n_acquires_nobufs),
37728- atomic_read(&fscache_n_acquires_oom));
37729+ atomic_read_unchecked(&fscache_n_acquires),
37730+ atomic_read_unchecked(&fscache_n_acquires_null),
37731+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
37732+ atomic_read_unchecked(&fscache_n_acquires_ok),
37733+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
37734+ atomic_read_unchecked(&fscache_n_acquires_oom));
37735
37736 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37737- atomic_read(&fscache_n_object_lookups),
37738- atomic_read(&fscache_n_object_lookups_negative),
37739- atomic_read(&fscache_n_object_lookups_positive),
37740- atomic_read(&fscache_n_object_created),
37741- atomic_read(&fscache_n_object_lookups_timed_out));
37742+ atomic_read_unchecked(&fscache_n_object_lookups),
37743+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
37744+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
37745+ atomic_read_unchecked(&fscache_n_object_created),
37746+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37747
37748 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37749- atomic_read(&fscache_n_updates),
37750- atomic_read(&fscache_n_updates_null),
37751- atomic_read(&fscache_n_updates_run));
37752+ atomic_read_unchecked(&fscache_n_updates),
37753+ atomic_read_unchecked(&fscache_n_updates_null),
37754+ atomic_read_unchecked(&fscache_n_updates_run));
37755
37756 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37757- atomic_read(&fscache_n_relinquishes),
37758- atomic_read(&fscache_n_relinquishes_null),
37759- atomic_read(&fscache_n_relinquishes_waitcrt),
37760- atomic_read(&fscache_n_relinquishes_retire));
37761+ atomic_read_unchecked(&fscache_n_relinquishes),
37762+ atomic_read_unchecked(&fscache_n_relinquishes_null),
37763+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37764+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
37765
37766 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37767- atomic_read(&fscache_n_attr_changed),
37768- atomic_read(&fscache_n_attr_changed_ok),
37769- atomic_read(&fscache_n_attr_changed_nobufs),
37770- atomic_read(&fscache_n_attr_changed_nomem),
37771- atomic_read(&fscache_n_attr_changed_calls));
37772+ atomic_read_unchecked(&fscache_n_attr_changed),
37773+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
37774+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37775+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37776+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
37777
37778 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37779- atomic_read(&fscache_n_allocs),
37780- atomic_read(&fscache_n_allocs_ok),
37781- atomic_read(&fscache_n_allocs_wait),
37782- atomic_read(&fscache_n_allocs_nobufs),
37783- atomic_read(&fscache_n_allocs_intr));
37784+ atomic_read_unchecked(&fscache_n_allocs),
37785+ atomic_read_unchecked(&fscache_n_allocs_ok),
37786+ atomic_read_unchecked(&fscache_n_allocs_wait),
37787+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
37788+ atomic_read_unchecked(&fscache_n_allocs_intr));
37789 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37790- atomic_read(&fscache_n_alloc_ops),
37791- atomic_read(&fscache_n_alloc_op_waits),
37792- atomic_read(&fscache_n_allocs_object_dead));
37793+ atomic_read_unchecked(&fscache_n_alloc_ops),
37794+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
37795+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
37796
37797 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37798 " int=%u oom=%u\n",
37799- atomic_read(&fscache_n_retrievals),
37800- atomic_read(&fscache_n_retrievals_ok),
37801- atomic_read(&fscache_n_retrievals_wait),
37802- atomic_read(&fscache_n_retrievals_nodata),
37803- atomic_read(&fscache_n_retrievals_nobufs),
37804- atomic_read(&fscache_n_retrievals_intr),
37805- atomic_read(&fscache_n_retrievals_nomem));
37806+ atomic_read_unchecked(&fscache_n_retrievals),
37807+ atomic_read_unchecked(&fscache_n_retrievals_ok),
37808+ atomic_read_unchecked(&fscache_n_retrievals_wait),
37809+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
37810+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37811+ atomic_read_unchecked(&fscache_n_retrievals_intr),
37812+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
37813 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37814- atomic_read(&fscache_n_retrieval_ops),
37815- atomic_read(&fscache_n_retrieval_op_waits),
37816- atomic_read(&fscache_n_retrievals_object_dead));
37817+ atomic_read_unchecked(&fscache_n_retrieval_ops),
37818+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37819+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37820
37821 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37822- atomic_read(&fscache_n_stores),
37823- atomic_read(&fscache_n_stores_ok),
37824- atomic_read(&fscache_n_stores_again),
37825- atomic_read(&fscache_n_stores_nobufs),
37826- atomic_read(&fscache_n_stores_oom));
37827+ atomic_read_unchecked(&fscache_n_stores),
37828+ atomic_read_unchecked(&fscache_n_stores_ok),
37829+ atomic_read_unchecked(&fscache_n_stores_again),
37830+ atomic_read_unchecked(&fscache_n_stores_nobufs),
37831+ atomic_read_unchecked(&fscache_n_stores_oom));
37832 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37833- atomic_read(&fscache_n_store_ops),
37834- atomic_read(&fscache_n_store_calls),
37835- atomic_read(&fscache_n_store_pages),
37836- atomic_read(&fscache_n_store_radix_deletes),
37837- atomic_read(&fscache_n_store_pages_over_limit));
37838+ atomic_read_unchecked(&fscache_n_store_ops),
37839+ atomic_read_unchecked(&fscache_n_store_calls),
37840+ atomic_read_unchecked(&fscache_n_store_pages),
37841+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
37842+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37843
37844 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37845- atomic_read(&fscache_n_store_vmscan_not_storing),
37846- atomic_read(&fscache_n_store_vmscan_gone),
37847- atomic_read(&fscache_n_store_vmscan_busy),
37848- atomic_read(&fscache_n_store_vmscan_cancelled));
37849+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37850+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37851+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37852+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37853
37854 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37855- atomic_read(&fscache_n_op_pend),
37856- atomic_read(&fscache_n_op_run),
37857- atomic_read(&fscache_n_op_enqueue),
37858- atomic_read(&fscache_n_op_cancelled),
37859- atomic_read(&fscache_n_op_rejected));
37860+ atomic_read_unchecked(&fscache_n_op_pend),
37861+ atomic_read_unchecked(&fscache_n_op_run),
37862+ atomic_read_unchecked(&fscache_n_op_enqueue),
37863+ atomic_read_unchecked(&fscache_n_op_cancelled),
37864+ atomic_read_unchecked(&fscache_n_op_rejected));
37865 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37866- atomic_read(&fscache_n_op_deferred_release),
37867- atomic_read(&fscache_n_op_release),
37868- atomic_read(&fscache_n_op_gc));
37869+ atomic_read_unchecked(&fscache_n_op_deferred_release),
37870+ atomic_read_unchecked(&fscache_n_op_release),
37871+ atomic_read_unchecked(&fscache_n_op_gc));
37872
37873 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37874 atomic_read(&fscache_n_cop_alloc_object),
37875diff -urNp linux-3.0.3/fs/fs_struct.c linux-3.0.3/fs/fs_struct.c
37876--- linux-3.0.3/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37877+++ linux-3.0.3/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37878@@ -4,6 +4,7 @@
37879 #include <linux/path.h>
37880 #include <linux/slab.h>
37881 #include <linux/fs_struct.h>
37882+#include <linux/grsecurity.h>
37883 #include "internal.h"
37884
37885 static inline void path_get_longterm(struct path *path)
37886@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37887 old_root = fs->root;
37888 fs->root = *path;
37889 path_get_longterm(path);
37890+ gr_set_chroot_entries(current, path);
37891 write_seqcount_end(&fs->seq);
37892 spin_unlock(&fs->lock);
37893 if (old_root.dentry)
37894@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37895 && fs->root.mnt == old_root->mnt) {
37896 path_get_longterm(new_root);
37897 fs->root = *new_root;
37898+ gr_set_chroot_entries(p, new_root);
37899 count++;
37900 }
37901 if (fs->pwd.dentry == old_root->dentry
37902@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37903 spin_lock(&fs->lock);
37904 write_seqcount_begin(&fs->seq);
37905 tsk->fs = NULL;
37906- kill = !--fs->users;
37907+ gr_clear_chroot_entries(tsk);
37908+ kill = !atomic_dec_return(&fs->users);
37909 write_seqcount_end(&fs->seq);
37910 spin_unlock(&fs->lock);
37911 task_unlock(tsk);
37912@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37913 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37914 /* We don't need to lock fs - think why ;-) */
37915 if (fs) {
37916- fs->users = 1;
37917+ atomic_set(&fs->users, 1);
37918 fs->in_exec = 0;
37919 spin_lock_init(&fs->lock);
37920 seqcount_init(&fs->seq);
37921@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37922 spin_lock(&old->lock);
37923 fs->root = old->root;
37924 path_get_longterm(&fs->root);
37925+ /* instead of calling gr_set_chroot_entries here,
37926+ we call it from every caller of this function
37927+ */
37928 fs->pwd = old->pwd;
37929 path_get_longterm(&fs->pwd);
37930 spin_unlock(&old->lock);
37931@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37932
37933 task_lock(current);
37934 spin_lock(&fs->lock);
37935- kill = !--fs->users;
37936+ kill = !atomic_dec_return(&fs->users);
37937 current->fs = new_fs;
37938+ gr_set_chroot_entries(current, &new_fs->root);
37939 spin_unlock(&fs->lock);
37940 task_unlock(current);
37941
37942@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37943
37944 /* to be mentioned only in INIT_TASK */
37945 struct fs_struct init_fs = {
37946- .users = 1,
37947+ .users = ATOMIC_INIT(1),
37948 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37949 .seq = SEQCNT_ZERO,
37950 .umask = 0022,
37951@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37952 task_lock(current);
37953
37954 spin_lock(&init_fs.lock);
37955- init_fs.users++;
37956+ atomic_inc(&init_fs.users);
37957 spin_unlock(&init_fs.lock);
37958
37959 spin_lock(&fs->lock);
37960 current->fs = &init_fs;
37961- kill = !--fs->users;
37962+ gr_set_chroot_entries(current, &current->fs->root);
37963+ kill = !atomic_dec_return(&fs->users);
37964 spin_unlock(&fs->lock);
37965
37966 task_unlock(current);
37967diff -urNp linux-3.0.3/fs/fuse/cuse.c linux-3.0.3/fs/fuse/cuse.c
37968--- linux-3.0.3/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37969+++ linux-3.0.3/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37970@@ -586,10 +586,12 @@ static int __init cuse_init(void)
37971 INIT_LIST_HEAD(&cuse_conntbl[i]);
37972
37973 /* inherit and extend fuse_dev_operations */
37974- cuse_channel_fops = fuse_dev_operations;
37975- cuse_channel_fops.owner = THIS_MODULE;
37976- cuse_channel_fops.open = cuse_channel_open;
37977- cuse_channel_fops.release = cuse_channel_release;
37978+ pax_open_kernel();
37979+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37980+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37981+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
37982+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
37983+ pax_close_kernel();
37984
37985 cuse_class = class_create(THIS_MODULE, "cuse");
37986 if (IS_ERR(cuse_class))
37987diff -urNp linux-3.0.3/fs/fuse/dev.c linux-3.0.3/fs/fuse/dev.c
37988--- linux-3.0.3/fs/fuse/dev.c 2011-07-21 22:17:23.000000000 -0400
37989+++ linux-3.0.3/fs/fuse/dev.c 2011-08-23 21:47:56.000000000 -0400
37990@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37991 ret = 0;
37992 pipe_lock(pipe);
37993
37994- if (!pipe->readers) {
37995+ if (!atomic_read(&pipe->readers)) {
37996 send_sig(SIGPIPE, current, 0);
37997 if (!ret)
37998 ret = -EPIPE;
37999diff -urNp linux-3.0.3/fs/fuse/dir.c linux-3.0.3/fs/fuse/dir.c
38000--- linux-3.0.3/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
38001+++ linux-3.0.3/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
38002@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
38003 return link;
38004 }
38005
38006-static void free_link(char *link)
38007+static void free_link(const char *link)
38008 {
38009 if (!IS_ERR(link))
38010 free_page((unsigned long) link);
38011diff -urNp linux-3.0.3/fs/gfs2/inode.c linux-3.0.3/fs/gfs2/inode.c
38012--- linux-3.0.3/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
38013+++ linux-3.0.3/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
38014@@ -1525,7 +1525,7 @@ out:
38015
38016 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38017 {
38018- char *s = nd_get_link(nd);
38019+ const char *s = nd_get_link(nd);
38020 if (!IS_ERR(s))
38021 kfree(s);
38022 }
38023diff -urNp linux-3.0.3/fs/hfsplus/catalog.c linux-3.0.3/fs/hfsplus/catalog.c
38024--- linux-3.0.3/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
38025+++ linux-3.0.3/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
38026@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38027 int err;
38028 u16 type;
38029
38030+ pax_track_stack();
38031+
38032 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38033 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38034 if (err)
38035@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38036 int entry_size;
38037 int err;
38038
38039+ pax_track_stack();
38040+
38041 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38042 str->name, cnid, inode->i_nlink);
38043 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38044@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38045 int entry_size, type;
38046 int err = 0;
38047
38048+ pax_track_stack();
38049+
38050 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38051 cnid, src_dir->i_ino, src_name->name,
38052 dst_dir->i_ino, dst_name->name);
38053diff -urNp linux-3.0.3/fs/hfsplus/dir.c linux-3.0.3/fs/hfsplus/dir.c
38054--- linux-3.0.3/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
38055+++ linux-3.0.3/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
38056@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38057 struct hfsplus_readdir_data *rd;
38058 u16 type;
38059
38060+ pax_track_stack();
38061+
38062 if (filp->f_pos >= inode->i_size)
38063 return 0;
38064
38065diff -urNp linux-3.0.3/fs/hfsplus/inode.c linux-3.0.3/fs/hfsplus/inode.c
38066--- linux-3.0.3/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
38067+++ linux-3.0.3/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
38068@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38069 int res = 0;
38070 u16 type;
38071
38072+ pax_track_stack();
38073+
38074 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38075
38076 HFSPLUS_I(inode)->linkid = 0;
38077@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38078 struct hfs_find_data fd;
38079 hfsplus_cat_entry entry;
38080
38081+ pax_track_stack();
38082+
38083 if (HFSPLUS_IS_RSRC(inode))
38084 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38085
38086diff -urNp linux-3.0.3/fs/hfsplus/ioctl.c linux-3.0.3/fs/hfsplus/ioctl.c
38087--- linux-3.0.3/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38088+++ linux-3.0.3/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38089@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38090 struct hfsplus_cat_file *file;
38091 int res;
38092
38093+ pax_track_stack();
38094+
38095 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38096 return -EOPNOTSUPP;
38097
38098@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38099 struct hfsplus_cat_file *file;
38100 ssize_t res = 0;
38101
38102+ pax_track_stack();
38103+
38104 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38105 return -EOPNOTSUPP;
38106
38107diff -urNp linux-3.0.3/fs/hfsplus/super.c linux-3.0.3/fs/hfsplus/super.c
38108--- linux-3.0.3/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
38109+++ linux-3.0.3/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
38110@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38111 struct nls_table *nls = NULL;
38112 int err;
38113
38114+ pax_track_stack();
38115+
38116 err = -EINVAL;
38117 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38118 if (!sbi)
38119diff -urNp linux-3.0.3/fs/hugetlbfs/inode.c linux-3.0.3/fs/hugetlbfs/inode.c
38120--- linux-3.0.3/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38121+++ linux-3.0.3/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38122@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38123 .kill_sb = kill_litter_super,
38124 };
38125
38126-static struct vfsmount *hugetlbfs_vfsmount;
38127+struct vfsmount *hugetlbfs_vfsmount;
38128
38129 static int can_do_hugetlb_shm(void)
38130 {
38131diff -urNp linux-3.0.3/fs/inode.c linux-3.0.3/fs/inode.c
38132--- linux-3.0.3/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
38133+++ linux-3.0.3/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
38134@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
38135
38136 #ifdef CONFIG_SMP
38137 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38138- static atomic_t shared_last_ino;
38139- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38140+ static atomic_unchecked_t shared_last_ino;
38141+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38142
38143 res = next - LAST_INO_BATCH;
38144 }
38145diff -urNp linux-3.0.3/fs/jbd/checkpoint.c linux-3.0.3/fs/jbd/checkpoint.c
38146--- linux-3.0.3/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
38147+++ linux-3.0.3/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
38148@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38149 tid_t this_tid;
38150 int result;
38151
38152+ pax_track_stack();
38153+
38154 jbd_debug(1, "Start checkpoint\n");
38155
38156 /*
38157diff -urNp linux-3.0.3/fs/jffs2/compr_rtime.c linux-3.0.3/fs/jffs2/compr_rtime.c
38158--- linux-3.0.3/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
38159+++ linux-3.0.3/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
38160@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38161 int outpos = 0;
38162 int pos=0;
38163
38164+ pax_track_stack();
38165+
38166 memset(positions,0,sizeof(positions));
38167
38168 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38169@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38170 int outpos = 0;
38171 int pos=0;
38172
38173+ pax_track_stack();
38174+
38175 memset(positions,0,sizeof(positions));
38176
38177 while (outpos<destlen) {
38178diff -urNp linux-3.0.3/fs/jffs2/compr_rubin.c linux-3.0.3/fs/jffs2/compr_rubin.c
38179--- linux-3.0.3/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
38180+++ linux-3.0.3/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
38181@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38182 int ret;
38183 uint32_t mysrclen, mydstlen;
38184
38185+ pax_track_stack();
38186+
38187 mysrclen = *sourcelen;
38188 mydstlen = *dstlen - 8;
38189
38190diff -urNp linux-3.0.3/fs/jffs2/erase.c linux-3.0.3/fs/jffs2/erase.c
38191--- linux-3.0.3/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
38192+++ linux-3.0.3/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
38193@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38194 struct jffs2_unknown_node marker = {
38195 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38196 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38197- .totlen = cpu_to_je32(c->cleanmarker_size)
38198+ .totlen = cpu_to_je32(c->cleanmarker_size),
38199+ .hdr_crc = cpu_to_je32(0)
38200 };
38201
38202 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38203diff -urNp linux-3.0.3/fs/jffs2/wbuf.c linux-3.0.3/fs/jffs2/wbuf.c
38204--- linux-3.0.3/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
38205+++ linux-3.0.3/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
38206@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38207 {
38208 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38209 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38210- .totlen = constant_cpu_to_je32(8)
38211+ .totlen = constant_cpu_to_je32(8),
38212+ .hdr_crc = constant_cpu_to_je32(0)
38213 };
38214
38215 /*
38216diff -urNp linux-3.0.3/fs/jffs2/xattr.c linux-3.0.3/fs/jffs2/xattr.c
38217--- linux-3.0.3/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
38218+++ linux-3.0.3/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
38219@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38220
38221 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38222
38223+ pax_track_stack();
38224+
38225 /* Phase.1 : Merge same xref */
38226 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38227 xref_tmphash[i] = NULL;
38228diff -urNp linux-3.0.3/fs/jfs/super.c linux-3.0.3/fs/jfs/super.c
38229--- linux-3.0.3/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
38230+++ linux-3.0.3/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
38231@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38232
38233 jfs_inode_cachep =
38234 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38235- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38236+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38237 init_once);
38238 if (jfs_inode_cachep == NULL)
38239 return -ENOMEM;
38240diff -urNp linux-3.0.3/fs/Kconfig.binfmt linux-3.0.3/fs/Kconfig.binfmt
38241--- linux-3.0.3/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
38242+++ linux-3.0.3/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
38243@@ -86,7 +86,7 @@ config HAVE_AOUT
38244
38245 config BINFMT_AOUT
38246 tristate "Kernel support for a.out and ECOFF binaries"
38247- depends on HAVE_AOUT
38248+ depends on HAVE_AOUT && BROKEN
38249 ---help---
38250 A.out (Assembler.OUTput) is a set of formats for libraries and
38251 executables used in the earliest versions of UNIX. Linux used
38252diff -urNp linux-3.0.3/fs/libfs.c linux-3.0.3/fs/libfs.c
38253--- linux-3.0.3/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
38254+++ linux-3.0.3/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
38255@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38256
38257 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38258 struct dentry *next;
38259+ char d_name[sizeof(next->d_iname)];
38260+ const unsigned char *name;
38261+
38262 next = list_entry(p, struct dentry, d_u.d_child);
38263 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38264 if (!simple_positive(next)) {
38265@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38266
38267 spin_unlock(&next->d_lock);
38268 spin_unlock(&dentry->d_lock);
38269- if (filldir(dirent, next->d_name.name,
38270+ name = next->d_name.name;
38271+ if (name == next->d_iname) {
38272+ memcpy(d_name, name, next->d_name.len);
38273+ name = d_name;
38274+ }
38275+ if (filldir(dirent, name,
38276 next->d_name.len, filp->f_pos,
38277 next->d_inode->i_ino,
38278 dt_type(next->d_inode)) < 0)
38279diff -urNp linux-3.0.3/fs/lockd/clntproc.c linux-3.0.3/fs/lockd/clntproc.c
38280--- linux-3.0.3/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
38281+++ linux-3.0.3/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
38282@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38283 /*
38284 * Cookie counter for NLM requests
38285 */
38286-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38287+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38288
38289 void nlmclnt_next_cookie(struct nlm_cookie *c)
38290 {
38291- u32 cookie = atomic_inc_return(&nlm_cookie);
38292+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38293
38294 memcpy(c->data, &cookie, 4);
38295 c->len=4;
38296@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38297 struct nlm_rqst reqst, *req;
38298 int status;
38299
38300+ pax_track_stack();
38301+
38302 req = &reqst;
38303 memset(req, 0, sizeof(*req));
38304 locks_init_lock(&req->a_args.lock.fl);
38305diff -urNp linux-3.0.3/fs/locks.c linux-3.0.3/fs/locks.c
38306--- linux-3.0.3/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
38307+++ linux-3.0.3/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
38308@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38309 return;
38310
38311 if (filp->f_op && filp->f_op->flock) {
38312- struct file_lock fl = {
38313+ struct file_lock flock = {
38314 .fl_pid = current->tgid,
38315 .fl_file = filp,
38316 .fl_flags = FL_FLOCK,
38317 .fl_type = F_UNLCK,
38318 .fl_end = OFFSET_MAX,
38319 };
38320- filp->f_op->flock(filp, F_SETLKW, &fl);
38321- if (fl.fl_ops && fl.fl_ops->fl_release_private)
38322- fl.fl_ops->fl_release_private(&fl);
38323+ filp->f_op->flock(filp, F_SETLKW, &flock);
38324+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
38325+ flock.fl_ops->fl_release_private(&flock);
38326 }
38327
38328 lock_flocks();
38329diff -urNp linux-3.0.3/fs/logfs/super.c linux-3.0.3/fs/logfs/super.c
38330--- linux-3.0.3/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38331+++ linux-3.0.3/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38332@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38333 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38334 int err, valid0, valid1;
38335
38336+ pax_track_stack();
38337+
38338 /* read first superblock */
38339 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38340 if (err)
38341diff -urNp linux-3.0.3/fs/namei.c linux-3.0.3/fs/namei.c
38342--- linux-3.0.3/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38343+++ linux-3.0.3/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38344@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38345 return ret;
38346
38347 /*
38348- * Read/write DACs are always overridable.
38349- * Executable DACs are overridable for all directories and
38350- * for non-directories that have least one exec bit set.
38351+ * Searching includes executable on directories, else just read.
38352 */
38353- if (!(mask & MAY_EXEC) || execute_ok(inode))
38354- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38355+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38356+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38357+#ifdef CONFIG_GRKERNSEC
38358+ if (flags & IPERM_FLAG_RCU)
38359+ return -ECHILD;
38360+#endif
38361+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38362 return 0;
38363+ }
38364
38365 /*
38366- * Searching includes executable on directories, else just read.
38367+ * Read/write DACs are always overridable.
38368+ * Executable DACs are overridable for all directories and
38369+ * for non-directories that have least one exec bit set.
38370 */
38371- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38372- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38373- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38374+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38375+#ifdef CONFIG_GRKERNSEC
38376+ if (flags & IPERM_FLAG_RCU)
38377+ return -ECHILD;
38378+#endif
38379+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38380 return 0;
38381+ }
38382
38383 return -EACCES;
38384 }
38385@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38386 br_read_unlock(vfsmount_lock);
38387 }
38388
38389+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38390+ return -ENOENT;
38391+
38392 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38393 return 0;
38394
38395@@ -593,9 +606,16 @@ static inline int exec_permission(struct
38396 if (ret == -ECHILD)
38397 return ret;
38398
38399- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38400- ns_capable(ns, CAP_DAC_READ_SEARCH))
38401+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38402 goto ok;
38403+ else {
38404+#ifdef CONFIG_GRKERNSEC
38405+ if (flags & IPERM_FLAG_RCU)
38406+ return -ECHILD;
38407+#endif
38408+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38409+ goto ok;
38410+ }
38411
38412 return ret;
38413 ok:
38414@@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38415 return error;
38416 }
38417
38418+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
38419+ dentry->d_inode, dentry, nd->path.mnt)) {
38420+ error = -EACCES;
38421+ *p = ERR_PTR(error); /* no ->put_link(), please */
38422+ path_put(&nd->path);
38423+ return error;
38424+ }
38425+
38426 nd->last_type = LAST_BIND;
38427 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38428 error = PTR_ERR(*p);
38429 if (!IS_ERR(*p)) {
38430- char *s = nd_get_link(nd);
38431+ const char *s = nd_get_link(nd);
38432 error = 0;
38433 if (s)
38434 error = __vfs_follow_link(nd, s);
38435@@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38436 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38437
38438 if (likely(!retval)) {
38439+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38440+ return -ENOENT;
38441+
38442 if (unlikely(!audit_dummy_context())) {
38443 if (nd->path.dentry && nd->inode)
38444 audit_inode(name, nd->path.dentry);
38445@@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38446 return error;
38447 }
38448
38449+/*
38450+ * Note that while the flag value (low two bits) for sys_open means:
38451+ * 00 - read-only
38452+ * 01 - write-only
38453+ * 10 - read-write
38454+ * 11 - special
38455+ * it is changed into
38456+ * 00 - no permissions needed
38457+ * 01 - read-permission
38458+ * 10 - write-permission
38459+ * 11 - read-write
38460+ * for the internal routines (ie open_namei()/follow_link() etc)
38461+ * This is more logical, and also allows the 00 "no perm needed"
38462+ * to be used for symlinks (where the permissions are checked
38463+ * later).
38464+ *
38465+*/
38466+static inline int open_to_namei_flags(int flag)
38467+{
38468+ if ((flag+1) & O_ACCMODE)
38469+ flag++;
38470+ return flag;
38471+}
38472+
38473 static int may_open(struct path *path, int acc_mode, int flag)
38474 {
38475 struct dentry *dentry = path->dentry;
38476@@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38477 /*
38478 * Ensure there are no outstanding leases on the file.
38479 */
38480- return break_lease(inode, flag);
38481+ error = break_lease(inode, flag);
38482+
38483+ if (error)
38484+ return error;
38485+
38486+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38487+ error = -EPERM;
38488+ goto exit;
38489+ }
38490+
38491+ if (gr_handle_rawio(inode)) {
38492+ error = -EPERM;
38493+ goto exit;
38494+ }
38495+
38496+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38497+ error = -EACCES;
38498+ goto exit;
38499+ }
38500+exit:
38501+ return error;
38502 }
38503
38504 static int handle_truncate(struct file *filp)
38505@@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38506 }
38507
38508 /*
38509- * Note that while the flag value (low two bits) for sys_open means:
38510- * 00 - read-only
38511- * 01 - write-only
38512- * 10 - read-write
38513- * 11 - special
38514- * it is changed into
38515- * 00 - no permissions needed
38516- * 01 - read-permission
38517- * 10 - write-permission
38518- * 11 - read-write
38519- * for the internal routines (ie open_namei()/follow_link() etc)
38520- * This is more logical, and also allows the 00 "no perm needed"
38521- * to be used for symlinks (where the permissions are checked
38522- * later).
38523- *
38524-*/
38525-static inline int open_to_namei_flags(int flag)
38526-{
38527- if ((flag+1) & O_ACCMODE)
38528- flag++;
38529- return flag;
38530-}
38531-
38532-/*
38533 * Handle the last step of open()
38534 */
38535 static struct file *do_last(struct nameidata *nd, struct path *path,
38536@@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38537 struct dentry *dir = nd->path.dentry;
38538 struct dentry *dentry;
38539 int open_flag = op->open_flag;
38540+ int flag = open_to_namei_flags(open_flag);
38541 int will_truncate = open_flag & O_TRUNC;
38542 int want_write = 0;
38543 int acc_mode = op->acc_mode;
38544@@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38545 /* Negative dentry, just create the file */
38546 if (!dentry->d_inode) {
38547 int mode = op->mode;
38548+
38549+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38550+ error = -EACCES;
38551+ goto exit_mutex_unlock;
38552+ }
38553+
38554 if (!IS_POSIXACL(dir->d_inode))
38555 mode &= ~current_umask();
38556 /*
38557@@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38558 error = vfs_create(dir->d_inode, dentry, mode, nd);
38559 if (error)
38560 goto exit_mutex_unlock;
38561+ else
38562+ gr_handle_create(path->dentry, path->mnt);
38563 mutex_unlock(&dir->d_inode->i_mutex);
38564 dput(nd->path.dentry);
38565 nd->path.dentry = dentry;
38566@@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38567 /*
38568 * It already exists.
38569 */
38570+
38571+ /* only check if O_CREAT is specified, all other checks need to go
38572+ into may_open */
38573+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38574+ error = -EACCES;
38575+ goto exit_mutex_unlock;
38576+ }
38577+
38578 mutex_unlock(&dir->d_inode->i_mutex);
38579 audit_inode(pathname, path->dentry);
38580
38581@@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38582 error = may_mknod(mode);
38583 if (error)
38584 goto out_dput;
38585+
38586+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38587+ error = -EPERM;
38588+ goto out_dput;
38589+ }
38590+
38591+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38592+ error = -EACCES;
38593+ goto out_dput;
38594+ }
38595+
38596 error = mnt_want_write(nd.path.mnt);
38597 if (error)
38598 goto out_dput;
38599@@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38600 }
38601 out_drop_write:
38602 mnt_drop_write(nd.path.mnt);
38603+
38604+ if (!error)
38605+ gr_handle_create(dentry, nd.path.mnt);
38606 out_dput:
38607 dput(dentry);
38608 out_unlock:
38609@@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38610 if (IS_ERR(dentry))
38611 goto out_unlock;
38612
38613+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38614+ error = -EACCES;
38615+ goto out_dput;
38616+ }
38617+
38618 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38619 mode &= ~current_umask();
38620 error = mnt_want_write(nd.path.mnt);
38621@@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38622 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38623 out_drop_write:
38624 mnt_drop_write(nd.path.mnt);
38625+
38626+ if (!error)
38627+ gr_handle_create(dentry, nd.path.mnt);
38628+
38629 out_dput:
38630 dput(dentry);
38631 out_unlock:
38632@@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38633 char * name;
38634 struct dentry *dentry;
38635 struct nameidata nd;
38636+ ino_t saved_ino = 0;
38637+ dev_t saved_dev = 0;
38638
38639 error = user_path_parent(dfd, pathname, &nd, &name);
38640 if (error)
38641@@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38642 error = -ENOENT;
38643 goto exit3;
38644 }
38645+
38646+ if (dentry->d_inode->i_nlink <= 1) {
38647+ saved_ino = dentry->d_inode->i_ino;
38648+ saved_dev = gr_get_dev_from_dentry(dentry);
38649+ }
38650+
38651+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38652+ error = -EACCES;
38653+ goto exit3;
38654+ }
38655+
38656 error = mnt_want_write(nd.path.mnt);
38657 if (error)
38658 goto exit3;
38659@@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38660 if (error)
38661 goto exit4;
38662 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38663+ if (!error && (saved_dev || saved_ino))
38664+ gr_handle_delete(saved_ino, saved_dev);
38665 exit4:
38666 mnt_drop_write(nd.path.mnt);
38667 exit3:
38668@@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38669 struct dentry *dentry;
38670 struct nameidata nd;
38671 struct inode *inode = NULL;
38672+ ino_t saved_ino = 0;
38673+ dev_t saved_dev = 0;
38674
38675 error = user_path_parent(dfd, pathname, &nd, &name);
38676 if (error)
38677@@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38678 if (!inode)
38679 goto slashes;
38680 ihold(inode);
38681+
38682+ if (inode->i_nlink <= 1) {
38683+ saved_ino = inode->i_ino;
38684+ saved_dev = gr_get_dev_from_dentry(dentry);
38685+ }
38686+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38687+ error = -EACCES;
38688+ goto exit2;
38689+ }
38690+
38691 error = mnt_want_write(nd.path.mnt);
38692 if (error)
38693 goto exit2;
38694@@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38695 if (error)
38696 goto exit3;
38697 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38698+ if (!error && (saved_ino || saved_dev))
38699+ gr_handle_delete(saved_ino, saved_dev);
38700 exit3:
38701 mnt_drop_write(nd.path.mnt);
38702 exit2:
38703@@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38704 if (IS_ERR(dentry))
38705 goto out_unlock;
38706
38707+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38708+ error = -EACCES;
38709+ goto out_dput;
38710+ }
38711+
38712 error = mnt_want_write(nd.path.mnt);
38713 if (error)
38714 goto out_dput;
38715@@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38716 if (error)
38717 goto out_drop_write;
38718 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38719+ if (!error)
38720+ gr_handle_create(dentry, nd.path.mnt);
38721 out_drop_write:
38722 mnt_drop_write(nd.path.mnt);
38723 out_dput:
38724@@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38725 error = PTR_ERR(new_dentry);
38726 if (IS_ERR(new_dentry))
38727 goto out_unlock;
38728+
38729+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38730+ old_path.dentry->d_inode,
38731+ old_path.dentry->d_inode->i_mode, to)) {
38732+ error = -EACCES;
38733+ goto out_dput;
38734+ }
38735+
38736+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38737+ old_path.dentry, old_path.mnt, to)) {
38738+ error = -EACCES;
38739+ goto out_dput;
38740+ }
38741+
38742 error = mnt_want_write(nd.path.mnt);
38743 if (error)
38744 goto out_dput;
38745@@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38746 if (error)
38747 goto out_drop_write;
38748 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38749+ if (!error)
38750+ gr_handle_create(new_dentry, nd.path.mnt);
38751 out_drop_write:
38752 mnt_drop_write(nd.path.mnt);
38753 out_dput:
38754@@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38755 char *to;
38756 int error;
38757
38758+ pax_track_stack();
38759+
38760 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38761 if (error)
38762 goto exit;
38763@@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38764 if (new_dentry == trap)
38765 goto exit5;
38766
38767+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38768+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
38769+ to);
38770+ if (error)
38771+ goto exit5;
38772+
38773 error = mnt_want_write(oldnd.path.mnt);
38774 if (error)
38775 goto exit5;
38776@@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38777 goto exit6;
38778 error = vfs_rename(old_dir->d_inode, old_dentry,
38779 new_dir->d_inode, new_dentry);
38780+ if (!error)
38781+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38782+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38783 exit6:
38784 mnt_drop_write(oldnd.path.mnt);
38785 exit5:
38786@@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38787
38788 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38789 {
38790+ char tmpbuf[64];
38791+ const char *newlink;
38792 int len;
38793
38794 len = PTR_ERR(link);
38795@@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38796 len = strlen(link);
38797 if (len > (unsigned) buflen)
38798 len = buflen;
38799- if (copy_to_user(buffer, link, len))
38800+
38801+ if (len < sizeof(tmpbuf)) {
38802+ memcpy(tmpbuf, link, len);
38803+ newlink = tmpbuf;
38804+ } else
38805+ newlink = link;
38806+
38807+ if (copy_to_user(buffer, newlink, len))
38808 len = -EFAULT;
38809 out:
38810 return len;
38811diff -urNp linux-3.0.3/fs/namespace.c linux-3.0.3/fs/namespace.c
38812--- linux-3.0.3/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38813+++ linux-3.0.3/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38814@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38815 if (!(sb->s_flags & MS_RDONLY))
38816 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38817 up_write(&sb->s_umount);
38818+
38819+ gr_log_remount(mnt->mnt_devname, retval);
38820+
38821 return retval;
38822 }
38823
38824@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38825 br_write_unlock(vfsmount_lock);
38826 up_write(&namespace_sem);
38827 release_mounts(&umount_list);
38828+
38829+ gr_log_unmount(mnt->mnt_devname, retval);
38830+
38831 return retval;
38832 }
38833
38834@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38835 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38836 MS_STRICTATIME);
38837
38838+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38839+ retval = -EPERM;
38840+ goto dput_out;
38841+ }
38842+
38843+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38844+ retval = -EPERM;
38845+ goto dput_out;
38846+ }
38847+
38848 if (flags & MS_REMOUNT)
38849 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38850 data_page);
38851@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38852 dev_name, data_page);
38853 dput_out:
38854 path_put(&path);
38855+
38856+ gr_log_mount(dev_name, dir_name, retval);
38857+
38858 return retval;
38859 }
38860
38861@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38862 if (error)
38863 goto out2;
38864
38865+ if (gr_handle_chroot_pivot()) {
38866+ error = -EPERM;
38867+ goto out2;
38868+ }
38869+
38870 get_fs_root(current->fs, &root);
38871 error = lock_mount(&old);
38872 if (error)
38873diff -urNp linux-3.0.3/fs/ncpfs/dir.c linux-3.0.3/fs/ncpfs/dir.c
38874--- linux-3.0.3/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38875+++ linux-3.0.3/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38876@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38877 int res, val = 0, len;
38878 __u8 __name[NCP_MAXPATHLEN + 1];
38879
38880+ pax_track_stack();
38881+
38882 if (dentry == dentry->d_sb->s_root)
38883 return 1;
38884
38885@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38886 int error, res, len;
38887 __u8 __name[NCP_MAXPATHLEN + 1];
38888
38889+ pax_track_stack();
38890+
38891 error = -EIO;
38892 if (!ncp_conn_valid(server))
38893 goto finished;
38894@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38895 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38896 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38897
38898+ pax_track_stack();
38899+
38900 ncp_age_dentry(server, dentry);
38901 len = sizeof(__name);
38902 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38903@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38904 int error, len;
38905 __u8 __name[NCP_MAXPATHLEN + 1];
38906
38907+ pax_track_stack();
38908+
38909 DPRINTK("ncp_mkdir: making %s/%s\n",
38910 dentry->d_parent->d_name.name, dentry->d_name.name);
38911
38912@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38913 int old_len, new_len;
38914 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38915
38916+ pax_track_stack();
38917+
38918 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38919 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38920 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38921diff -urNp linux-3.0.3/fs/ncpfs/inode.c linux-3.0.3/fs/ncpfs/inode.c
38922--- linux-3.0.3/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38923+++ linux-3.0.3/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38924@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38925 #endif
38926 struct ncp_entry_info finfo;
38927
38928+ pax_track_stack();
38929+
38930 memset(&data, 0, sizeof(data));
38931 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38932 if (!server)
38933diff -urNp linux-3.0.3/fs/nfs/inode.c linux-3.0.3/fs/nfs/inode.c
38934--- linux-3.0.3/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38935+++ linux-3.0.3/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38936@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38937 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38938 nfsi->attrtimeo_timestamp = jiffies;
38939
38940- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38941+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38942 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38943 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38944 else
38945@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38946 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38947 }
38948
38949-static atomic_long_t nfs_attr_generation_counter;
38950+static atomic_long_unchecked_t nfs_attr_generation_counter;
38951
38952 static unsigned long nfs_read_attr_generation_counter(void)
38953 {
38954- return atomic_long_read(&nfs_attr_generation_counter);
38955+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38956 }
38957
38958 unsigned long nfs_inc_attr_generation_counter(void)
38959 {
38960- return atomic_long_inc_return(&nfs_attr_generation_counter);
38961+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38962 }
38963
38964 void nfs_fattr_init(struct nfs_fattr *fattr)
38965diff -urNp linux-3.0.3/fs/nfsd/nfs4state.c linux-3.0.3/fs/nfsd/nfs4state.c
38966--- linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38967+++ linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38968@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38969 unsigned int strhashval;
38970 int err;
38971
38972+ pax_track_stack();
38973+
38974 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38975 (long long) lock->lk_offset,
38976 (long long) lock->lk_length);
38977diff -urNp linux-3.0.3/fs/nfsd/nfs4xdr.c linux-3.0.3/fs/nfsd/nfs4xdr.c
38978--- linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38979+++ linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38980@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38981 .dentry = dentry,
38982 };
38983
38984+ pax_track_stack();
38985+
38986 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38987 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38988 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38989diff -urNp linux-3.0.3/fs/nfsd/vfs.c linux-3.0.3/fs/nfsd/vfs.c
38990--- linux-3.0.3/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
38991+++ linux-3.0.3/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
38992@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38993 } else {
38994 oldfs = get_fs();
38995 set_fs(KERNEL_DS);
38996- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38997+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38998 set_fs(oldfs);
38999 }
39000
39001@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39002
39003 /* Write the data. */
39004 oldfs = get_fs(); set_fs(KERNEL_DS);
39005- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39006+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39007 set_fs(oldfs);
39008 if (host_err < 0)
39009 goto out_nfserr;
39010@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39011 */
39012
39013 oldfs = get_fs(); set_fs(KERNEL_DS);
39014- host_err = inode->i_op->readlink(dentry, buf, *lenp);
39015+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39016 set_fs(oldfs);
39017
39018 if (host_err < 0)
39019diff -urNp linux-3.0.3/fs/notify/fanotify/fanotify_user.c linux-3.0.3/fs/notify/fanotify/fanotify_user.c
39020--- linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
39021+++ linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
39022@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39023 goto out_close_fd;
39024
39025 ret = -EFAULT;
39026- if (copy_to_user(buf, &fanotify_event_metadata,
39027+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39028+ copy_to_user(buf, &fanotify_event_metadata,
39029 fanotify_event_metadata.event_len))
39030 goto out_kill_access_response;
39031
39032diff -urNp linux-3.0.3/fs/notify/notification.c linux-3.0.3/fs/notify/notification.c
39033--- linux-3.0.3/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
39034+++ linux-3.0.3/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
39035@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39036 * get set to 0 so it will never get 'freed'
39037 */
39038 static struct fsnotify_event *q_overflow_event;
39039-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39040+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39041
39042 /**
39043 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39044@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39045 */
39046 u32 fsnotify_get_cookie(void)
39047 {
39048- return atomic_inc_return(&fsnotify_sync_cookie);
39049+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39050 }
39051 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39052
39053diff -urNp linux-3.0.3/fs/ntfs/dir.c linux-3.0.3/fs/ntfs/dir.c
39054--- linux-3.0.3/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39055+++ linux-3.0.3/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39056@@ -1329,7 +1329,7 @@ find_next_index_buffer:
39057 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39058 ~(s64)(ndir->itype.index.block_size - 1)));
39059 /* Bounds checks. */
39060- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39061+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39062 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39063 "inode 0x%lx or driver bug.", vdir->i_ino);
39064 goto err_out;
39065diff -urNp linux-3.0.3/fs/ntfs/file.c linux-3.0.3/fs/ntfs/file.c
39066--- linux-3.0.3/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
39067+++ linux-3.0.3/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
39068@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39069 #endif /* NTFS_RW */
39070 };
39071
39072-const struct file_operations ntfs_empty_file_ops = {};
39073+const struct file_operations ntfs_empty_file_ops __read_only;
39074
39075-const struct inode_operations ntfs_empty_inode_ops = {};
39076+const struct inode_operations ntfs_empty_inode_ops __read_only;
39077diff -urNp linux-3.0.3/fs/ocfs2/localalloc.c linux-3.0.3/fs/ocfs2/localalloc.c
39078--- linux-3.0.3/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
39079+++ linux-3.0.3/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
39080@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39081 goto bail;
39082 }
39083
39084- atomic_inc(&osb->alloc_stats.moves);
39085+ atomic_inc_unchecked(&osb->alloc_stats.moves);
39086
39087 bail:
39088 if (handle)
39089diff -urNp linux-3.0.3/fs/ocfs2/namei.c linux-3.0.3/fs/ocfs2/namei.c
39090--- linux-3.0.3/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
39091+++ linux-3.0.3/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
39092@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39093 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39094 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39095
39096+ pax_track_stack();
39097+
39098 /* At some point it might be nice to break this function up a
39099 * bit. */
39100
39101diff -urNp linux-3.0.3/fs/ocfs2/ocfs2.h linux-3.0.3/fs/ocfs2/ocfs2.h
39102--- linux-3.0.3/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
39103+++ linux-3.0.3/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
39104@@ -235,11 +235,11 @@ enum ocfs2_vol_state
39105
39106 struct ocfs2_alloc_stats
39107 {
39108- atomic_t moves;
39109- atomic_t local_data;
39110- atomic_t bitmap_data;
39111- atomic_t bg_allocs;
39112- atomic_t bg_extends;
39113+ atomic_unchecked_t moves;
39114+ atomic_unchecked_t local_data;
39115+ atomic_unchecked_t bitmap_data;
39116+ atomic_unchecked_t bg_allocs;
39117+ atomic_unchecked_t bg_extends;
39118 };
39119
39120 enum ocfs2_local_alloc_state
39121diff -urNp linux-3.0.3/fs/ocfs2/suballoc.c linux-3.0.3/fs/ocfs2/suballoc.c
39122--- linux-3.0.3/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
39123+++ linux-3.0.3/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
39124@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39125 mlog_errno(status);
39126 goto bail;
39127 }
39128- atomic_inc(&osb->alloc_stats.bg_extends);
39129+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39130
39131 /* You should never ask for this much metadata */
39132 BUG_ON(bits_wanted >
39133@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39134 mlog_errno(status);
39135 goto bail;
39136 }
39137- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39138+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39139
39140 *suballoc_loc = res.sr_bg_blkno;
39141 *suballoc_bit_start = res.sr_bit_offset;
39142@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39143 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39144 res->sr_bits);
39145
39146- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39147+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39148
39149 BUG_ON(res->sr_bits != 1);
39150
39151@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39152 mlog_errno(status);
39153 goto bail;
39154 }
39155- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39156+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39157
39158 BUG_ON(res.sr_bits != 1);
39159
39160@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39161 cluster_start,
39162 num_clusters);
39163 if (!status)
39164- atomic_inc(&osb->alloc_stats.local_data);
39165+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
39166 } else {
39167 if (min_clusters > (osb->bitmap_cpg - 1)) {
39168 /* The only paths asking for contiguousness
39169@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39170 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39171 res.sr_bg_blkno,
39172 res.sr_bit_offset);
39173- atomic_inc(&osb->alloc_stats.bitmap_data);
39174+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39175 *num_clusters = res.sr_bits;
39176 }
39177 }
39178diff -urNp linux-3.0.3/fs/ocfs2/super.c linux-3.0.3/fs/ocfs2/super.c
39179--- linux-3.0.3/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
39180+++ linux-3.0.3/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
39181@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39182 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39183 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39184 "Stats",
39185- atomic_read(&osb->alloc_stats.bitmap_data),
39186- atomic_read(&osb->alloc_stats.local_data),
39187- atomic_read(&osb->alloc_stats.bg_allocs),
39188- atomic_read(&osb->alloc_stats.moves),
39189- atomic_read(&osb->alloc_stats.bg_extends));
39190+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39191+ atomic_read_unchecked(&osb->alloc_stats.local_data),
39192+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39193+ atomic_read_unchecked(&osb->alloc_stats.moves),
39194+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39195
39196 out += snprintf(buf + out, len - out,
39197 "%10s => State: %u Descriptor: %llu Size: %u bits "
39198@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
39199 spin_lock_init(&osb->osb_xattr_lock);
39200 ocfs2_init_steal_slots(osb);
39201
39202- atomic_set(&osb->alloc_stats.moves, 0);
39203- atomic_set(&osb->alloc_stats.local_data, 0);
39204- atomic_set(&osb->alloc_stats.bitmap_data, 0);
39205- atomic_set(&osb->alloc_stats.bg_allocs, 0);
39206- atomic_set(&osb->alloc_stats.bg_extends, 0);
39207+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39208+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39209+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39210+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39211+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39212
39213 /* Copy the blockcheck stats from the superblock probe */
39214 osb->osb_ecc_stats = *stats;
39215diff -urNp linux-3.0.3/fs/ocfs2/symlink.c linux-3.0.3/fs/ocfs2/symlink.c
39216--- linux-3.0.3/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
39217+++ linux-3.0.3/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
39218@@ -142,7 +142,7 @@ bail:
39219
39220 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39221 {
39222- char *link = nd_get_link(nd);
39223+ const char *link = nd_get_link(nd);
39224 if (!IS_ERR(link))
39225 kfree(link);
39226 }
39227diff -urNp linux-3.0.3/fs/open.c linux-3.0.3/fs/open.c
39228--- linux-3.0.3/fs/open.c 2011-07-21 22:17:23.000000000 -0400
39229+++ linux-3.0.3/fs/open.c 2011-08-23 21:48:14.000000000 -0400
39230@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39231 error = locks_verify_truncate(inode, NULL, length);
39232 if (!error)
39233 error = security_path_truncate(&path);
39234+
39235+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39236+ error = -EACCES;
39237+
39238 if (!error)
39239 error = do_truncate(path.dentry, length, 0, NULL);
39240
39241@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39242 if (__mnt_is_readonly(path.mnt))
39243 res = -EROFS;
39244
39245+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39246+ res = -EACCES;
39247+
39248 out_path_release:
39249 path_put(&path);
39250 out:
39251@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39252 if (error)
39253 goto dput_and_out;
39254
39255+ gr_log_chdir(path.dentry, path.mnt);
39256+
39257 set_fs_pwd(current->fs, &path);
39258
39259 dput_and_out:
39260@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39261 goto out_putf;
39262
39263 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39264+
39265+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39266+ error = -EPERM;
39267+
39268+ if (!error)
39269+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39270+
39271 if (!error)
39272 set_fs_pwd(current->fs, &file->f_path);
39273 out_putf:
39274@@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39275 if (error)
39276 goto dput_and_out;
39277
39278+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39279+ goto dput_and_out;
39280+
39281+ if (gr_handle_chroot_caps(&path)) {
39282+ error = -ENOMEM;
39283+ goto dput_and_out;
39284+ }
39285+
39286 set_fs_root(current->fs, &path);
39287+
39288+ gr_handle_chroot_chdir(&path);
39289+
39290 error = 0;
39291 dput_and_out:
39292 path_put(&path);
39293@@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39294 err = mnt_want_write_file(file);
39295 if (err)
39296 goto out_putf;
39297+
39298 mutex_lock(&inode->i_mutex);
39299+
39300+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39301+ err = -EACCES;
39302+ goto out_unlock;
39303+ }
39304+
39305 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39306 if (err)
39307 goto out_unlock;
39308 if (mode == (mode_t) -1)
39309 mode = inode->i_mode;
39310+
39311+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39312+ err = -EACCES;
39313+ goto out_unlock;
39314+ }
39315+
39316 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39317 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39318 err = notify_change(dentry, &newattrs);
39319@@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39320 error = mnt_want_write(path.mnt);
39321 if (error)
39322 goto dput_and_out;
39323+
39324 mutex_lock(&inode->i_mutex);
39325+
39326+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39327+ error = -EACCES;
39328+ goto out_unlock;
39329+ }
39330+
39331 error = security_path_chmod(path.dentry, path.mnt, mode);
39332 if (error)
39333 goto out_unlock;
39334 if (mode == (mode_t) -1)
39335 mode = inode->i_mode;
39336+
39337+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39338+ error = -EACCES;
39339+ goto out_unlock;
39340+ }
39341+
39342 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39343 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39344 error = notify_change(path.dentry, &newattrs);
39345@@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39346 int error;
39347 struct iattr newattrs;
39348
39349+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
39350+ return -EACCES;
39351+
39352 newattrs.ia_valid = ATTR_CTIME;
39353 if (user != (uid_t) -1) {
39354 newattrs.ia_valid |= ATTR_UID;
39355@@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39356 if (!IS_ERR(tmp)) {
39357 fd = get_unused_fd_flags(flags);
39358 if (fd >= 0) {
39359- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39360+ struct file *f;
39361+ /* don't allow to be set by userland */
39362+ flags &= ~FMODE_GREXEC;
39363+ f = do_filp_open(dfd, tmp, &op, lookup);
39364 if (IS_ERR(f)) {
39365 put_unused_fd(fd);
39366 fd = PTR_ERR(f);
39367diff -urNp linux-3.0.3/fs/partitions/ldm.c linux-3.0.3/fs/partitions/ldm.c
39368--- linux-3.0.3/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39369+++ linux-3.0.3/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39370@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39371 ldm_error ("A VBLK claims to have %d parts.", num);
39372 return false;
39373 }
39374+
39375 if (rec >= num) {
39376 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39377 return false;
39378@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39379 goto found;
39380 }
39381
39382- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39383+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39384 if (!f) {
39385 ldm_crit ("Out of memory.");
39386 return false;
39387diff -urNp linux-3.0.3/fs/pipe.c linux-3.0.3/fs/pipe.c
39388--- linux-3.0.3/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39389+++ linux-3.0.3/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39390@@ -420,9 +420,9 @@ redo:
39391 }
39392 if (bufs) /* More to do? */
39393 continue;
39394- if (!pipe->writers)
39395+ if (!atomic_read(&pipe->writers))
39396 break;
39397- if (!pipe->waiting_writers) {
39398+ if (!atomic_read(&pipe->waiting_writers)) {
39399 /* syscall merging: Usually we must not sleep
39400 * if O_NONBLOCK is set, or if we got some data.
39401 * But if a writer sleeps in kernel space, then
39402@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39403 mutex_lock(&inode->i_mutex);
39404 pipe = inode->i_pipe;
39405
39406- if (!pipe->readers) {
39407+ if (!atomic_read(&pipe->readers)) {
39408 send_sig(SIGPIPE, current, 0);
39409 ret = -EPIPE;
39410 goto out;
39411@@ -530,7 +530,7 @@ redo1:
39412 for (;;) {
39413 int bufs;
39414
39415- if (!pipe->readers) {
39416+ if (!atomic_read(&pipe->readers)) {
39417 send_sig(SIGPIPE, current, 0);
39418 if (!ret)
39419 ret = -EPIPE;
39420@@ -616,9 +616,9 @@ redo2:
39421 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39422 do_wakeup = 0;
39423 }
39424- pipe->waiting_writers++;
39425+ atomic_inc(&pipe->waiting_writers);
39426 pipe_wait(pipe);
39427- pipe->waiting_writers--;
39428+ atomic_dec(&pipe->waiting_writers);
39429 }
39430 out:
39431 mutex_unlock(&inode->i_mutex);
39432@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39433 mask = 0;
39434 if (filp->f_mode & FMODE_READ) {
39435 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39436- if (!pipe->writers && filp->f_version != pipe->w_counter)
39437+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39438 mask |= POLLHUP;
39439 }
39440
39441@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39442 * Most Unices do not set POLLERR for FIFOs but on Linux they
39443 * behave exactly like pipes for poll().
39444 */
39445- if (!pipe->readers)
39446+ if (!atomic_read(&pipe->readers))
39447 mask |= POLLERR;
39448 }
39449
39450@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39451
39452 mutex_lock(&inode->i_mutex);
39453 pipe = inode->i_pipe;
39454- pipe->readers -= decr;
39455- pipe->writers -= decw;
39456+ atomic_sub(decr, &pipe->readers);
39457+ atomic_sub(decw, &pipe->writers);
39458
39459- if (!pipe->readers && !pipe->writers) {
39460+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39461 free_pipe_info(inode);
39462 } else {
39463 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39464@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39465
39466 if (inode->i_pipe) {
39467 ret = 0;
39468- inode->i_pipe->readers++;
39469+ atomic_inc(&inode->i_pipe->readers);
39470 }
39471
39472 mutex_unlock(&inode->i_mutex);
39473@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39474
39475 if (inode->i_pipe) {
39476 ret = 0;
39477- inode->i_pipe->writers++;
39478+ atomic_inc(&inode->i_pipe->writers);
39479 }
39480
39481 mutex_unlock(&inode->i_mutex);
39482@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39483 if (inode->i_pipe) {
39484 ret = 0;
39485 if (filp->f_mode & FMODE_READ)
39486- inode->i_pipe->readers++;
39487+ atomic_inc(&inode->i_pipe->readers);
39488 if (filp->f_mode & FMODE_WRITE)
39489- inode->i_pipe->writers++;
39490+ atomic_inc(&inode->i_pipe->writers);
39491 }
39492
39493 mutex_unlock(&inode->i_mutex);
39494@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39495 inode->i_pipe = NULL;
39496 }
39497
39498-static struct vfsmount *pipe_mnt __read_mostly;
39499+struct vfsmount *pipe_mnt __read_mostly;
39500
39501 /*
39502 * pipefs_dname() is called from d_path().
39503@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39504 goto fail_iput;
39505 inode->i_pipe = pipe;
39506
39507- pipe->readers = pipe->writers = 1;
39508+ atomic_set(&pipe->readers, 1);
39509+ atomic_set(&pipe->writers, 1);
39510 inode->i_fop = &rdwr_pipefifo_fops;
39511
39512 /*
39513diff -urNp linux-3.0.3/fs/proc/array.c linux-3.0.3/fs/proc/array.c
39514--- linux-3.0.3/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39515+++ linux-3.0.3/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39516@@ -60,6 +60,7 @@
39517 #include <linux/tty.h>
39518 #include <linux/string.h>
39519 #include <linux/mman.h>
39520+#include <linux/grsecurity.h>
39521 #include <linux/proc_fs.h>
39522 #include <linux/ioport.h>
39523 #include <linux/uaccess.h>
39524@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39525 seq_putc(m, '\n');
39526 }
39527
39528+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39529+static inline void task_pax(struct seq_file *m, struct task_struct *p)
39530+{
39531+ if (p->mm)
39532+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39533+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39534+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39535+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39536+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39537+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39538+ else
39539+ seq_printf(m, "PaX:\t-----\n");
39540+}
39541+#endif
39542+
39543 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39544 struct pid *pid, struct task_struct *task)
39545 {
39546@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39547 task_cpus_allowed(m, task);
39548 cpuset_task_status_allowed(m, task);
39549 task_context_switch_counts(m, task);
39550+
39551+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39552+ task_pax(m, task);
39553+#endif
39554+
39555+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39556+ task_grsec_rbac(m, task);
39557+#endif
39558+
39559 return 0;
39560 }
39561
39562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39563+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39564+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39565+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39566+#endif
39567+
39568 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39569 struct pid *pid, struct task_struct *task, int whole)
39570 {
39571@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39572 cputime_t cutime, cstime, utime, stime;
39573 cputime_t cgtime, gtime;
39574 unsigned long rsslim = 0;
39575- char tcomm[sizeof(task->comm)];
39576+ char tcomm[sizeof(task->comm)] = { 0 };
39577 unsigned long flags;
39578
39579+ pax_track_stack();
39580+
39581 state = *get_task_state(task);
39582 vsize = eip = esp = 0;
39583 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39584@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39585 gtime = task->gtime;
39586 }
39587
39588+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39589+ if (PAX_RAND_FLAGS(mm)) {
39590+ eip = 0;
39591+ esp = 0;
39592+ wchan = 0;
39593+ }
39594+#endif
39595+#ifdef CONFIG_GRKERNSEC_HIDESYM
39596+ wchan = 0;
39597+ eip =0;
39598+ esp =0;
39599+#endif
39600+
39601 /* scale priority and nice values from timeslices to -20..20 */
39602 /* to make it look like a "normal" Unix priority/nice value */
39603 priority = task_prio(task);
39604@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39605 vsize,
39606 mm ? get_mm_rss(mm) : 0,
39607 rsslim,
39608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39609+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39610+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39611+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39612+#else
39613 mm ? (permitted ? mm->start_code : 1) : 0,
39614 mm ? (permitted ? mm->end_code : 1) : 0,
39615 (permitted && mm) ? mm->start_stack : 0,
39616+#endif
39617 esp,
39618 eip,
39619 /* The signal information here is obsolete.
39620@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39621
39622 return 0;
39623 }
39624+
39625+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39626+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39627+{
39628+ u32 curr_ip = 0;
39629+ unsigned long flags;
39630+
39631+ if (lock_task_sighand(task, &flags)) {
39632+ curr_ip = task->signal->curr_ip;
39633+ unlock_task_sighand(task, &flags);
39634+ }
39635+
39636+ return sprintf(buffer, "%pI4\n", &curr_ip);
39637+}
39638+#endif
39639diff -urNp linux-3.0.3/fs/proc/base.c linux-3.0.3/fs/proc/base.c
39640--- linux-3.0.3/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39641+++ linux-3.0.3/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39642@@ -107,6 +107,22 @@ struct pid_entry {
39643 union proc_op op;
39644 };
39645
39646+struct getdents_callback {
39647+ struct linux_dirent __user * current_dir;
39648+ struct linux_dirent __user * previous;
39649+ struct file * file;
39650+ int count;
39651+ int error;
39652+};
39653+
39654+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39655+ loff_t offset, u64 ino, unsigned int d_type)
39656+{
39657+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
39658+ buf->error = -EINVAL;
39659+ return 0;
39660+}
39661+
39662 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39663 .name = (NAME), \
39664 .len = sizeof(NAME) - 1, \
39665@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39666 if (task == current)
39667 return mm;
39668
39669+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39670+ return ERR_PTR(-EPERM);
39671+
39672 /*
39673 * If current is actively ptrace'ing, and would also be
39674 * permitted to freshly attach with ptrace now, permit it.
39675@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39676 if (!mm->arg_end)
39677 goto out_mm; /* Shh! No looking before we're done */
39678
39679+ if (gr_acl_handle_procpidmem(task))
39680+ goto out_mm;
39681+
39682 len = mm->arg_end - mm->arg_start;
39683
39684 if (len > PAGE_SIZE)
39685@@ -309,12 +331,28 @@ out:
39686 return res;
39687 }
39688
39689+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39690+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39691+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
39692+ _mm->pax_flags & MF_PAX_SEGMEXEC))
39693+#endif
39694+
39695 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39696 {
39697 struct mm_struct *mm = mm_for_maps(task);
39698 int res = PTR_ERR(mm);
39699 if (mm && !IS_ERR(mm)) {
39700 unsigned int nwords = 0;
39701+
39702+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39703+ /* allow if we're currently ptracing this task */
39704+ if (PAX_RAND_FLAGS(mm) &&
39705+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39706+ mmput(mm);
39707+ return res;
39708+ }
39709+#endif
39710+
39711 do {
39712 nwords += 2;
39713 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39714@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39715 }
39716
39717
39718-#ifdef CONFIG_KALLSYMS
39719+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39720 /*
39721 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39722 * Returns the resolved symbol. If that fails, simply return the address.
39723@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39724 mutex_unlock(&task->signal->cred_guard_mutex);
39725 }
39726
39727-#ifdef CONFIG_STACKTRACE
39728+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39729
39730 #define MAX_STACK_TRACE_DEPTH 64
39731
39732@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39733 return count;
39734 }
39735
39736-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39737+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39738 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39739 {
39740 long nr;
39741@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39742 /************************************************************************/
39743
39744 /* permission checks */
39745-static int proc_fd_access_allowed(struct inode *inode)
39746+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39747 {
39748 struct task_struct *task;
39749 int allowed = 0;
39750@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39751 */
39752 task = get_proc_task(inode);
39753 if (task) {
39754- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39755+ if (log)
39756+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39757+ else
39758+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39759 put_task_struct(task);
39760 }
39761 return allowed;
39762@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39763 if (!task)
39764 goto out_no_task;
39765
39766+ if (gr_acl_handle_procpidmem(task))
39767+ goto out;
39768+
39769 ret = -ENOMEM;
39770 page = (char *)__get_free_page(GFP_TEMPORARY);
39771 if (!page)
39772@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39773 path_put(&nd->path);
39774
39775 /* Are we allowed to snoop on the tasks file descriptors? */
39776- if (!proc_fd_access_allowed(inode))
39777+ if (!proc_fd_access_allowed(inode,0))
39778 goto out;
39779
39780 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39781@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39782 struct path path;
39783
39784 /* Are we allowed to snoop on the tasks file descriptors? */
39785- if (!proc_fd_access_allowed(inode))
39786- goto out;
39787+ /* logging this is needed for learning on chromium to work properly,
39788+ but we don't want to flood the logs from 'ps' which does a readlink
39789+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39790+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
39791+ */
39792+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39793+ if (!proc_fd_access_allowed(inode,0))
39794+ goto out;
39795+ } else {
39796+ if (!proc_fd_access_allowed(inode,1))
39797+ goto out;
39798+ }
39799
39800 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39801 if (error)
39802@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39803 rcu_read_lock();
39804 cred = __task_cred(task);
39805 inode->i_uid = cred->euid;
39806+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39807+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39808+#else
39809 inode->i_gid = cred->egid;
39810+#endif
39811 rcu_read_unlock();
39812 }
39813 security_task_to_inode(task, inode);
39814@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39815 struct inode *inode = dentry->d_inode;
39816 struct task_struct *task;
39817 const struct cred *cred;
39818+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39819+ const struct cred *tmpcred = current_cred();
39820+#endif
39821
39822 generic_fillattr(inode, stat);
39823
39824@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39825 stat->uid = 0;
39826 stat->gid = 0;
39827 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39828+
39829+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39830+ rcu_read_unlock();
39831+ return -ENOENT;
39832+ }
39833+
39834 if (task) {
39835+ cred = __task_cred(task);
39836+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39837+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39838+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39839+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39840+#endif
39841+ ) {
39842+#endif
39843 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39844+#ifdef CONFIG_GRKERNSEC_PROC_USER
39845+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39846+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39847+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39848+#endif
39849 task_dumpable(task)) {
39850- cred = __task_cred(task);
39851 stat->uid = cred->euid;
39852+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39853+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39854+#else
39855 stat->gid = cred->egid;
39856+#endif
39857 }
39858+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39859+ } else {
39860+ rcu_read_unlock();
39861+ return -ENOENT;
39862+ }
39863+#endif
39864 }
39865 rcu_read_unlock();
39866 return 0;
39867@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39868
39869 if (task) {
39870 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39871+#ifdef CONFIG_GRKERNSEC_PROC_USER
39872+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39873+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39874+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39875+#endif
39876 task_dumpable(task)) {
39877 rcu_read_lock();
39878 cred = __task_cred(task);
39879 inode->i_uid = cred->euid;
39880+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39881+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39882+#else
39883 inode->i_gid = cred->egid;
39884+#endif
39885 rcu_read_unlock();
39886 } else {
39887 inode->i_uid = 0;
39888@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39889 int fd = proc_fd(inode);
39890
39891 if (task) {
39892- files = get_files_struct(task);
39893+ if (!gr_acl_handle_procpidmem(task))
39894+ files = get_files_struct(task);
39895 put_task_struct(task);
39896 }
39897 if (files) {
39898@@ -2169,11 +2268,21 @@ static const struct file_operations proc
39899 */
39900 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39901 {
39902+ struct task_struct *task;
39903 int rv = generic_permission(inode, mask, flags, NULL);
39904- if (rv == 0)
39905- return 0;
39906+
39907 if (task_pid(current) == proc_pid(inode))
39908 rv = 0;
39909+
39910+ task = get_proc_task(inode);
39911+ if (task == NULL)
39912+ return rv;
39913+
39914+ if (gr_acl_handle_procpidmem(task))
39915+ rv = -EACCES;
39916+
39917+ put_task_struct(task);
39918+
39919 return rv;
39920 }
39921
39922@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39923 if (!task)
39924 goto out_no_task;
39925
39926+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39927+ goto out;
39928+
39929 /*
39930 * Yes, it does not scale. And it should not. Don't add
39931 * new entries into /proc/<tgid>/ without very good reasons.
39932@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39933 if (!task)
39934 goto out_no_task;
39935
39936+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39937+ goto out;
39938+
39939 ret = 0;
39940 i = filp->f_pos;
39941 switch (i) {
39942@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39943 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39944 void *cookie)
39945 {
39946- char *s = nd_get_link(nd);
39947+ const char *s = nd_get_link(nd);
39948 if (!IS_ERR(s))
39949 __putname(s);
39950 }
39951@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39952 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39953 #endif
39954 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39955-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39956+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39957 INF("syscall", S_IRUGO, proc_pid_syscall),
39958 #endif
39959 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39960@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39961 #ifdef CONFIG_SECURITY
39962 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39963 #endif
39964-#ifdef CONFIG_KALLSYMS
39965+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39966 INF("wchan", S_IRUGO, proc_pid_wchan),
39967 #endif
39968-#ifdef CONFIG_STACKTRACE
39969+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39970 ONE("stack", S_IRUGO, proc_pid_stack),
39971 #endif
39972 #ifdef CONFIG_SCHEDSTATS
39973@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39974 #ifdef CONFIG_HARDWALL
39975 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39976 #endif
39977+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39978+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39979+#endif
39980 };
39981
39982 static int proc_tgid_base_readdir(struct file * filp,
39983@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39984 if (!inode)
39985 goto out;
39986
39987+#ifdef CONFIG_GRKERNSEC_PROC_USER
39988+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39990+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39991+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39992+#else
39993 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39994+#endif
39995 inode->i_op = &proc_tgid_base_inode_operations;
39996 inode->i_fop = &proc_tgid_base_operations;
39997 inode->i_flags|=S_IMMUTABLE;
39998@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
39999 if (!task)
40000 goto out;
40001
40002+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40003+ goto out_put_task;
40004+
40005 result = proc_pid_instantiate(dir, dentry, task, NULL);
40006+out_put_task:
40007 put_task_struct(task);
40008 out:
40009 return result;
40010@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
40011 {
40012 unsigned int nr;
40013 struct task_struct *reaper;
40014+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40015+ const struct cred *tmpcred = current_cred();
40016+ const struct cred *itercred;
40017+#endif
40018+ filldir_t __filldir = filldir;
40019 struct tgid_iter iter;
40020 struct pid_namespace *ns;
40021
40022@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
40023 for (iter = next_tgid(ns, iter);
40024 iter.task;
40025 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40026+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40027+ rcu_read_lock();
40028+ itercred = __task_cred(iter.task);
40029+#endif
40030+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40031+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40032+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40033+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40034+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40035+#endif
40036+ )
40037+#endif
40038+ )
40039+ __filldir = &gr_fake_filldir;
40040+ else
40041+ __filldir = filldir;
40042+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40043+ rcu_read_unlock();
40044+#endif
40045 filp->f_pos = iter.tgid + TGID_OFFSET;
40046- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40047+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40048 put_task_struct(iter.task);
40049 goto out;
40050 }
40051@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
40052 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40053 #endif
40054 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40055-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40056+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40057 INF("syscall", S_IRUGO, proc_pid_syscall),
40058 #endif
40059 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40060@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
40061 #ifdef CONFIG_SECURITY
40062 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40063 #endif
40064-#ifdef CONFIG_KALLSYMS
40065+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40066 INF("wchan", S_IRUGO, proc_pid_wchan),
40067 #endif
40068-#ifdef CONFIG_STACKTRACE
40069+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40070 ONE("stack", S_IRUGO, proc_pid_stack),
40071 #endif
40072 #ifdef CONFIG_SCHEDSTATS
40073diff -urNp linux-3.0.3/fs/proc/cmdline.c linux-3.0.3/fs/proc/cmdline.c
40074--- linux-3.0.3/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
40075+++ linux-3.0.3/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
40076@@ -23,7 +23,11 @@ static const struct file_operations cmdl
40077
40078 static int __init proc_cmdline_init(void)
40079 {
40080+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40081+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40082+#else
40083 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40084+#endif
40085 return 0;
40086 }
40087 module_init(proc_cmdline_init);
40088diff -urNp linux-3.0.3/fs/proc/devices.c linux-3.0.3/fs/proc/devices.c
40089--- linux-3.0.3/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
40090+++ linux-3.0.3/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
40091@@ -64,7 +64,11 @@ static const struct file_operations proc
40092
40093 static int __init proc_devices_init(void)
40094 {
40095+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40096+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40097+#else
40098 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40099+#endif
40100 return 0;
40101 }
40102 module_init(proc_devices_init);
40103diff -urNp linux-3.0.3/fs/proc/inode.c linux-3.0.3/fs/proc/inode.c
40104--- linux-3.0.3/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
40105+++ linux-3.0.3/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
40106@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
40107 if (de->mode) {
40108 inode->i_mode = de->mode;
40109 inode->i_uid = de->uid;
40110+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40111+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40112+#else
40113 inode->i_gid = de->gid;
40114+#endif
40115 }
40116 if (de->size)
40117 inode->i_size = de->size;
40118diff -urNp linux-3.0.3/fs/proc/internal.h linux-3.0.3/fs/proc/internal.h
40119--- linux-3.0.3/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
40120+++ linux-3.0.3/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
40121@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40122 struct pid *pid, struct task_struct *task);
40123 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40124 struct pid *pid, struct task_struct *task);
40125+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40126+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40127+#endif
40128 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40129
40130 extern const struct file_operations proc_maps_operations;
40131diff -urNp linux-3.0.3/fs/proc/Kconfig linux-3.0.3/fs/proc/Kconfig
40132--- linux-3.0.3/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
40133+++ linux-3.0.3/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
40134@@ -30,12 +30,12 @@ config PROC_FS
40135
40136 config PROC_KCORE
40137 bool "/proc/kcore support" if !ARM
40138- depends on PROC_FS && MMU
40139+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40140
40141 config PROC_VMCORE
40142 bool "/proc/vmcore support"
40143- depends on PROC_FS && CRASH_DUMP
40144- default y
40145+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40146+ default n
40147 help
40148 Exports the dump image of crashed kernel in ELF format.
40149
40150@@ -59,8 +59,8 @@ config PROC_SYSCTL
40151 limited in memory.
40152
40153 config PROC_PAGE_MONITOR
40154- default y
40155- depends on PROC_FS && MMU
40156+ default n
40157+ depends on PROC_FS && MMU && !GRKERNSEC
40158 bool "Enable /proc page monitoring" if EXPERT
40159 help
40160 Various /proc files exist to monitor process memory utilization:
40161diff -urNp linux-3.0.3/fs/proc/kcore.c linux-3.0.3/fs/proc/kcore.c
40162--- linux-3.0.3/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
40163+++ linux-3.0.3/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
40164@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40165 off_t offset = 0;
40166 struct kcore_list *m;
40167
40168+ pax_track_stack();
40169+
40170 /* setup ELF header */
40171 elf = (struct elfhdr *) bufp;
40172 bufp += sizeof(struct elfhdr);
40173@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40174 * the addresses in the elf_phdr on our list.
40175 */
40176 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40177- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40178+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40179+ if (tsz > buflen)
40180 tsz = buflen;
40181-
40182+
40183 while (buflen) {
40184 struct kcore_list *m;
40185
40186@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40187 kfree(elf_buf);
40188 } else {
40189 if (kern_addr_valid(start)) {
40190- unsigned long n;
40191+ char *elf_buf;
40192+ mm_segment_t oldfs;
40193
40194- n = copy_to_user(buffer, (char *)start, tsz);
40195- /*
40196- * We cannot distingush between fault on source
40197- * and fault on destination. When this happens
40198- * we clear too and hope it will trigger the
40199- * EFAULT again.
40200- */
40201- if (n) {
40202- if (clear_user(buffer + tsz - n,
40203- n))
40204+ elf_buf = kmalloc(tsz, GFP_KERNEL);
40205+ if (!elf_buf)
40206+ return -ENOMEM;
40207+ oldfs = get_fs();
40208+ set_fs(KERNEL_DS);
40209+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40210+ set_fs(oldfs);
40211+ if (copy_to_user(buffer, elf_buf, tsz)) {
40212+ kfree(elf_buf);
40213 return -EFAULT;
40214+ }
40215 }
40216+ set_fs(oldfs);
40217+ kfree(elf_buf);
40218 } else {
40219 if (clear_user(buffer, tsz))
40220 return -EFAULT;
40221@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40222
40223 static int open_kcore(struct inode *inode, struct file *filp)
40224 {
40225+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40226+ return -EPERM;
40227+#endif
40228 if (!capable(CAP_SYS_RAWIO))
40229 return -EPERM;
40230 if (kcore_need_update)
40231diff -urNp linux-3.0.3/fs/proc/meminfo.c linux-3.0.3/fs/proc/meminfo.c
40232--- linux-3.0.3/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
40233+++ linux-3.0.3/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
40234@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40235 unsigned long pages[NR_LRU_LISTS];
40236 int lru;
40237
40238+ pax_track_stack();
40239+
40240 /*
40241 * display in kilobytes.
40242 */
40243@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40244 vmi.used >> 10,
40245 vmi.largest_chunk >> 10
40246 #ifdef CONFIG_MEMORY_FAILURE
40247- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40248+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40249 #endif
40250 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40251 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40252diff -urNp linux-3.0.3/fs/proc/nommu.c linux-3.0.3/fs/proc/nommu.c
40253--- linux-3.0.3/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
40254+++ linux-3.0.3/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
40255@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40256 if (len < 1)
40257 len = 1;
40258 seq_printf(m, "%*c", len, ' ');
40259- seq_path(m, &file->f_path, "");
40260+ seq_path(m, &file->f_path, "\n\\");
40261 }
40262
40263 seq_putc(m, '\n');
40264diff -urNp linux-3.0.3/fs/proc/proc_net.c linux-3.0.3/fs/proc/proc_net.c
40265--- linux-3.0.3/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
40266+++ linux-3.0.3/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
40267@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40268 struct task_struct *task;
40269 struct nsproxy *ns;
40270 struct net *net = NULL;
40271+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40272+ const struct cred *cred = current_cred();
40273+#endif
40274+
40275+#ifdef CONFIG_GRKERNSEC_PROC_USER
40276+ if (cred->fsuid)
40277+ return net;
40278+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40279+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40280+ return net;
40281+#endif
40282
40283 rcu_read_lock();
40284 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40285diff -urNp linux-3.0.3/fs/proc/proc_sysctl.c linux-3.0.3/fs/proc/proc_sysctl.c
40286--- linux-3.0.3/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
40287+++ linux-3.0.3/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
40288@@ -8,6 +8,8 @@
40289 #include <linux/namei.h>
40290 #include "internal.h"
40291
40292+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40293+
40294 static const struct dentry_operations proc_sys_dentry_operations;
40295 static const struct file_operations proc_sys_file_operations;
40296 static const struct inode_operations proc_sys_inode_operations;
40297@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40298 if (!p)
40299 goto out;
40300
40301+ if (gr_handle_sysctl(p, MAY_EXEC))
40302+ goto out;
40303+
40304 err = ERR_PTR(-ENOMEM);
40305 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40306 if (h)
40307@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40308 if (*pos < file->f_pos)
40309 continue;
40310
40311+ if (gr_handle_sysctl(table, 0))
40312+ continue;
40313+
40314 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40315 if (res)
40316 return res;
40317@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40318 if (IS_ERR(head))
40319 return PTR_ERR(head);
40320
40321+ if (table && gr_handle_sysctl(table, MAY_EXEC))
40322+ return -ENOENT;
40323+
40324 generic_fillattr(inode, stat);
40325 if (table)
40326 stat->mode = (stat->mode & S_IFMT) | table->mode;
40327diff -urNp linux-3.0.3/fs/proc/root.c linux-3.0.3/fs/proc/root.c
40328--- linux-3.0.3/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40329+++ linux-3.0.3/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40330@@ -123,7 +123,15 @@ void __init proc_root_init(void)
40331 #ifdef CONFIG_PROC_DEVICETREE
40332 proc_device_tree_init();
40333 #endif
40334+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40335+#ifdef CONFIG_GRKERNSEC_PROC_USER
40336+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40337+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40338+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40339+#endif
40340+#else
40341 proc_mkdir("bus", NULL);
40342+#endif
40343 proc_sys_init();
40344 }
40345
40346diff -urNp linux-3.0.3/fs/proc/task_mmu.c linux-3.0.3/fs/proc/task_mmu.c
40347--- linux-3.0.3/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40348+++ linux-3.0.3/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40349@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40350 "VmExe:\t%8lu kB\n"
40351 "VmLib:\t%8lu kB\n"
40352 "VmPTE:\t%8lu kB\n"
40353- "VmSwap:\t%8lu kB\n",
40354- hiwater_vm << (PAGE_SHIFT-10),
40355+ "VmSwap:\t%8lu kB\n"
40356+
40357+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40358+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40359+#endif
40360+
40361+ ,hiwater_vm << (PAGE_SHIFT-10),
40362 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40363 mm->locked_vm << (PAGE_SHIFT-10),
40364 hiwater_rss << (PAGE_SHIFT-10),
40365@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40366 data << (PAGE_SHIFT-10),
40367 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40368 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40369- swap << (PAGE_SHIFT-10));
40370+ swap << (PAGE_SHIFT-10)
40371+
40372+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40373+ , mm->context.user_cs_base, mm->context.user_cs_limit
40374+#endif
40375+
40376+ );
40377 }
40378
40379 unsigned long task_vsize(struct mm_struct *mm)
40380@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40381 return ret;
40382 }
40383
40384+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40385+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40386+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
40387+ _mm->pax_flags & MF_PAX_SEGMEXEC))
40388+#endif
40389+
40390 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40391 {
40392 struct mm_struct *mm = vma->vm_mm;
40393@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40394 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40395 }
40396
40397- /* We don't show the stack guard page in /proc/maps */
40398+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40399+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40400+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40401+#else
40402 start = vma->vm_start;
40403- if (stack_guard_page_start(vma, start))
40404- start += PAGE_SIZE;
40405 end = vma->vm_end;
40406- if (stack_guard_page_end(vma, end))
40407- end -= PAGE_SIZE;
40408+#endif
40409
40410 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40411 start,
40412@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40413 flags & VM_WRITE ? 'w' : '-',
40414 flags & VM_EXEC ? 'x' : '-',
40415 flags & VM_MAYSHARE ? 's' : 'p',
40416+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40417+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40418+#else
40419 pgoff,
40420+#endif
40421 MAJOR(dev), MINOR(dev), ino, &len);
40422
40423 /*
40424@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40425 */
40426 if (file) {
40427 pad_len_spaces(m, len);
40428- seq_path(m, &file->f_path, "\n");
40429+ seq_path(m, &file->f_path, "\n\\");
40430 } else {
40431 const char *name = arch_vma_name(vma);
40432 if (!name) {
40433@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40434 if (vma->vm_start <= mm->brk &&
40435 vma->vm_end >= mm->start_brk) {
40436 name = "[heap]";
40437- } else if (vma->vm_start <= mm->start_stack &&
40438- vma->vm_end >= mm->start_stack) {
40439+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40440+ (vma->vm_start <= mm->start_stack &&
40441+ vma->vm_end >= mm->start_stack)) {
40442 name = "[stack]";
40443 }
40444 } else {
40445@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40446 };
40447
40448 memset(&mss, 0, sizeof mss);
40449- mss.vma = vma;
40450- /* mmap_sem is held in m_start */
40451- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40452- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40453-
40454+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40455+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40456+#endif
40457+ mss.vma = vma;
40458+ /* mmap_sem is held in m_start */
40459+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40460+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40461+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40462+ }
40463+#endif
40464 show_map_vma(m, vma);
40465
40466 seq_printf(m,
40467@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40468 "KernelPageSize: %8lu kB\n"
40469 "MMUPageSize: %8lu kB\n"
40470 "Locked: %8lu kB\n",
40471+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40472+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40473+#else
40474 (vma->vm_end - vma->vm_start) >> 10,
40475+#endif
40476 mss.resident >> 10,
40477 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40478 mss.shared_clean >> 10,
40479@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40480
40481 if (file) {
40482 seq_printf(m, " file=");
40483- seq_path(m, &file->f_path, "\n\t= ");
40484+ seq_path(m, &file->f_path, "\n\t\\= ");
40485 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40486 seq_printf(m, " heap");
40487 } else if (vma->vm_start <= mm->start_stack &&
40488diff -urNp linux-3.0.3/fs/proc/task_nommu.c linux-3.0.3/fs/proc/task_nommu.c
40489--- linux-3.0.3/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40490+++ linux-3.0.3/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40491@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40492 else
40493 bytes += kobjsize(mm);
40494
40495- if (current->fs && current->fs->users > 1)
40496+ if (current->fs && atomic_read(&current->fs->users) > 1)
40497 sbytes += kobjsize(current->fs);
40498 else
40499 bytes += kobjsize(current->fs);
40500@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40501
40502 if (file) {
40503 pad_len_spaces(m, len);
40504- seq_path(m, &file->f_path, "");
40505+ seq_path(m, &file->f_path, "\n\\");
40506 } else if (mm) {
40507 if (vma->vm_start <= mm->start_stack &&
40508 vma->vm_end >= mm->start_stack) {
40509diff -urNp linux-3.0.3/fs/quota/netlink.c linux-3.0.3/fs/quota/netlink.c
40510--- linux-3.0.3/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40511+++ linux-3.0.3/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40512@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40513 void quota_send_warning(short type, unsigned int id, dev_t dev,
40514 const char warntype)
40515 {
40516- static atomic_t seq;
40517+ static atomic_unchecked_t seq;
40518 struct sk_buff *skb;
40519 void *msg_head;
40520 int ret;
40521@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40522 "VFS: Not enough memory to send quota warning.\n");
40523 return;
40524 }
40525- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40526+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40527 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40528 if (!msg_head) {
40529 printk(KERN_ERR
40530diff -urNp linux-3.0.3/fs/readdir.c linux-3.0.3/fs/readdir.c
40531--- linux-3.0.3/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40532+++ linux-3.0.3/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40533@@ -17,6 +17,7 @@
40534 #include <linux/security.h>
40535 #include <linux/syscalls.h>
40536 #include <linux/unistd.h>
40537+#include <linux/namei.h>
40538
40539 #include <asm/uaccess.h>
40540
40541@@ -67,6 +68,7 @@ struct old_linux_dirent {
40542
40543 struct readdir_callback {
40544 struct old_linux_dirent __user * dirent;
40545+ struct file * file;
40546 int result;
40547 };
40548
40549@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40550 buf->result = -EOVERFLOW;
40551 return -EOVERFLOW;
40552 }
40553+
40554+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40555+ return 0;
40556+
40557 buf->result++;
40558 dirent = buf->dirent;
40559 if (!access_ok(VERIFY_WRITE, dirent,
40560@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40561
40562 buf.result = 0;
40563 buf.dirent = dirent;
40564+ buf.file = file;
40565
40566 error = vfs_readdir(file, fillonedir, &buf);
40567 if (buf.result)
40568@@ -142,6 +149,7 @@ struct linux_dirent {
40569 struct getdents_callback {
40570 struct linux_dirent __user * current_dir;
40571 struct linux_dirent __user * previous;
40572+ struct file * file;
40573 int count;
40574 int error;
40575 };
40576@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40577 buf->error = -EOVERFLOW;
40578 return -EOVERFLOW;
40579 }
40580+
40581+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40582+ return 0;
40583+
40584 dirent = buf->previous;
40585 if (dirent) {
40586 if (__put_user(offset, &dirent->d_off))
40587@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40588 buf.previous = NULL;
40589 buf.count = count;
40590 buf.error = 0;
40591+ buf.file = file;
40592
40593 error = vfs_readdir(file, filldir, &buf);
40594 if (error >= 0)
40595@@ -229,6 +242,7 @@ out:
40596 struct getdents_callback64 {
40597 struct linux_dirent64 __user * current_dir;
40598 struct linux_dirent64 __user * previous;
40599+ struct file *file;
40600 int count;
40601 int error;
40602 };
40603@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40604 buf->error = -EINVAL; /* only used if we fail.. */
40605 if (reclen > buf->count)
40606 return -EINVAL;
40607+
40608+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40609+ return 0;
40610+
40611 dirent = buf->previous;
40612 if (dirent) {
40613 if (__put_user(offset, &dirent->d_off))
40614@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40615
40616 buf.current_dir = dirent;
40617 buf.previous = NULL;
40618+ buf.file = file;
40619 buf.count = count;
40620 buf.error = 0;
40621
40622diff -urNp linux-3.0.3/fs/reiserfs/dir.c linux-3.0.3/fs/reiserfs/dir.c
40623--- linux-3.0.3/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40624+++ linux-3.0.3/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40625@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40626 struct reiserfs_dir_entry de;
40627 int ret = 0;
40628
40629+ pax_track_stack();
40630+
40631 reiserfs_write_lock(inode->i_sb);
40632
40633 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40634diff -urNp linux-3.0.3/fs/reiserfs/do_balan.c linux-3.0.3/fs/reiserfs/do_balan.c
40635--- linux-3.0.3/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40636+++ linux-3.0.3/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40637@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40638 return;
40639 }
40640
40641- atomic_inc(&(fs_generation(tb->tb_sb)));
40642+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40643 do_balance_starts(tb);
40644
40645 /* balance leaf returns 0 except if combining L R and S into
40646diff -urNp linux-3.0.3/fs/reiserfs/journal.c linux-3.0.3/fs/reiserfs/journal.c
40647--- linux-3.0.3/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40648+++ linux-3.0.3/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40649@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40650 struct buffer_head *bh;
40651 int i, j;
40652
40653+ pax_track_stack();
40654+
40655 bh = __getblk(dev, block, bufsize);
40656 if (buffer_uptodate(bh))
40657 return (bh);
40658diff -urNp linux-3.0.3/fs/reiserfs/namei.c linux-3.0.3/fs/reiserfs/namei.c
40659--- linux-3.0.3/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40660+++ linux-3.0.3/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40661@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40662 unsigned long savelink = 1;
40663 struct timespec ctime;
40664
40665+ pax_track_stack();
40666+
40667 /* three balancings: (1) old name removal, (2) new name insertion
40668 and (3) maybe "save" link insertion
40669 stat data updates: (1) old directory,
40670diff -urNp linux-3.0.3/fs/reiserfs/procfs.c linux-3.0.3/fs/reiserfs/procfs.c
40671--- linux-3.0.3/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40672+++ linux-3.0.3/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40673@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40674 "SMALL_TAILS " : "NO_TAILS ",
40675 replay_only(sb) ? "REPLAY_ONLY " : "",
40676 convert_reiserfs(sb) ? "CONV " : "",
40677- atomic_read(&r->s_generation_counter),
40678+ atomic_read_unchecked(&r->s_generation_counter),
40679 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40680 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40681 SF(s_good_search_by_key_reada), SF(s_bmaps),
40682@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40683 struct journal_params *jp = &rs->s_v1.s_journal;
40684 char b[BDEVNAME_SIZE];
40685
40686+ pax_track_stack();
40687+
40688 seq_printf(m, /* on-disk fields */
40689 "jp_journal_1st_block: \t%i\n"
40690 "jp_journal_dev: \t%s[%x]\n"
40691diff -urNp linux-3.0.3/fs/reiserfs/stree.c linux-3.0.3/fs/reiserfs/stree.c
40692--- linux-3.0.3/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40693+++ linux-3.0.3/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40694@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40695 int iter = 0;
40696 #endif
40697
40698+ pax_track_stack();
40699+
40700 BUG_ON(!th->t_trans_id);
40701
40702 init_tb_struct(th, &s_del_balance, sb, path,
40703@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40704 int retval;
40705 int quota_cut_bytes = 0;
40706
40707+ pax_track_stack();
40708+
40709 BUG_ON(!th->t_trans_id);
40710
40711 le_key2cpu_key(&cpu_key, key);
40712@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40713 int quota_cut_bytes;
40714 loff_t tail_pos = 0;
40715
40716+ pax_track_stack();
40717+
40718 BUG_ON(!th->t_trans_id);
40719
40720 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40721@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40722 int retval;
40723 int fs_gen;
40724
40725+ pax_track_stack();
40726+
40727 BUG_ON(!th->t_trans_id);
40728
40729 fs_gen = get_generation(inode->i_sb);
40730@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40731 int fs_gen = 0;
40732 int quota_bytes = 0;
40733
40734+ pax_track_stack();
40735+
40736 BUG_ON(!th->t_trans_id);
40737
40738 if (inode) { /* Do we count quotas for item? */
40739diff -urNp linux-3.0.3/fs/reiserfs/super.c linux-3.0.3/fs/reiserfs/super.c
40740--- linux-3.0.3/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40741+++ linux-3.0.3/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40742@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40743 {.option_name = NULL}
40744 };
40745
40746+ pax_track_stack();
40747+
40748 *blocks = 0;
40749 if (!options || !*options)
40750 /* use default configuration: create tails, journaling on, no
40751diff -urNp linux-3.0.3/fs/select.c linux-3.0.3/fs/select.c
40752--- linux-3.0.3/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40753+++ linux-3.0.3/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40754@@ -20,6 +20,7 @@
40755 #include <linux/module.h>
40756 #include <linux/slab.h>
40757 #include <linux/poll.h>
40758+#include <linux/security.h>
40759 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40760 #include <linux/file.h>
40761 #include <linux/fdtable.h>
40762@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40763 int retval, i, timed_out = 0;
40764 unsigned long slack = 0;
40765
40766+ pax_track_stack();
40767+
40768 rcu_read_lock();
40769 retval = max_select_fd(n, fds);
40770 rcu_read_unlock();
40771@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40772 /* Allocate small arguments on the stack to save memory and be faster */
40773 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40774
40775+ pax_track_stack();
40776+
40777 ret = -EINVAL;
40778 if (n < 0)
40779 goto out_nofds;
40780@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40781 struct poll_list *walk = head;
40782 unsigned long todo = nfds;
40783
40784+ pax_track_stack();
40785+
40786+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40787 if (nfds > rlimit(RLIMIT_NOFILE))
40788 return -EINVAL;
40789
40790diff -urNp linux-3.0.3/fs/seq_file.c linux-3.0.3/fs/seq_file.c
40791--- linux-3.0.3/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40792+++ linux-3.0.3/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40793@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40794 return 0;
40795 }
40796 if (!m->buf) {
40797- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40798+ m->size = PAGE_SIZE;
40799+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40800 if (!m->buf)
40801 return -ENOMEM;
40802 }
40803@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40804 Eoverflow:
40805 m->op->stop(m, p);
40806 kfree(m->buf);
40807- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40808+ m->size <<= 1;
40809+ m->buf = kmalloc(m->size, GFP_KERNEL);
40810 return !m->buf ? -ENOMEM : -EAGAIN;
40811 }
40812
40813@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40814 m->version = file->f_version;
40815 /* grab buffer if we didn't have one */
40816 if (!m->buf) {
40817- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40818+ m->size = PAGE_SIZE;
40819+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40820 if (!m->buf)
40821 goto Enomem;
40822 }
40823@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40824 goto Fill;
40825 m->op->stop(m, p);
40826 kfree(m->buf);
40827- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40828+ m->size <<= 1;
40829+ m->buf = kmalloc(m->size, GFP_KERNEL);
40830 if (!m->buf)
40831 goto Enomem;
40832 m->count = 0;
40833@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40834 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40835 void *data)
40836 {
40837- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40838+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40839 int res = -ENOMEM;
40840
40841 if (op) {
40842diff -urNp linux-3.0.3/fs/splice.c linux-3.0.3/fs/splice.c
40843--- linux-3.0.3/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40844+++ linux-3.0.3/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40845@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40846 pipe_lock(pipe);
40847
40848 for (;;) {
40849- if (!pipe->readers) {
40850+ if (!atomic_read(&pipe->readers)) {
40851 send_sig(SIGPIPE, current, 0);
40852 if (!ret)
40853 ret = -EPIPE;
40854@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40855 do_wakeup = 0;
40856 }
40857
40858- pipe->waiting_writers++;
40859+ atomic_inc(&pipe->waiting_writers);
40860 pipe_wait(pipe);
40861- pipe->waiting_writers--;
40862+ atomic_dec(&pipe->waiting_writers);
40863 }
40864
40865 pipe_unlock(pipe);
40866@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40867 .spd_release = spd_release_page,
40868 };
40869
40870+ pax_track_stack();
40871+
40872 if (splice_grow_spd(pipe, &spd))
40873 return -ENOMEM;
40874
40875@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40876 old_fs = get_fs();
40877 set_fs(get_ds());
40878 /* The cast to a user pointer is valid due to the set_fs() */
40879- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40880+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40881 set_fs(old_fs);
40882
40883 return res;
40884@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40885 old_fs = get_fs();
40886 set_fs(get_ds());
40887 /* The cast to a user pointer is valid due to the set_fs() */
40888- res = vfs_write(file, (const char __user *)buf, count, &pos);
40889+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40890 set_fs(old_fs);
40891
40892 return res;
40893@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40894 .spd_release = spd_release_page,
40895 };
40896
40897+ pax_track_stack();
40898+
40899 if (splice_grow_spd(pipe, &spd))
40900 return -ENOMEM;
40901
40902@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40903 goto err;
40904
40905 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40906- vec[i].iov_base = (void __user *) page_address(page);
40907+ vec[i].iov_base = (__force void __user *) page_address(page);
40908 vec[i].iov_len = this_len;
40909 spd.pages[i] = page;
40910 spd.nr_pages++;
40911@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40912 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40913 {
40914 while (!pipe->nrbufs) {
40915- if (!pipe->writers)
40916+ if (!atomic_read(&pipe->writers))
40917 return 0;
40918
40919- if (!pipe->waiting_writers && sd->num_spliced)
40920+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40921 return 0;
40922
40923 if (sd->flags & SPLICE_F_NONBLOCK)
40924@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40925 * out of the pipe right after the splice_to_pipe(). So set
40926 * PIPE_READERS appropriately.
40927 */
40928- pipe->readers = 1;
40929+ atomic_set(&pipe->readers, 1);
40930
40931 current->splice_pipe = pipe;
40932 }
40933@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40934 };
40935 long ret;
40936
40937+ pax_track_stack();
40938+
40939 pipe = get_pipe_info(file);
40940 if (!pipe)
40941 return -EBADF;
40942@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40943 ret = -ERESTARTSYS;
40944 break;
40945 }
40946- if (!pipe->writers)
40947+ if (!atomic_read(&pipe->writers))
40948 break;
40949- if (!pipe->waiting_writers) {
40950+ if (!atomic_read(&pipe->waiting_writers)) {
40951 if (flags & SPLICE_F_NONBLOCK) {
40952 ret = -EAGAIN;
40953 break;
40954@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40955 pipe_lock(pipe);
40956
40957 while (pipe->nrbufs >= pipe->buffers) {
40958- if (!pipe->readers) {
40959+ if (!atomic_read(&pipe->readers)) {
40960 send_sig(SIGPIPE, current, 0);
40961 ret = -EPIPE;
40962 break;
40963@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40964 ret = -ERESTARTSYS;
40965 break;
40966 }
40967- pipe->waiting_writers++;
40968+ atomic_inc(&pipe->waiting_writers);
40969 pipe_wait(pipe);
40970- pipe->waiting_writers--;
40971+ atomic_dec(&pipe->waiting_writers);
40972 }
40973
40974 pipe_unlock(pipe);
40975@@ -1819,14 +1825,14 @@ retry:
40976 pipe_double_lock(ipipe, opipe);
40977
40978 do {
40979- if (!opipe->readers) {
40980+ if (!atomic_read(&opipe->readers)) {
40981 send_sig(SIGPIPE, current, 0);
40982 if (!ret)
40983 ret = -EPIPE;
40984 break;
40985 }
40986
40987- if (!ipipe->nrbufs && !ipipe->writers)
40988+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40989 break;
40990
40991 /*
40992@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
40993 pipe_double_lock(ipipe, opipe);
40994
40995 do {
40996- if (!opipe->readers) {
40997+ if (!atomic_read(&opipe->readers)) {
40998 send_sig(SIGPIPE, current, 0);
40999 if (!ret)
41000 ret = -EPIPE;
41001@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
41002 * return EAGAIN if we have the potential of some data in the
41003 * future, otherwise just return 0
41004 */
41005- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41006+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41007 ret = -EAGAIN;
41008
41009 pipe_unlock(ipipe);
41010diff -urNp linux-3.0.3/fs/sysfs/file.c linux-3.0.3/fs/sysfs/file.c
41011--- linux-3.0.3/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
41012+++ linux-3.0.3/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
41013@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41014
41015 struct sysfs_open_dirent {
41016 atomic_t refcnt;
41017- atomic_t event;
41018+ atomic_unchecked_t event;
41019 wait_queue_head_t poll;
41020 struct list_head buffers; /* goes through sysfs_buffer.list */
41021 };
41022@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
41023 if (!sysfs_get_active(attr_sd))
41024 return -ENODEV;
41025
41026- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41027+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41028 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41029
41030 sysfs_put_active(attr_sd);
41031@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
41032 return -ENOMEM;
41033
41034 atomic_set(&new_od->refcnt, 0);
41035- atomic_set(&new_od->event, 1);
41036+ atomic_set_unchecked(&new_od->event, 1);
41037 init_waitqueue_head(&new_od->poll);
41038 INIT_LIST_HEAD(&new_od->buffers);
41039 goto retry;
41040@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
41041
41042 sysfs_put_active(attr_sd);
41043
41044- if (buffer->event != atomic_read(&od->event))
41045+ if (buffer->event != atomic_read_unchecked(&od->event))
41046 goto trigger;
41047
41048 return DEFAULT_POLLMASK;
41049@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
41050
41051 od = sd->s_attr.open;
41052 if (od) {
41053- atomic_inc(&od->event);
41054+ atomic_inc_unchecked(&od->event);
41055 wake_up_interruptible(&od->poll);
41056 }
41057
41058diff -urNp linux-3.0.3/fs/sysfs/mount.c linux-3.0.3/fs/sysfs/mount.c
41059--- linux-3.0.3/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
41060+++ linux-3.0.3/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
41061@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41062 .s_name = "",
41063 .s_count = ATOMIC_INIT(1),
41064 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41065+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41066+ .s_mode = S_IFDIR | S_IRWXU,
41067+#else
41068 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41069+#endif
41070 .s_ino = 1,
41071 };
41072
41073diff -urNp linux-3.0.3/fs/sysfs/symlink.c linux-3.0.3/fs/sysfs/symlink.c
41074--- linux-3.0.3/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
41075+++ linux-3.0.3/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
41076@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41077
41078 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41079 {
41080- char *page = nd_get_link(nd);
41081+ const char *page = nd_get_link(nd);
41082 if (!IS_ERR(page))
41083 free_page((unsigned long)page);
41084 }
41085diff -urNp linux-3.0.3/fs/udf/inode.c linux-3.0.3/fs/udf/inode.c
41086--- linux-3.0.3/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
41087+++ linux-3.0.3/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
41088@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41089 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41090 int lastblock = 0;
41091
41092+ pax_track_stack();
41093+
41094 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41095 prev_epos.block = iinfo->i_location;
41096 prev_epos.bh = NULL;
41097diff -urNp linux-3.0.3/fs/udf/misc.c linux-3.0.3/fs/udf/misc.c
41098--- linux-3.0.3/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
41099+++ linux-3.0.3/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
41100@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41101
41102 u8 udf_tag_checksum(const struct tag *t)
41103 {
41104- u8 *data = (u8 *)t;
41105+ const u8 *data = (const u8 *)t;
41106 u8 checksum = 0;
41107 int i;
41108 for (i = 0; i < sizeof(struct tag); ++i)
41109diff -urNp linux-3.0.3/fs/utimes.c linux-3.0.3/fs/utimes.c
41110--- linux-3.0.3/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
41111+++ linux-3.0.3/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
41112@@ -1,6 +1,7 @@
41113 #include <linux/compiler.h>
41114 #include <linux/file.h>
41115 #include <linux/fs.h>
41116+#include <linux/security.h>
41117 #include <linux/linkage.h>
41118 #include <linux/mount.h>
41119 #include <linux/namei.h>
41120@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41121 goto mnt_drop_write_and_out;
41122 }
41123 }
41124+
41125+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41126+ error = -EACCES;
41127+ goto mnt_drop_write_and_out;
41128+ }
41129+
41130 mutex_lock(&inode->i_mutex);
41131 error = notify_change(path->dentry, &newattrs);
41132 mutex_unlock(&inode->i_mutex);
41133diff -urNp linux-3.0.3/fs/xattr_acl.c linux-3.0.3/fs/xattr_acl.c
41134--- linux-3.0.3/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
41135+++ linux-3.0.3/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
41136@@ -17,8 +17,8 @@
41137 struct posix_acl *
41138 posix_acl_from_xattr(const void *value, size_t size)
41139 {
41140- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41141- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41142+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41143+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41144 int count;
41145 struct posix_acl *acl;
41146 struct posix_acl_entry *acl_e;
41147diff -urNp linux-3.0.3/fs/xattr.c linux-3.0.3/fs/xattr.c
41148--- linux-3.0.3/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
41149+++ linux-3.0.3/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
41150@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41151 * Extended attribute SET operations
41152 */
41153 static long
41154-setxattr(struct dentry *d, const char __user *name, const void __user *value,
41155+setxattr(struct path *path, const char __user *name, const void __user *value,
41156 size_t size, int flags)
41157 {
41158 int error;
41159@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
41160 return PTR_ERR(kvalue);
41161 }
41162
41163- error = vfs_setxattr(d, kname, kvalue, size, flags);
41164+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41165+ error = -EACCES;
41166+ goto out;
41167+ }
41168+
41169+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41170+out:
41171 kfree(kvalue);
41172 return error;
41173 }
41174@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41175 return error;
41176 error = mnt_want_write(path.mnt);
41177 if (!error) {
41178- error = setxattr(path.dentry, name, value, size, flags);
41179+ error = setxattr(&path, name, value, size, flags);
41180 mnt_drop_write(path.mnt);
41181 }
41182 path_put(&path);
41183@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41184 return error;
41185 error = mnt_want_write(path.mnt);
41186 if (!error) {
41187- error = setxattr(path.dentry, name, value, size, flags);
41188+ error = setxattr(&path, name, value, size, flags);
41189 mnt_drop_write(path.mnt);
41190 }
41191 path_put(&path);
41192@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41193 const void __user *,value, size_t, size, int, flags)
41194 {
41195 struct file *f;
41196- struct dentry *dentry;
41197 int error = -EBADF;
41198
41199 f = fget(fd);
41200 if (!f)
41201 return error;
41202- dentry = f->f_path.dentry;
41203- audit_inode(NULL, dentry);
41204+ audit_inode(NULL, f->f_path.dentry);
41205 error = mnt_want_write_file(f);
41206 if (!error) {
41207- error = setxattr(dentry, name, value, size, flags);
41208+ error = setxattr(&f->f_path, name, value, size, flags);
41209 mnt_drop_write(f->f_path.mnt);
41210 }
41211 fput(f);
41212diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c
41213--- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
41214+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
41215@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41216 xfs_fsop_geom_t fsgeo;
41217 int error;
41218
41219+ memset(&fsgeo, 0, sizeof(fsgeo));
41220 error = xfs_fs_geometry(mp, &fsgeo, 3);
41221 if (error)
41222 return -error;
41223diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c
41224--- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41225+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
41226@@ -128,7 +128,7 @@ xfs_find_handle(
41227 }
41228
41229 error = -EFAULT;
41230- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41231+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41232 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41233 goto out_put;
41234
41235diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c
41236--- linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
41237+++ linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
41238@@ -437,7 +437,7 @@ xfs_vn_put_link(
41239 struct nameidata *nd,
41240 void *p)
41241 {
41242- char *s = nd_get_link(nd);
41243+ const char *s = nd_get_link(nd);
41244
41245 if (!IS_ERR(s))
41246 kfree(s);
41247diff -urNp linux-3.0.3/fs/xfs/xfs_bmap.c linux-3.0.3/fs/xfs/xfs_bmap.c
41248--- linux-3.0.3/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
41249+++ linux-3.0.3/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
41250@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
41251 int nmap,
41252 int ret_nmap);
41253 #else
41254-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41255+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41256 #endif /* DEBUG */
41257
41258 STATIC int
41259diff -urNp linux-3.0.3/fs/xfs/xfs_dir2_sf.c linux-3.0.3/fs/xfs/xfs_dir2_sf.c
41260--- linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
41261+++ linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
41262@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41263 }
41264
41265 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41266- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41267+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41268+ char name[sfep->namelen];
41269+ memcpy(name, sfep->name, sfep->namelen);
41270+ if (filldir(dirent, name, sfep->namelen,
41271+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
41272+ *offset = off & 0x7fffffff;
41273+ return 0;
41274+ }
41275+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41276 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41277 *offset = off & 0x7fffffff;
41278 return 0;
41279diff -urNp linux-3.0.3/grsecurity/gracl_alloc.c linux-3.0.3/grsecurity/gracl_alloc.c
41280--- linux-3.0.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41281+++ linux-3.0.3/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
41282@@ -0,0 +1,105 @@
41283+#include <linux/kernel.h>
41284+#include <linux/mm.h>
41285+#include <linux/slab.h>
41286+#include <linux/vmalloc.h>
41287+#include <linux/gracl.h>
41288+#include <linux/grsecurity.h>
41289+
41290+static unsigned long alloc_stack_next = 1;
41291+static unsigned long alloc_stack_size = 1;
41292+static void **alloc_stack;
41293+
41294+static __inline__ int
41295+alloc_pop(void)
41296+{
41297+ if (alloc_stack_next == 1)
41298+ return 0;
41299+
41300+ kfree(alloc_stack[alloc_stack_next - 2]);
41301+
41302+ alloc_stack_next--;
41303+
41304+ return 1;
41305+}
41306+
41307+static __inline__ int
41308+alloc_push(void *buf)
41309+{
41310+ if (alloc_stack_next >= alloc_stack_size)
41311+ return 1;
41312+
41313+ alloc_stack[alloc_stack_next - 1] = buf;
41314+
41315+ alloc_stack_next++;
41316+
41317+ return 0;
41318+}
41319+
41320+void *
41321+acl_alloc(unsigned long len)
41322+{
41323+ void *ret = NULL;
41324+
41325+ if (!len || len > PAGE_SIZE)
41326+ goto out;
41327+
41328+ ret = kmalloc(len, GFP_KERNEL);
41329+
41330+ if (ret) {
41331+ if (alloc_push(ret)) {
41332+ kfree(ret);
41333+ ret = NULL;
41334+ }
41335+ }
41336+
41337+out:
41338+ return ret;
41339+}
41340+
41341+void *
41342+acl_alloc_num(unsigned long num, unsigned long len)
41343+{
41344+ if (!len || (num > (PAGE_SIZE / len)))
41345+ return NULL;
41346+
41347+ return acl_alloc(num * len);
41348+}
41349+
41350+void
41351+acl_free_all(void)
41352+{
41353+ if (gr_acl_is_enabled() || !alloc_stack)
41354+ return;
41355+
41356+ while (alloc_pop()) ;
41357+
41358+ if (alloc_stack) {
41359+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41360+ kfree(alloc_stack);
41361+ else
41362+ vfree(alloc_stack);
41363+ }
41364+
41365+ alloc_stack = NULL;
41366+ alloc_stack_size = 1;
41367+ alloc_stack_next = 1;
41368+
41369+ return;
41370+}
41371+
41372+int
41373+acl_alloc_stack_init(unsigned long size)
41374+{
41375+ if ((size * sizeof (void *)) <= PAGE_SIZE)
41376+ alloc_stack =
41377+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41378+ else
41379+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
41380+
41381+ alloc_stack_size = size;
41382+
41383+ if (!alloc_stack)
41384+ return 0;
41385+ else
41386+ return 1;
41387+}
41388diff -urNp linux-3.0.3/grsecurity/gracl.c linux-3.0.3/grsecurity/gracl.c
41389--- linux-3.0.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41390+++ linux-3.0.3/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41391@@ -0,0 +1,4106 @@
41392+#include <linux/kernel.h>
41393+#include <linux/module.h>
41394+#include <linux/sched.h>
41395+#include <linux/mm.h>
41396+#include <linux/file.h>
41397+#include <linux/fs.h>
41398+#include <linux/namei.h>
41399+#include <linux/mount.h>
41400+#include <linux/tty.h>
41401+#include <linux/proc_fs.h>
41402+#include <linux/lglock.h>
41403+#include <linux/slab.h>
41404+#include <linux/vmalloc.h>
41405+#include <linux/types.h>
41406+#include <linux/sysctl.h>
41407+#include <linux/netdevice.h>
41408+#include <linux/ptrace.h>
41409+#include <linux/gracl.h>
41410+#include <linux/gralloc.h>
41411+#include <linux/grsecurity.h>
41412+#include <linux/grinternal.h>
41413+#include <linux/pid_namespace.h>
41414+#include <linux/fdtable.h>
41415+#include <linux/percpu.h>
41416+
41417+#include <asm/uaccess.h>
41418+#include <asm/errno.h>
41419+#include <asm/mman.h>
41420+
41421+static struct acl_role_db acl_role_set;
41422+static struct name_db name_set;
41423+static struct inodev_db inodev_set;
41424+
41425+/* for keeping track of userspace pointers used for subjects, so we
41426+ can share references in the kernel as well
41427+*/
41428+
41429+static struct path real_root;
41430+
41431+static struct acl_subj_map_db subj_map_set;
41432+
41433+static struct acl_role_label *default_role;
41434+
41435+static struct acl_role_label *role_list;
41436+
41437+static u16 acl_sp_role_value;
41438+
41439+extern char *gr_shared_page[4];
41440+static DEFINE_MUTEX(gr_dev_mutex);
41441+DEFINE_RWLOCK(gr_inode_lock);
41442+
41443+struct gr_arg *gr_usermode;
41444+
41445+static unsigned int gr_status __read_only = GR_STATUS_INIT;
41446+
41447+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41448+extern void gr_clear_learn_entries(void);
41449+
41450+#ifdef CONFIG_GRKERNSEC_RESLOG
41451+extern void gr_log_resource(const struct task_struct *task,
41452+ const int res, const unsigned long wanted, const int gt);
41453+#endif
41454+
41455+unsigned char *gr_system_salt;
41456+unsigned char *gr_system_sum;
41457+
41458+static struct sprole_pw **acl_special_roles = NULL;
41459+static __u16 num_sprole_pws = 0;
41460+
41461+static struct acl_role_label *kernel_role = NULL;
41462+
41463+static unsigned int gr_auth_attempts = 0;
41464+static unsigned long gr_auth_expires = 0UL;
41465+
41466+#ifdef CONFIG_NET
41467+extern struct vfsmount *sock_mnt;
41468+#endif
41469+
41470+extern struct vfsmount *pipe_mnt;
41471+extern struct vfsmount *shm_mnt;
41472+#ifdef CONFIG_HUGETLBFS
41473+extern struct vfsmount *hugetlbfs_vfsmount;
41474+#endif
41475+
41476+static struct acl_object_label *fakefs_obj_rw;
41477+static struct acl_object_label *fakefs_obj_rwx;
41478+
41479+extern int gr_init_uidset(void);
41480+extern void gr_free_uidset(void);
41481+extern void gr_remove_uid(uid_t uid);
41482+extern int gr_find_uid(uid_t uid);
41483+
41484+DECLARE_BRLOCK(vfsmount_lock);
41485+
41486+__inline__ int
41487+gr_acl_is_enabled(void)
41488+{
41489+ return (gr_status & GR_READY);
41490+}
41491+
41492+#ifdef CONFIG_BTRFS_FS
41493+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41494+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41495+#endif
41496+
41497+static inline dev_t __get_dev(const struct dentry *dentry)
41498+{
41499+#ifdef CONFIG_BTRFS_FS
41500+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41501+ return get_btrfs_dev_from_inode(dentry->d_inode);
41502+ else
41503+#endif
41504+ return dentry->d_inode->i_sb->s_dev;
41505+}
41506+
41507+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41508+{
41509+ return __get_dev(dentry);
41510+}
41511+
41512+static char gr_task_roletype_to_char(struct task_struct *task)
41513+{
41514+ switch (task->role->roletype &
41515+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41516+ GR_ROLE_SPECIAL)) {
41517+ case GR_ROLE_DEFAULT:
41518+ return 'D';
41519+ case GR_ROLE_USER:
41520+ return 'U';
41521+ case GR_ROLE_GROUP:
41522+ return 'G';
41523+ case GR_ROLE_SPECIAL:
41524+ return 'S';
41525+ }
41526+
41527+ return 'X';
41528+}
41529+
41530+char gr_roletype_to_char(void)
41531+{
41532+ return gr_task_roletype_to_char(current);
41533+}
41534+
41535+__inline__ int
41536+gr_acl_tpe_check(void)
41537+{
41538+ if (unlikely(!(gr_status & GR_READY)))
41539+ return 0;
41540+ if (current->role->roletype & GR_ROLE_TPE)
41541+ return 1;
41542+ else
41543+ return 0;
41544+}
41545+
41546+int
41547+gr_handle_rawio(const struct inode *inode)
41548+{
41549+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41550+ if (inode && S_ISBLK(inode->i_mode) &&
41551+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41552+ !capable(CAP_SYS_RAWIO))
41553+ return 1;
41554+#endif
41555+ return 0;
41556+}
41557+
41558+static int
41559+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41560+{
41561+ if (likely(lena != lenb))
41562+ return 0;
41563+
41564+ return !memcmp(a, b, lena);
41565+}
41566+
41567+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41568+{
41569+ *buflen -= namelen;
41570+ if (*buflen < 0)
41571+ return -ENAMETOOLONG;
41572+ *buffer -= namelen;
41573+ memcpy(*buffer, str, namelen);
41574+ return 0;
41575+}
41576+
41577+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41578+{
41579+ return prepend(buffer, buflen, name->name, name->len);
41580+}
41581+
41582+static int prepend_path(const struct path *path, struct path *root,
41583+ char **buffer, int *buflen)
41584+{
41585+ struct dentry *dentry = path->dentry;
41586+ struct vfsmount *vfsmnt = path->mnt;
41587+ bool slash = false;
41588+ int error = 0;
41589+
41590+ while (dentry != root->dentry || vfsmnt != root->mnt) {
41591+ struct dentry * parent;
41592+
41593+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41594+ /* Global root? */
41595+ if (vfsmnt->mnt_parent == vfsmnt) {
41596+ goto out;
41597+ }
41598+ dentry = vfsmnt->mnt_mountpoint;
41599+ vfsmnt = vfsmnt->mnt_parent;
41600+ continue;
41601+ }
41602+ parent = dentry->d_parent;
41603+ prefetch(parent);
41604+ spin_lock(&dentry->d_lock);
41605+ error = prepend_name(buffer, buflen, &dentry->d_name);
41606+ spin_unlock(&dentry->d_lock);
41607+ if (!error)
41608+ error = prepend(buffer, buflen, "/", 1);
41609+ if (error)
41610+ break;
41611+
41612+ slash = true;
41613+ dentry = parent;
41614+ }
41615+
41616+out:
41617+ if (!error && !slash)
41618+ error = prepend(buffer, buflen, "/", 1);
41619+
41620+ return error;
41621+}
41622+
41623+/* this must be called with vfsmount_lock and rename_lock held */
41624+
41625+static char *__our_d_path(const struct path *path, struct path *root,
41626+ char *buf, int buflen)
41627+{
41628+ char *res = buf + buflen;
41629+ int error;
41630+
41631+ prepend(&res, &buflen, "\0", 1);
41632+ error = prepend_path(path, root, &res, &buflen);
41633+ if (error)
41634+ return ERR_PTR(error);
41635+
41636+ return res;
41637+}
41638+
41639+static char *
41640+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41641+{
41642+ char *retval;
41643+
41644+ retval = __our_d_path(path, root, buf, buflen);
41645+ if (unlikely(IS_ERR(retval)))
41646+ retval = strcpy(buf, "<path too long>");
41647+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41648+ retval[1] = '\0';
41649+
41650+ return retval;
41651+}
41652+
41653+static char *
41654+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41655+ char *buf, int buflen)
41656+{
41657+ struct path path;
41658+ char *res;
41659+
41660+ path.dentry = (struct dentry *)dentry;
41661+ path.mnt = (struct vfsmount *)vfsmnt;
41662+
41663+ /* we can use real_root.dentry, real_root.mnt, because this is only called
41664+ by the RBAC system */
41665+ res = gen_full_path(&path, &real_root, buf, buflen);
41666+
41667+ return res;
41668+}
41669+
41670+static char *
41671+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41672+ char *buf, int buflen)
41673+{
41674+ char *res;
41675+ struct path path;
41676+ struct path root;
41677+ struct task_struct *reaper = &init_task;
41678+
41679+ path.dentry = (struct dentry *)dentry;
41680+ path.mnt = (struct vfsmount *)vfsmnt;
41681+
41682+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41683+ get_fs_root(reaper->fs, &root);
41684+
41685+ write_seqlock(&rename_lock);
41686+ br_read_lock(vfsmount_lock);
41687+ res = gen_full_path(&path, &root, buf, buflen);
41688+ br_read_unlock(vfsmount_lock);
41689+ write_sequnlock(&rename_lock);
41690+
41691+ path_put(&root);
41692+ return res;
41693+}
41694+
41695+static char *
41696+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41697+{
41698+ char *ret;
41699+ write_seqlock(&rename_lock);
41700+ br_read_lock(vfsmount_lock);
41701+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41702+ PAGE_SIZE);
41703+ br_read_unlock(vfsmount_lock);
41704+ write_sequnlock(&rename_lock);
41705+ return ret;
41706+}
41707+
41708+char *
41709+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41710+{
41711+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41712+ PAGE_SIZE);
41713+}
41714+
41715+char *
41716+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41717+{
41718+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41719+ PAGE_SIZE);
41720+}
41721+
41722+char *
41723+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41724+{
41725+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41726+ PAGE_SIZE);
41727+}
41728+
41729+char *
41730+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41731+{
41732+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41733+ PAGE_SIZE);
41734+}
41735+
41736+char *
41737+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41738+{
41739+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41740+ PAGE_SIZE);
41741+}
41742+
41743+__inline__ __u32
41744+to_gr_audit(const __u32 reqmode)
41745+{
41746+ /* masks off auditable permission flags, then shifts them to create
41747+ auditing flags, and adds the special case of append auditing if
41748+ we're requesting write */
41749+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41750+}
41751+
41752+struct acl_subject_label *
41753+lookup_subject_map(const struct acl_subject_label *userp)
41754+{
41755+ unsigned int index = shash(userp, subj_map_set.s_size);
41756+ struct subject_map *match;
41757+
41758+ match = subj_map_set.s_hash[index];
41759+
41760+ while (match && match->user != userp)
41761+ match = match->next;
41762+
41763+ if (match != NULL)
41764+ return match->kernel;
41765+ else
41766+ return NULL;
41767+}
41768+
41769+static void
41770+insert_subj_map_entry(struct subject_map *subjmap)
41771+{
41772+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41773+ struct subject_map **curr;
41774+
41775+ subjmap->prev = NULL;
41776+
41777+ curr = &subj_map_set.s_hash[index];
41778+ if (*curr != NULL)
41779+ (*curr)->prev = subjmap;
41780+
41781+ subjmap->next = *curr;
41782+ *curr = subjmap;
41783+
41784+ return;
41785+}
41786+
41787+static struct acl_role_label *
41788+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41789+ const gid_t gid)
41790+{
41791+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41792+ struct acl_role_label *match;
41793+ struct role_allowed_ip *ipp;
41794+ unsigned int x;
41795+ u32 curr_ip = task->signal->curr_ip;
41796+
41797+ task->signal->saved_ip = curr_ip;
41798+
41799+ match = acl_role_set.r_hash[index];
41800+
41801+ while (match) {
41802+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41803+ for (x = 0; x < match->domain_child_num; x++) {
41804+ if (match->domain_children[x] == uid)
41805+ goto found;
41806+ }
41807+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41808+ break;
41809+ match = match->next;
41810+ }
41811+found:
41812+ if (match == NULL) {
41813+ try_group:
41814+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41815+ match = acl_role_set.r_hash[index];
41816+
41817+ while (match) {
41818+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41819+ for (x = 0; x < match->domain_child_num; x++) {
41820+ if (match->domain_children[x] == gid)
41821+ goto found2;
41822+ }
41823+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41824+ break;
41825+ match = match->next;
41826+ }
41827+found2:
41828+ if (match == NULL)
41829+ match = default_role;
41830+ if (match->allowed_ips == NULL)
41831+ return match;
41832+ else {
41833+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41834+ if (likely
41835+ ((ntohl(curr_ip) & ipp->netmask) ==
41836+ (ntohl(ipp->addr) & ipp->netmask)))
41837+ return match;
41838+ }
41839+ match = default_role;
41840+ }
41841+ } else if (match->allowed_ips == NULL) {
41842+ return match;
41843+ } else {
41844+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41845+ if (likely
41846+ ((ntohl(curr_ip) & ipp->netmask) ==
41847+ (ntohl(ipp->addr) & ipp->netmask)))
41848+ return match;
41849+ }
41850+ goto try_group;
41851+ }
41852+
41853+ return match;
41854+}
41855+
41856+struct acl_subject_label *
41857+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41858+ const struct acl_role_label *role)
41859+{
41860+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41861+ struct acl_subject_label *match;
41862+
41863+ match = role->subj_hash[index];
41864+
41865+ while (match && (match->inode != ino || match->device != dev ||
41866+ (match->mode & GR_DELETED))) {
41867+ match = match->next;
41868+ }
41869+
41870+ if (match && !(match->mode & GR_DELETED))
41871+ return match;
41872+ else
41873+ return NULL;
41874+}
41875+
41876+struct acl_subject_label *
41877+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41878+ const struct acl_role_label *role)
41879+{
41880+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
41881+ struct acl_subject_label *match;
41882+
41883+ match = role->subj_hash[index];
41884+
41885+ while (match && (match->inode != ino || match->device != dev ||
41886+ !(match->mode & GR_DELETED))) {
41887+ match = match->next;
41888+ }
41889+
41890+ if (match && (match->mode & GR_DELETED))
41891+ return match;
41892+ else
41893+ return NULL;
41894+}
41895+
41896+static struct acl_object_label *
41897+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41898+ const struct acl_subject_label *subj)
41899+{
41900+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41901+ struct acl_object_label *match;
41902+
41903+ match = subj->obj_hash[index];
41904+
41905+ while (match && (match->inode != ino || match->device != dev ||
41906+ (match->mode & GR_DELETED))) {
41907+ match = match->next;
41908+ }
41909+
41910+ if (match && !(match->mode & GR_DELETED))
41911+ return match;
41912+ else
41913+ return NULL;
41914+}
41915+
41916+static struct acl_object_label *
41917+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41918+ const struct acl_subject_label *subj)
41919+{
41920+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41921+ struct acl_object_label *match;
41922+
41923+ match = subj->obj_hash[index];
41924+
41925+ while (match && (match->inode != ino || match->device != dev ||
41926+ !(match->mode & GR_DELETED))) {
41927+ match = match->next;
41928+ }
41929+
41930+ if (match && (match->mode & GR_DELETED))
41931+ return match;
41932+
41933+ match = subj->obj_hash[index];
41934+
41935+ while (match && (match->inode != ino || match->device != dev ||
41936+ (match->mode & GR_DELETED))) {
41937+ match = match->next;
41938+ }
41939+
41940+ if (match && !(match->mode & GR_DELETED))
41941+ return match;
41942+ else
41943+ return NULL;
41944+}
41945+
41946+static struct name_entry *
41947+lookup_name_entry(const char *name)
41948+{
41949+ unsigned int len = strlen(name);
41950+ unsigned int key = full_name_hash(name, len);
41951+ unsigned int index = key % name_set.n_size;
41952+ struct name_entry *match;
41953+
41954+ match = name_set.n_hash[index];
41955+
41956+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41957+ match = match->next;
41958+
41959+ return match;
41960+}
41961+
41962+static struct name_entry *
41963+lookup_name_entry_create(const char *name)
41964+{
41965+ unsigned int len = strlen(name);
41966+ unsigned int key = full_name_hash(name, len);
41967+ unsigned int index = key % name_set.n_size;
41968+ struct name_entry *match;
41969+
41970+ match = name_set.n_hash[index];
41971+
41972+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41973+ !match->deleted))
41974+ match = match->next;
41975+
41976+ if (match && match->deleted)
41977+ return match;
41978+
41979+ match = name_set.n_hash[index];
41980+
41981+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41982+ match->deleted))
41983+ match = match->next;
41984+
41985+ if (match && !match->deleted)
41986+ return match;
41987+ else
41988+ return NULL;
41989+}
41990+
41991+static struct inodev_entry *
41992+lookup_inodev_entry(const ino_t ino, const dev_t dev)
41993+{
41994+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
41995+ struct inodev_entry *match;
41996+
41997+ match = inodev_set.i_hash[index];
41998+
41999+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42000+ match = match->next;
42001+
42002+ return match;
42003+}
42004+
42005+static void
42006+insert_inodev_entry(struct inodev_entry *entry)
42007+{
42008+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42009+ inodev_set.i_size);
42010+ struct inodev_entry **curr;
42011+
42012+ entry->prev = NULL;
42013+
42014+ curr = &inodev_set.i_hash[index];
42015+ if (*curr != NULL)
42016+ (*curr)->prev = entry;
42017+
42018+ entry->next = *curr;
42019+ *curr = entry;
42020+
42021+ return;
42022+}
42023+
42024+static void
42025+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42026+{
42027+ unsigned int index =
42028+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42029+ struct acl_role_label **curr;
42030+ struct acl_role_label *tmp;
42031+
42032+ curr = &acl_role_set.r_hash[index];
42033+
42034+ /* if role was already inserted due to domains and already has
42035+ a role in the same bucket as it attached, then we need to
42036+ combine these two buckets
42037+ */
42038+ if (role->next) {
42039+ tmp = role->next;
42040+ while (tmp->next)
42041+ tmp = tmp->next;
42042+ tmp->next = *curr;
42043+ } else
42044+ role->next = *curr;
42045+ *curr = role;
42046+
42047+ return;
42048+}
42049+
42050+static void
42051+insert_acl_role_label(struct acl_role_label *role)
42052+{
42053+ int i;
42054+
42055+ if (role_list == NULL) {
42056+ role_list = role;
42057+ role->prev = NULL;
42058+ } else {
42059+ role->prev = role_list;
42060+ role_list = role;
42061+ }
42062+
42063+ /* used for hash chains */
42064+ role->next = NULL;
42065+
42066+ if (role->roletype & GR_ROLE_DOMAIN) {
42067+ for (i = 0; i < role->domain_child_num; i++)
42068+ __insert_acl_role_label(role, role->domain_children[i]);
42069+ } else
42070+ __insert_acl_role_label(role, role->uidgid);
42071+}
42072+
42073+static int
42074+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42075+{
42076+ struct name_entry **curr, *nentry;
42077+ struct inodev_entry *ientry;
42078+ unsigned int len = strlen(name);
42079+ unsigned int key = full_name_hash(name, len);
42080+ unsigned int index = key % name_set.n_size;
42081+
42082+ curr = &name_set.n_hash[index];
42083+
42084+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42085+ curr = &((*curr)->next);
42086+
42087+ if (*curr != NULL)
42088+ return 1;
42089+
42090+ nentry = acl_alloc(sizeof (struct name_entry));
42091+ if (nentry == NULL)
42092+ return 0;
42093+ ientry = acl_alloc(sizeof (struct inodev_entry));
42094+ if (ientry == NULL)
42095+ return 0;
42096+ ientry->nentry = nentry;
42097+
42098+ nentry->key = key;
42099+ nentry->name = name;
42100+ nentry->inode = inode;
42101+ nentry->device = device;
42102+ nentry->len = len;
42103+ nentry->deleted = deleted;
42104+
42105+ nentry->prev = NULL;
42106+ curr = &name_set.n_hash[index];
42107+ if (*curr != NULL)
42108+ (*curr)->prev = nentry;
42109+ nentry->next = *curr;
42110+ *curr = nentry;
42111+
42112+ /* insert us into the table searchable by inode/dev */
42113+ insert_inodev_entry(ientry);
42114+
42115+ return 1;
42116+}
42117+
42118+static void
42119+insert_acl_obj_label(struct acl_object_label *obj,
42120+ struct acl_subject_label *subj)
42121+{
42122+ unsigned int index =
42123+ fhash(obj->inode, obj->device, subj->obj_hash_size);
42124+ struct acl_object_label **curr;
42125+
42126+
42127+ obj->prev = NULL;
42128+
42129+ curr = &subj->obj_hash[index];
42130+ if (*curr != NULL)
42131+ (*curr)->prev = obj;
42132+
42133+ obj->next = *curr;
42134+ *curr = obj;
42135+
42136+ return;
42137+}
42138+
42139+static void
42140+insert_acl_subj_label(struct acl_subject_label *obj,
42141+ struct acl_role_label *role)
42142+{
42143+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42144+ struct acl_subject_label **curr;
42145+
42146+ obj->prev = NULL;
42147+
42148+ curr = &role->subj_hash[index];
42149+ if (*curr != NULL)
42150+ (*curr)->prev = obj;
42151+
42152+ obj->next = *curr;
42153+ *curr = obj;
42154+
42155+ return;
42156+}
42157+
42158+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42159+
42160+static void *
42161+create_table(__u32 * len, int elementsize)
42162+{
42163+ unsigned int table_sizes[] = {
42164+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42165+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42166+ 4194301, 8388593, 16777213, 33554393, 67108859
42167+ };
42168+ void *newtable = NULL;
42169+ unsigned int pwr = 0;
42170+
42171+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42172+ table_sizes[pwr] <= *len)
42173+ pwr++;
42174+
42175+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42176+ return newtable;
42177+
42178+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42179+ newtable =
42180+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42181+ else
42182+ newtable = vmalloc(table_sizes[pwr] * elementsize);
42183+
42184+ *len = table_sizes[pwr];
42185+
42186+ return newtable;
42187+}
42188+
42189+static int
42190+init_variables(const struct gr_arg *arg)
42191+{
42192+ struct task_struct *reaper = &init_task;
42193+ unsigned int stacksize;
42194+
42195+ subj_map_set.s_size = arg->role_db.num_subjects;
42196+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42197+ name_set.n_size = arg->role_db.num_objects;
42198+ inodev_set.i_size = arg->role_db.num_objects;
42199+
42200+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
42201+ !name_set.n_size || !inodev_set.i_size)
42202+ return 1;
42203+
42204+ if (!gr_init_uidset())
42205+ return 1;
42206+
42207+ /* set up the stack that holds allocation info */
42208+
42209+ stacksize = arg->role_db.num_pointers + 5;
42210+
42211+ if (!acl_alloc_stack_init(stacksize))
42212+ return 1;
42213+
42214+ /* grab reference for the real root dentry and vfsmount */
42215+ get_fs_root(reaper->fs, &real_root);
42216+
42217+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42218+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42219+#endif
42220+
42221+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42222+ if (fakefs_obj_rw == NULL)
42223+ return 1;
42224+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42225+
42226+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42227+ if (fakefs_obj_rwx == NULL)
42228+ return 1;
42229+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42230+
42231+ subj_map_set.s_hash =
42232+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42233+ acl_role_set.r_hash =
42234+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42235+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42236+ inodev_set.i_hash =
42237+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42238+
42239+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42240+ !name_set.n_hash || !inodev_set.i_hash)
42241+ return 1;
42242+
42243+ memset(subj_map_set.s_hash, 0,
42244+ sizeof(struct subject_map *) * subj_map_set.s_size);
42245+ memset(acl_role_set.r_hash, 0,
42246+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
42247+ memset(name_set.n_hash, 0,
42248+ sizeof (struct name_entry *) * name_set.n_size);
42249+ memset(inodev_set.i_hash, 0,
42250+ sizeof (struct inodev_entry *) * inodev_set.i_size);
42251+
42252+ return 0;
42253+}
42254+
42255+/* free information not needed after startup
42256+ currently contains user->kernel pointer mappings for subjects
42257+*/
42258+
42259+static void
42260+free_init_variables(void)
42261+{
42262+ __u32 i;
42263+
42264+ if (subj_map_set.s_hash) {
42265+ for (i = 0; i < subj_map_set.s_size; i++) {
42266+ if (subj_map_set.s_hash[i]) {
42267+ kfree(subj_map_set.s_hash[i]);
42268+ subj_map_set.s_hash[i] = NULL;
42269+ }
42270+ }
42271+
42272+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42273+ PAGE_SIZE)
42274+ kfree(subj_map_set.s_hash);
42275+ else
42276+ vfree(subj_map_set.s_hash);
42277+ }
42278+
42279+ return;
42280+}
42281+
42282+static void
42283+free_variables(void)
42284+{
42285+ struct acl_subject_label *s;
42286+ struct acl_role_label *r;
42287+ struct task_struct *task, *task2;
42288+ unsigned int x;
42289+
42290+ gr_clear_learn_entries();
42291+
42292+ read_lock(&tasklist_lock);
42293+ do_each_thread(task2, task) {
42294+ task->acl_sp_role = 0;
42295+ task->acl_role_id = 0;
42296+ task->acl = NULL;
42297+ task->role = NULL;
42298+ } while_each_thread(task2, task);
42299+ read_unlock(&tasklist_lock);
42300+
42301+ /* release the reference to the real root dentry and vfsmount */
42302+ path_put(&real_root);
42303+
42304+ /* free all object hash tables */
42305+
42306+ FOR_EACH_ROLE_START(r)
42307+ if (r->subj_hash == NULL)
42308+ goto next_role;
42309+ FOR_EACH_SUBJECT_START(r, s, x)
42310+ if (s->obj_hash == NULL)
42311+ break;
42312+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42313+ kfree(s->obj_hash);
42314+ else
42315+ vfree(s->obj_hash);
42316+ FOR_EACH_SUBJECT_END(s, x)
42317+ FOR_EACH_NESTED_SUBJECT_START(r, s)
42318+ if (s->obj_hash == NULL)
42319+ break;
42320+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42321+ kfree(s->obj_hash);
42322+ else
42323+ vfree(s->obj_hash);
42324+ FOR_EACH_NESTED_SUBJECT_END(s)
42325+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42326+ kfree(r->subj_hash);
42327+ else
42328+ vfree(r->subj_hash);
42329+ r->subj_hash = NULL;
42330+next_role:
42331+ FOR_EACH_ROLE_END(r)
42332+
42333+ acl_free_all();
42334+
42335+ if (acl_role_set.r_hash) {
42336+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42337+ PAGE_SIZE)
42338+ kfree(acl_role_set.r_hash);
42339+ else
42340+ vfree(acl_role_set.r_hash);
42341+ }
42342+ if (name_set.n_hash) {
42343+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
42344+ PAGE_SIZE)
42345+ kfree(name_set.n_hash);
42346+ else
42347+ vfree(name_set.n_hash);
42348+ }
42349+
42350+ if (inodev_set.i_hash) {
42351+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42352+ PAGE_SIZE)
42353+ kfree(inodev_set.i_hash);
42354+ else
42355+ vfree(inodev_set.i_hash);
42356+ }
42357+
42358+ gr_free_uidset();
42359+
42360+ memset(&name_set, 0, sizeof (struct name_db));
42361+ memset(&inodev_set, 0, sizeof (struct inodev_db));
42362+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42363+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42364+
42365+ default_role = NULL;
42366+ role_list = NULL;
42367+
42368+ return;
42369+}
42370+
42371+static __u32
42372+count_user_objs(struct acl_object_label *userp)
42373+{
42374+ struct acl_object_label o_tmp;
42375+ __u32 num = 0;
42376+
42377+ while (userp) {
42378+ if (copy_from_user(&o_tmp, userp,
42379+ sizeof (struct acl_object_label)))
42380+ break;
42381+
42382+ userp = o_tmp.prev;
42383+ num++;
42384+ }
42385+
42386+ return num;
42387+}
42388+
42389+static struct acl_subject_label *
42390+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42391+
42392+static int
42393+copy_user_glob(struct acl_object_label *obj)
42394+{
42395+ struct acl_object_label *g_tmp, **guser;
42396+ unsigned int len;
42397+ char *tmp;
42398+
42399+ if (obj->globbed == NULL)
42400+ return 0;
42401+
42402+ guser = &obj->globbed;
42403+ while (*guser) {
42404+ g_tmp = (struct acl_object_label *)
42405+ acl_alloc(sizeof (struct acl_object_label));
42406+ if (g_tmp == NULL)
42407+ return -ENOMEM;
42408+
42409+ if (copy_from_user(g_tmp, *guser,
42410+ sizeof (struct acl_object_label)))
42411+ return -EFAULT;
42412+
42413+ len = strnlen_user(g_tmp->filename, PATH_MAX);
42414+
42415+ if (!len || len >= PATH_MAX)
42416+ return -EINVAL;
42417+
42418+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42419+ return -ENOMEM;
42420+
42421+ if (copy_from_user(tmp, g_tmp->filename, len))
42422+ return -EFAULT;
42423+ tmp[len-1] = '\0';
42424+ g_tmp->filename = tmp;
42425+
42426+ *guser = g_tmp;
42427+ guser = &(g_tmp->next);
42428+ }
42429+
42430+ return 0;
42431+}
42432+
42433+static int
42434+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42435+ struct acl_role_label *role)
42436+{
42437+ struct acl_object_label *o_tmp;
42438+ unsigned int len;
42439+ int ret;
42440+ char *tmp;
42441+
42442+ while (userp) {
42443+ if ((o_tmp = (struct acl_object_label *)
42444+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
42445+ return -ENOMEM;
42446+
42447+ if (copy_from_user(o_tmp, userp,
42448+ sizeof (struct acl_object_label)))
42449+ return -EFAULT;
42450+
42451+ userp = o_tmp->prev;
42452+
42453+ len = strnlen_user(o_tmp->filename, PATH_MAX);
42454+
42455+ if (!len || len >= PATH_MAX)
42456+ return -EINVAL;
42457+
42458+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42459+ return -ENOMEM;
42460+
42461+ if (copy_from_user(tmp, o_tmp->filename, len))
42462+ return -EFAULT;
42463+ tmp[len-1] = '\0';
42464+ o_tmp->filename = tmp;
42465+
42466+ insert_acl_obj_label(o_tmp, subj);
42467+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42468+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42469+ return -ENOMEM;
42470+
42471+ ret = copy_user_glob(o_tmp);
42472+ if (ret)
42473+ return ret;
42474+
42475+ if (o_tmp->nested) {
42476+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42477+ if (IS_ERR(o_tmp->nested))
42478+ return PTR_ERR(o_tmp->nested);
42479+
42480+ /* insert into nested subject list */
42481+ o_tmp->nested->next = role->hash->first;
42482+ role->hash->first = o_tmp->nested;
42483+ }
42484+ }
42485+
42486+ return 0;
42487+}
42488+
42489+static __u32
42490+count_user_subjs(struct acl_subject_label *userp)
42491+{
42492+ struct acl_subject_label s_tmp;
42493+ __u32 num = 0;
42494+
42495+ while (userp) {
42496+ if (copy_from_user(&s_tmp, userp,
42497+ sizeof (struct acl_subject_label)))
42498+ break;
42499+
42500+ userp = s_tmp.prev;
42501+ /* do not count nested subjects against this count, since
42502+ they are not included in the hash table, but are
42503+ attached to objects. We have already counted
42504+ the subjects in userspace for the allocation
42505+ stack
42506+ */
42507+ if (!(s_tmp.mode & GR_NESTED))
42508+ num++;
42509+ }
42510+
42511+ return num;
42512+}
42513+
42514+static int
42515+copy_user_allowedips(struct acl_role_label *rolep)
42516+{
42517+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42518+
42519+ ruserip = rolep->allowed_ips;
42520+
42521+ while (ruserip) {
42522+ rlast = rtmp;
42523+
42524+ if ((rtmp = (struct role_allowed_ip *)
42525+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42526+ return -ENOMEM;
42527+
42528+ if (copy_from_user(rtmp, ruserip,
42529+ sizeof (struct role_allowed_ip)))
42530+ return -EFAULT;
42531+
42532+ ruserip = rtmp->prev;
42533+
42534+ if (!rlast) {
42535+ rtmp->prev = NULL;
42536+ rolep->allowed_ips = rtmp;
42537+ } else {
42538+ rlast->next = rtmp;
42539+ rtmp->prev = rlast;
42540+ }
42541+
42542+ if (!ruserip)
42543+ rtmp->next = NULL;
42544+ }
42545+
42546+ return 0;
42547+}
42548+
42549+static int
42550+copy_user_transitions(struct acl_role_label *rolep)
42551+{
42552+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
42553+
42554+ unsigned int len;
42555+ char *tmp;
42556+
42557+ rusertp = rolep->transitions;
42558+
42559+ while (rusertp) {
42560+ rlast = rtmp;
42561+
42562+ if ((rtmp = (struct role_transition *)
42563+ acl_alloc(sizeof (struct role_transition))) == NULL)
42564+ return -ENOMEM;
42565+
42566+ if (copy_from_user(rtmp, rusertp,
42567+ sizeof (struct role_transition)))
42568+ return -EFAULT;
42569+
42570+ rusertp = rtmp->prev;
42571+
42572+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42573+
42574+ if (!len || len >= GR_SPROLE_LEN)
42575+ return -EINVAL;
42576+
42577+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42578+ return -ENOMEM;
42579+
42580+ if (copy_from_user(tmp, rtmp->rolename, len))
42581+ return -EFAULT;
42582+ tmp[len-1] = '\0';
42583+ rtmp->rolename = tmp;
42584+
42585+ if (!rlast) {
42586+ rtmp->prev = NULL;
42587+ rolep->transitions = rtmp;
42588+ } else {
42589+ rlast->next = rtmp;
42590+ rtmp->prev = rlast;
42591+ }
42592+
42593+ if (!rusertp)
42594+ rtmp->next = NULL;
42595+ }
42596+
42597+ return 0;
42598+}
42599+
42600+static struct acl_subject_label *
42601+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42602+{
42603+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42604+ unsigned int len;
42605+ char *tmp;
42606+ __u32 num_objs;
42607+ struct acl_ip_label **i_tmp, *i_utmp2;
42608+ struct gr_hash_struct ghash;
42609+ struct subject_map *subjmap;
42610+ unsigned int i_num;
42611+ int err;
42612+
42613+ s_tmp = lookup_subject_map(userp);
42614+
42615+ /* we've already copied this subject into the kernel, just return
42616+ the reference to it, and don't copy it over again
42617+ */
42618+ if (s_tmp)
42619+ return(s_tmp);
42620+
42621+ if ((s_tmp = (struct acl_subject_label *)
42622+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42623+ return ERR_PTR(-ENOMEM);
42624+
42625+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42626+ if (subjmap == NULL)
42627+ return ERR_PTR(-ENOMEM);
42628+
42629+ subjmap->user = userp;
42630+ subjmap->kernel = s_tmp;
42631+ insert_subj_map_entry(subjmap);
42632+
42633+ if (copy_from_user(s_tmp, userp,
42634+ sizeof (struct acl_subject_label)))
42635+ return ERR_PTR(-EFAULT);
42636+
42637+ len = strnlen_user(s_tmp->filename, PATH_MAX);
42638+
42639+ if (!len || len >= PATH_MAX)
42640+ return ERR_PTR(-EINVAL);
42641+
42642+ if ((tmp = (char *) acl_alloc(len)) == NULL)
42643+ return ERR_PTR(-ENOMEM);
42644+
42645+ if (copy_from_user(tmp, s_tmp->filename, len))
42646+ return ERR_PTR(-EFAULT);
42647+ tmp[len-1] = '\0';
42648+ s_tmp->filename = tmp;
42649+
42650+ if (!strcmp(s_tmp->filename, "/"))
42651+ role->root_label = s_tmp;
42652+
42653+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42654+ return ERR_PTR(-EFAULT);
42655+
42656+ /* copy user and group transition tables */
42657+
42658+ if (s_tmp->user_trans_num) {
42659+ uid_t *uidlist;
42660+
42661+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42662+ if (uidlist == NULL)
42663+ return ERR_PTR(-ENOMEM);
42664+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42665+ return ERR_PTR(-EFAULT);
42666+
42667+ s_tmp->user_transitions = uidlist;
42668+ }
42669+
42670+ if (s_tmp->group_trans_num) {
42671+ gid_t *gidlist;
42672+
42673+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42674+ if (gidlist == NULL)
42675+ return ERR_PTR(-ENOMEM);
42676+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42677+ return ERR_PTR(-EFAULT);
42678+
42679+ s_tmp->group_transitions = gidlist;
42680+ }
42681+
42682+ /* set up object hash table */
42683+ num_objs = count_user_objs(ghash.first);
42684+
42685+ s_tmp->obj_hash_size = num_objs;
42686+ s_tmp->obj_hash =
42687+ (struct acl_object_label **)
42688+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42689+
42690+ if (!s_tmp->obj_hash)
42691+ return ERR_PTR(-ENOMEM);
42692+
42693+ memset(s_tmp->obj_hash, 0,
42694+ s_tmp->obj_hash_size *
42695+ sizeof (struct acl_object_label *));
42696+
42697+ /* add in objects */
42698+ err = copy_user_objs(ghash.first, s_tmp, role);
42699+
42700+ if (err)
42701+ return ERR_PTR(err);
42702+
42703+ /* set pointer for parent subject */
42704+ if (s_tmp->parent_subject) {
42705+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42706+
42707+ if (IS_ERR(s_tmp2))
42708+ return s_tmp2;
42709+
42710+ s_tmp->parent_subject = s_tmp2;
42711+ }
42712+
42713+ /* add in ip acls */
42714+
42715+ if (!s_tmp->ip_num) {
42716+ s_tmp->ips = NULL;
42717+ goto insert;
42718+ }
42719+
42720+ i_tmp =
42721+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42722+ sizeof (struct acl_ip_label *));
42723+
42724+ if (!i_tmp)
42725+ return ERR_PTR(-ENOMEM);
42726+
42727+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42728+ *(i_tmp + i_num) =
42729+ (struct acl_ip_label *)
42730+ acl_alloc(sizeof (struct acl_ip_label));
42731+ if (!*(i_tmp + i_num))
42732+ return ERR_PTR(-ENOMEM);
42733+
42734+ if (copy_from_user
42735+ (&i_utmp2, s_tmp->ips + i_num,
42736+ sizeof (struct acl_ip_label *)))
42737+ return ERR_PTR(-EFAULT);
42738+
42739+ if (copy_from_user
42740+ (*(i_tmp + i_num), i_utmp2,
42741+ sizeof (struct acl_ip_label)))
42742+ return ERR_PTR(-EFAULT);
42743+
42744+ if ((*(i_tmp + i_num))->iface == NULL)
42745+ continue;
42746+
42747+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42748+ if (!len || len >= IFNAMSIZ)
42749+ return ERR_PTR(-EINVAL);
42750+ tmp = acl_alloc(len);
42751+ if (tmp == NULL)
42752+ return ERR_PTR(-ENOMEM);
42753+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42754+ return ERR_PTR(-EFAULT);
42755+ (*(i_tmp + i_num))->iface = tmp;
42756+ }
42757+
42758+ s_tmp->ips = i_tmp;
42759+
42760+insert:
42761+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42762+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42763+ return ERR_PTR(-ENOMEM);
42764+
42765+ return s_tmp;
42766+}
42767+
42768+static int
42769+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42770+{
42771+ struct acl_subject_label s_pre;
42772+ struct acl_subject_label * ret;
42773+ int err;
42774+
42775+ while (userp) {
42776+ if (copy_from_user(&s_pre, userp,
42777+ sizeof (struct acl_subject_label)))
42778+ return -EFAULT;
42779+
42780+ /* do not add nested subjects here, add
42781+ while parsing objects
42782+ */
42783+
42784+ if (s_pre.mode & GR_NESTED) {
42785+ userp = s_pre.prev;
42786+ continue;
42787+ }
42788+
42789+ ret = do_copy_user_subj(userp, role);
42790+
42791+ err = PTR_ERR(ret);
42792+ if (IS_ERR(ret))
42793+ return err;
42794+
42795+ insert_acl_subj_label(ret, role);
42796+
42797+ userp = s_pre.prev;
42798+ }
42799+
42800+ return 0;
42801+}
42802+
42803+static int
42804+copy_user_acl(struct gr_arg *arg)
42805+{
42806+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42807+ struct sprole_pw *sptmp;
42808+ struct gr_hash_struct *ghash;
42809+ uid_t *domainlist;
42810+ unsigned int r_num;
42811+ unsigned int len;
42812+ char *tmp;
42813+ int err = 0;
42814+ __u16 i;
42815+ __u32 num_subjs;
42816+
42817+ /* we need a default and kernel role */
42818+ if (arg->role_db.num_roles < 2)
42819+ return -EINVAL;
42820+
42821+ /* copy special role authentication info from userspace */
42822+
42823+ num_sprole_pws = arg->num_sprole_pws;
42824+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42825+
42826+ if (!acl_special_roles) {
42827+ err = -ENOMEM;
42828+ goto cleanup;
42829+ }
42830+
42831+ for (i = 0; i < num_sprole_pws; i++) {
42832+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42833+ if (!sptmp) {
42834+ err = -ENOMEM;
42835+ goto cleanup;
42836+ }
42837+ if (copy_from_user(sptmp, arg->sprole_pws + i,
42838+ sizeof (struct sprole_pw))) {
42839+ err = -EFAULT;
42840+ goto cleanup;
42841+ }
42842+
42843+ len =
42844+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42845+
42846+ if (!len || len >= GR_SPROLE_LEN) {
42847+ err = -EINVAL;
42848+ goto cleanup;
42849+ }
42850+
42851+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42852+ err = -ENOMEM;
42853+ goto cleanup;
42854+ }
42855+
42856+ if (copy_from_user(tmp, sptmp->rolename, len)) {
42857+ err = -EFAULT;
42858+ goto cleanup;
42859+ }
42860+ tmp[len-1] = '\0';
42861+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42862+ printk(KERN_ALERT "Copying special role %s\n", tmp);
42863+#endif
42864+ sptmp->rolename = tmp;
42865+ acl_special_roles[i] = sptmp;
42866+ }
42867+
42868+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42869+
42870+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42871+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
42872+
42873+ if (!r_tmp) {
42874+ err = -ENOMEM;
42875+ goto cleanup;
42876+ }
42877+
42878+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
42879+ sizeof (struct acl_role_label *))) {
42880+ err = -EFAULT;
42881+ goto cleanup;
42882+ }
42883+
42884+ if (copy_from_user(r_tmp, r_utmp2,
42885+ sizeof (struct acl_role_label))) {
42886+ err = -EFAULT;
42887+ goto cleanup;
42888+ }
42889+
42890+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42891+
42892+ if (!len || len >= PATH_MAX) {
42893+ err = -EINVAL;
42894+ goto cleanup;
42895+ }
42896+
42897+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
42898+ err = -ENOMEM;
42899+ goto cleanup;
42900+ }
42901+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
42902+ err = -EFAULT;
42903+ goto cleanup;
42904+ }
42905+ tmp[len-1] = '\0';
42906+ r_tmp->rolename = tmp;
42907+
42908+ if (!strcmp(r_tmp->rolename, "default")
42909+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42910+ default_role = r_tmp;
42911+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42912+ kernel_role = r_tmp;
42913+ }
42914+
42915+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42916+ err = -ENOMEM;
42917+ goto cleanup;
42918+ }
42919+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42920+ err = -EFAULT;
42921+ goto cleanup;
42922+ }
42923+
42924+ r_tmp->hash = ghash;
42925+
42926+ num_subjs = count_user_subjs(r_tmp->hash->first);
42927+
42928+ r_tmp->subj_hash_size = num_subjs;
42929+ r_tmp->subj_hash =
42930+ (struct acl_subject_label **)
42931+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42932+
42933+ if (!r_tmp->subj_hash) {
42934+ err = -ENOMEM;
42935+ goto cleanup;
42936+ }
42937+
42938+ err = copy_user_allowedips(r_tmp);
42939+ if (err)
42940+ goto cleanup;
42941+
42942+ /* copy domain info */
42943+ if (r_tmp->domain_children != NULL) {
42944+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42945+ if (domainlist == NULL) {
42946+ err = -ENOMEM;
42947+ goto cleanup;
42948+ }
42949+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42950+ err = -EFAULT;
42951+ goto cleanup;
42952+ }
42953+ r_tmp->domain_children = domainlist;
42954+ }
42955+
42956+ err = copy_user_transitions(r_tmp);
42957+ if (err)
42958+ goto cleanup;
42959+
42960+ memset(r_tmp->subj_hash, 0,
42961+ r_tmp->subj_hash_size *
42962+ sizeof (struct acl_subject_label *));
42963+
42964+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42965+
42966+ if (err)
42967+ goto cleanup;
42968+
42969+ /* set nested subject list to null */
42970+ r_tmp->hash->first = NULL;
42971+
42972+ insert_acl_role_label(r_tmp);
42973+ }
42974+
42975+ goto return_err;
42976+ cleanup:
42977+ free_variables();
42978+ return_err:
42979+ return err;
42980+
42981+}
42982+
42983+static int
42984+gracl_init(struct gr_arg *args)
42985+{
42986+ int error = 0;
42987+
42988+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42989+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42990+
42991+ if (init_variables(args)) {
42992+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42993+ error = -ENOMEM;
42994+ free_variables();
42995+ goto out;
42996+ }
42997+
42998+ error = copy_user_acl(args);
42999+ free_init_variables();
43000+ if (error) {
43001+ free_variables();
43002+ goto out;
43003+ }
43004+
43005+ if ((error = gr_set_acls(0))) {
43006+ free_variables();
43007+ goto out;
43008+ }
43009+
43010+ pax_open_kernel();
43011+ gr_status |= GR_READY;
43012+ pax_close_kernel();
43013+
43014+ out:
43015+ return error;
43016+}
43017+
43018+/* derived from glibc fnmatch() 0: match, 1: no match*/
43019+
43020+static int
43021+glob_match(const char *p, const char *n)
43022+{
43023+ char c;
43024+
43025+ while ((c = *p++) != '\0') {
43026+ switch (c) {
43027+ case '?':
43028+ if (*n == '\0')
43029+ return 1;
43030+ else if (*n == '/')
43031+ return 1;
43032+ break;
43033+ case '\\':
43034+ if (*n != c)
43035+ return 1;
43036+ break;
43037+ case '*':
43038+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
43039+ if (*n == '/')
43040+ return 1;
43041+ else if (c == '?') {
43042+ if (*n == '\0')
43043+ return 1;
43044+ else
43045+ ++n;
43046+ }
43047+ }
43048+ if (c == '\0') {
43049+ return 0;
43050+ } else {
43051+ const char *endp;
43052+
43053+ if ((endp = strchr(n, '/')) == NULL)
43054+ endp = n + strlen(n);
43055+
43056+ if (c == '[') {
43057+ for (--p; n < endp; ++n)
43058+ if (!glob_match(p, n))
43059+ return 0;
43060+ } else if (c == '/') {
43061+ while (*n != '\0' && *n != '/')
43062+ ++n;
43063+ if (*n == '/' && !glob_match(p, n + 1))
43064+ return 0;
43065+ } else {
43066+ for (--p; n < endp; ++n)
43067+ if (*n == c && !glob_match(p, n))
43068+ return 0;
43069+ }
43070+
43071+ return 1;
43072+ }
43073+ case '[':
43074+ {
43075+ int not;
43076+ char cold;
43077+
43078+ if (*n == '\0' || *n == '/')
43079+ return 1;
43080+
43081+ not = (*p == '!' || *p == '^');
43082+ if (not)
43083+ ++p;
43084+
43085+ c = *p++;
43086+ for (;;) {
43087+ unsigned char fn = (unsigned char)*n;
43088+
43089+ if (c == '\0')
43090+ return 1;
43091+ else {
43092+ if (c == fn)
43093+ goto matched;
43094+ cold = c;
43095+ c = *p++;
43096+
43097+ if (c == '-' && *p != ']') {
43098+ unsigned char cend = *p++;
43099+
43100+ if (cend == '\0')
43101+ return 1;
43102+
43103+ if (cold <= fn && fn <= cend)
43104+ goto matched;
43105+
43106+ c = *p++;
43107+ }
43108+ }
43109+
43110+ if (c == ']')
43111+ break;
43112+ }
43113+ if (!not)
43114+ return 1;
43115+ break;
43116+ matched:
43117+ while (c != ']') {
43118+ if (c == '\0')
43119+ return 1;
43120+
43121+ c = *p++;
43122+ }
43123+ if (not)
43124+ return 1;
43125+ }
43126+ break;
43127+ default:
43128+ if (c != *n)
43129+ return 1;
43130+ }
43131+
43132+ ++n;
43133+ }
43134+
43135+ if (*n == '\0')
43136+ return 0;
43137+
43138+ if (*n == '/')
43139+ return 0;
43140+
43141+ return 1;
43142+}
43143+
43144+static struct acl_object_label *
43145+chk_glob_label(struct acl_object_label *globbed,
43146+ struct dentry *dentry, struct vfsmount *mnt, char **path)
43147+{
43148+ struct acl_object_label *tmp;
43149+
43150+ if (*path == NULL)
43151+ *path = gr_to_filename_nolock(dentry, mnt);
43152+
43153+ tmp = globbed;
43154+
43155+ while (tmp) {
43156+ if (!glob_match(tmp->filename, *path))
43157+ return tmp;
43158+ tmp = tmp->next;
43159+ }
43160+
43161+ return NULL;
43162+}
43163+
43164+static struct acl_object_label *
43165+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43166+ const ino_t curr_ino, const dev_t curr_dev,
43167+ const struct acl_subject_label *subj, char **path, const int checkglob)
43168+{
43169+ struct acl_subject_label *tmpsubj;
43170+ struct acl_object_label *retval;
43171+ struct acl_object_label *retval2;
43172+
43173+ tmpsubj = (struct acl_subject_label *) subj;
43174+ read_lock(&gr_inode_lock);
43175+ do {
43176+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43177+ if (retval) {
43178+ if (checkglob && retval->globbed) {
43179+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43180+ (struct vfsmount *)orig_mnt, path);
43181+ if (retval2)
43182+ retval = retval2;
43183+ }
43184+ break;
43185+ }
43186+ } while ((tmpsubj = tmpsubj->parent_subject));
43187+ read_unlock(&gr_inode_lock);
43188+
43189+ return retval;
43190+}
43191+
43192+static __inline__ struct acl_object_label *
43193+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43194+ struct dentry *curr_dentry,
43195+ const struct acl_subject_label *subj, char **path, const int checkglob)
43196+{
43197+ int newglob = checkglob;
43198+ ino_t inode;
43199+ dev_t device;
43200+
43201+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43202+ as we don't want a / * rule to match instead of the / object
43203+ don't do this for create lookups that call this function though, since they're looking up
43204+ on the parent and thus need globbing checks on all paths
43205+ */
43206+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43207+ newglob = GR_NO_GLOB;
43208+
43209+ spin_lock(&curr_dentry->d_lock);
43210+ inode = curr_dentry->d_inode->i_ino;
43211+ device = __get_dev(curr_dentry);
43212+ spin_unlock(&curr_dentry->d_lock);
43213+
43214+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43215+}
43216+
43217+static struct acl_object_label *
43218+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43219+ const struct acl_subject_label *subj, char *path, const int checkglob)
43220+{
43221+ struct dentry *dentry = (struct dentry *) l_dentry;
43222+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43223+ struct acl_object_label *retval;
43224+ struct dentry *parent;
43225+
43226+ write_seqlock(&rename_lock);
43227+ br_read_lock(vfsmount_lock);
43228+
43229+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43230+#ifdef CONFIG_NET
43231+ mnt == sock_mnt ||
43232+#endif
43233+#ifdef CONFIG_HUGETLBFS
43234+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43235+#endif
43236+ /* ignore Eric Biederman */
43237+ IS_PRIVATE(l_dentry->d_inode))) {
43238+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43239+ goto out;
43240+ }
43241+
43242+ for (;;) {
43243+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43244+ break;
43245+
43246+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43247+ if (mnt->mnt_parent == mnt)
43248+ break;
43249+
43250+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43251+ if (retval != NULL)
43252+ goto out;
43253+
43254+ dentry = mnt->mnt_mountpoint;
43255+ mnt = mnt->mnt_parent;
43256+ continue;
43257+ }
43258+
43259+ parent = dentry->d_parent;
43260+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43261+ if (retval != NULL)
43262+ goto out;
43263+
43264+ dentry = parent;
43265+ }
43266+
43267+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43268+
43269+ /* real_root is pinned so we don't have to hold a reference */
43270+ if (retval == NULL)
43271+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43272+out:
43273+ br_read_unlock(vfsmount_lock);
43274+ write_sequnlock(&rename_lock);
43275+
43276+ BUG_ON(retval == NULL);
43277+
43278+ return retval;
43279+}
43280+
43281+static __inline__ struct acl_object_label *
43282+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43283+ const struct acl_subject_label *subj)
43284+{
43285+ char *path = NULL;
43286+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43287+}
43288+
43289+static __inline__ struct acl_object_label *
43290+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43291+ const struct acl_subject_label *subj)
43292+{
43293+ char *path = NULL;
43294+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43295+}
43296+
43297+static __inline__ struct acl_object_label *
43298+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43299+ const struct acl_subject_label *subj, char *path)
43300+{
43301+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43302+}
43303+
43304+static struct acl_subject_label *
43305+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43306+ const struct acl_role_label *role)
43307+{
43308+ struct dentry *dentry = (struct dentry *) l_dentry;
43309+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43310+ struct acl_subject_label *retval;
43311+ struct dentry *parent;
43312+
43313+ write_seqlock(&rename_lock);
43314+ br_read_lock(vfsmount_lock);
43315+
43316+ for (;;) {
43317+ if (dentry == real_root.dentry && mnt == real_root.mnt)
43318+ break;
43319+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43320+ if (mnt->mnt_parent == mnt)
43321+ break;
43322+
43323+ spin_lock(&dentry->d_lock);
43324+ read_lock(&gr_inode_lock);
43325+ retval =
43326+ lookup_acl_subj_label(dentry->d_inode->i_ino,
43327+ __get_dev(dentry), role);
43328+ read_unlock(&gr_inode_lock);
43329+ spin_unlock(&dentry->d_lock);
43330+ if (retval != NULL)
43331+ goto out;
43332+
43333+ dentry = mnt->mnt_mountpoint;
43334+ mnt = mnt->mnt_parent;
43335+ continue;
43336+ }
43337+
43338+ spin_lock(&dentry->d_lock);
43339+ read_lock(&gr_inode_lock);
43340+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43341+ __get_dev(dentry), role);
43342+ read_unlock(&gr_inode_lock);
43343+ parent = dentry->d_parent;
43344+ spin_unlock(&dentry->d_lock);
43345+
43346+ if (retval != NULL)
43347+ goto out;
43348+
43349+ dentry = parent;
43350+ }
43351+
43352+ spin_lock(&dentry->d_lock);
43353+ read_lock(&gr_inode_lock);
43354+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43355+ __get_dev(dentry), role);
43356+ read_unlock(&gr_inode_lock);
43357+ spin_unlock(&dentry->d_lock);
43358+
43359+ if (unlikely(retval == NULL)) {
43360+ /* real_root is pinned, we don't need to hold a reference */
43361+ read_lock(&gr_inode_lock);
43362+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43363+ __get_dev(real_root.dentry), role);
43364+ read_unlock(&gr_inode_lock);
43365+ }
43366+out:
43367+ br_read_unlock(vfsmount_lock);
43368+ write_sequnlock(&rename_lock);
43369+
43370+ BUG_ON(retval == NULL);
43371+
43372+ return retval;
43373+}
43374+
43375+static void
43376+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43377+{
43378+ struct task_struct *task = current;
43379+ const struct cred *cred = current_cred();
43380+
43381+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43382+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43383+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43384+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43385+
43386+ return;
43387+}
43388+
43389+static void
43390+gr_log_learn_sysctl(const char *path, const __u32 mode)
43391+{
43392+ struct task_struct *task = current;
43393+ const struct cred *cred = current_cred();
43394+
43395+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43396+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43397+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43398+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43399+
43400+ return;
43401+}
43402+
43403+static void
43404+gr_log_learn_id_change(const char type, const unsigned int real,
43405+ const unsigned int effective, const unsigned int fs)
43406+{
43407+ struct task_struct *task = current;
43408+ const struct cred *cred = current_cred();
43409+
43410+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43411+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43412+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43413+ type, real, effective, fs, &task->signal->saved_ip);
43414+
43415+ return;
43416+}
43417+
43418+__u32
43419+gr_check_link(const struct dentry * new_dentry,
43420+ const struct dentry * parent_dentry,
43421+ const struct vfsmount * parent_mnt,
43422+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43423+{
43424+ struct acl_object_label *obj;
43425+ __u32 oldmode, newmode;
43426+ __u32 needmode;
43427+
43428+ if (unlikely(!(gr_status & GR_READY)))
43429+ return (GR_CREATE | GR_LINK);
43430+
43431+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43432+ oldmode = obj->mode;
43433+
43434+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43435+ oldmode |= (GR_CREATE | GR_LINK);
43436+
43437+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43438+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43439+ needmode |= GR_SETID | GR_AUDIT_SETID;
43440+
43441+ newmode =
43442+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
43443+ oldmode | needmode);
43444+
43445+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43446+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43447+ GR_INHERIT | GR_AUDIT_INHERIT);
43448+
43449+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43450+ goto bad;
43451+
43452+ if ((oldmode & needmode) != needmode)
43453+ goto bad;
43454+
43455+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43456+ if ((newmode & needmode) != needmode)
43457+ goto bad;
43458+
43459+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43460+ return newmode;
43461+bad:
43462+ needmode = oldmode;
43463+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43464+ needmode |= GR_SETID;
43465+
43466+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43467+ gr_log_learn(old_dentry, old_mnt, needmode);
43468+ return (GR_CREATE | GR_LINK);
43469+ } else if (newmode & GR_SUPPRESS)
43470+ return GR_SUPPRESS;
43471+ else
43472+ return 0;
43473+}
43474+
43475+__u32
43476+gr_search_file(const struct dentry * dentry, const __u32 mode,
43477+ const struct vfsmount * mnt)
43478+{
43479+ __u32 retval = mode;
43480+ struct acl_subject_label *curracl;
43481+ struct acl_object_label *currobj;
43482+
43483+ if (unlikely(!(gr_status & GR_READY)))
43484+ return (mode & ~GR_AUDITS);
43485+
43486+ curracl = current->acl;
43487+
43488+ currobj = chk_obj_label(dentry, mnt, curracl);
43489+ retval = currobj->mode & mode;
43490+
43491+ /* if we're opening a specified transfer file for writing
43492+ (e.g. /dev/initctl), then transfer our role to init
43493+ */
43494+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43495+ current->role->roletype & GR_ROLE_PERSIST)) {
43496+ struct task_struct *task = init_pid_ns.child_reaper;
43497+
43498+ if (task->role != current->role) {
43499+ task->acl_sp_role = 0;
43500+ task->acl_role_id = current->acl_role_id;
43501+ task->role = current->role;
43502+ rcu_read_lock();
43503+ read_lock(&grsec_exec_file_lock);
43504+ gr_apply_subject_to_task(task);
43505+ read_unlock(&grsec_exec_file_lock);
43506+ rcu_read_unlock();
43507+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43508+ }
43509+ }
43510+
43511+ if (unlikely
43512+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43513+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43514+ __u32 new_mode = mode;
43515+
43516+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43517+
43518+ retval = new_mode;
43519+
43520+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43521+ new_mode |= GR_INHERIT;
43522+
43523+ if (!(mode & GR_NOLEARN))
43524+ gr_log_learn(dentry, mnt, new_mode);
43525+ }
43526+
43527+ return retval;
43528+}
43529+
43530+__u32
43531+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43532+ const struct vfsmount * mnt, const __u32 mode)
43533+{
43534+ struct name_entry *match;
43535+ struct acl_object_label *matchpo;
43536+ struct acl_subject_label *curracl;
43537+ char *path;
43538+ __u32 retval;
43539+
43540+ if (unlikely(!(gr_status & GR_READY)))
43541+ return (mode & ~GR_AUDITS);
43542+
43543+ preempt_disable();
43544+ path = gr_to_filename_rbac(new_dentry, mnt);
43545+ match = lookup_name_entry_create(path);
43546+
43547+ if (!match)
43548+ goto check_parent;
43549+
43550+ curracl = current->acl;
43551+
43552+ read_lock(&gr_inode_lock);
43553+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43554+ read_unlock(&gr_inode_lock);
43555+
43556+ if (matchpo) {
43557+ if ((matchpo->mode & mode) !=
43558+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
43559+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43560+ __u32 new_mode = mode;
43561+
43562+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43563+
43564+ gr_log_learn(new_dentry, mnt, new_mode);
43565+
43566+ preempt_enable();
43567+ return new_mode;
43568+ }
43569+ preempt_enable();
43570+ return (matchpo->mode & mode);
43571+ }
43572+
43573+ check_parent:
43574+ curracl = current->acl;
43575+
43576+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43577+ retval = matchpo->mode & mode;
43578+
43579+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43580+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43581+ __u32 new_mode = mode;
43582+
43583+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43584+
43585+ gr_log_learn(new_dentry, mnt, new_mode);
43586+ preempt_enable();
43587+ return new_mode;
43588+ }
43589+
43590+ preempt_enable();
43591+ return retval;
43592+}
43593+
43594+int
43595+gr_check_hidden_task(const struct task_struct *task)
43596+{
43597+ if (unlikely(!(gr_status & GR_READY)))
43598+ return 0;
43599+
43600+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43601+ return 1;
43602+
43603+ return 0;
43604+}
43605+
43606+int
43607+gr_check_protected_task(const struct task_struct *task)
43608+{
43609+ if (unlikely(!(gr_status & GR_READY) || !task))
43610+ return 0;
43611+
43612+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43613+ task->acl != current->acl)
43614+ return 1;
43615+
43616+ return 0;
43617+}
43618+
43619+int
43620+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43621+{
43622+ struct task_struct *p;
43623+ int ret = 0;
43624+
43625+ if (unlikely(!(gr_status & GR_READY) || !pid))
43626+ return ret;
43627+
43628+ read_lock(&tasklist_lock);
43629+ do_each_pid_task(pid, type, p) {
43630+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43631+ p->acl != current->acl) {
43632+ ret = 1;
43633+ goto out;
43634+ }
43635+ } while_each_pid_task(pid, type, p);
43636+out:
43637+ read_unlock(&tasklist_lock);
43638+
43639+ return ret;
43640+}
43641+
43642+void
43643+gr_copy_label(struct task_struct *tsk)
43644+{
43645+ tsk->signal->used_accept = 0;
43646+ tsk->acl_sp_role = 0;
43647+ tsk->acl_role_id = current->acl_role_id;
43648+ tsk->acl = current->acl;
43649+ tsk->role = current->role;
43650+ tsk->signal->curr_ip = current->signal->curr_ip;
43651+ tsk->signal->saved_ip = current->signal->saved_ip;
43652+ if (current->exec_file)
43653+ get_file(current->exec_file);
43654+ tsk->exec_file = current->exec_file;
43655+ tsk->is_writable = current->is_writable;
43656+ if (unlikely(current->signal->used_accept)) {
43657+ current->signal->curr_ip = 0;
43658+ current->signal->saved_ip = 0;
43659+ }
43660+
43661+ return;
43662+}
43663+
43664+static void
43665+gr_set_proc_res(struct task_struct *task)
43666+{
43667+ struct acl_subject_label *proc;
43668+ unsigned short i;
43669+
43670+ proc = task->acl;
43671+
43672+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43673+ return;
43674+
43675+ for (i = 0; i < RLIM_NLIMITS; i++) {
43676+ if (!(proc->resmask & (1 << i)))
43677+ continue;
43678+
43679+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43680+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43681+ }
43682+
43683+ return;
43684+}
43685+
43686+extern int __gr_process_user_ban(struct user_struct *user);
43687+
43688+int
43689+gr_check_user_change(int real, int effective, int fs)
43690+{
43691+ unsigned int i;
43692+ __u16 num;
43693+ uid_t *uidlist;
43694+ int curuid;
43695+ int realok = 0;
43696+ int effectiveok = 0;
43697+ int fsok = 0;
43698+
43699+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43700+ struct user_struct *user;
43701+
43702+ if (real == -1)
43703+ goto skipit;
43704+
43705+ user = find_user(real);
43706+ if (user == NULL)
43707+ goto skipit;
43708+
43709+ if (__gr_process_user_ban(user)) {
43710+ /* for find_user */
43711+ free_uid(user);
43712+ return 1;
43713+ }
43714+
43715+ /* for find_user */
43716+ free_uid(user);
43717+
43718+skipit:
43719+#endif
43720+
43721+ if (unlikely(!(gr_status & GR_READY)))
43722+ return 0;
43723+
43724+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43725+ gr_log_learn_id_change('u', real, effective, fs);
43726+
43727+ num = current->acl->user_trans_num;
43728+ uidlist = current->acl->user_transitions;
43729+
43730+ if (uidlist == NULL)
43731+ return 0;
43732+
43733+ if (real == -1)
43734+ realok = 1;
43735+ if (effective == -1)
43736+ effectiveok = 1;
43737+ if (fs == -1)
43738+ fsok = 1;
43739+
43740+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
43741+ for (i = 0; i < num; i++) {
43742+ curuid = (int)uidlist[i];
43743+ if (real == curuid)
43744+ realok = 1;
43745+ if (effective == curuid)
43746+ effectiveok = 1;
43747+ if (fs == curuid)
43748+ fsok = 1;
43749+ }
43750+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
43751+ for (i = 0; i < num; i++) {
43752+ curuid = (int)uidlist[i];
43753+ if (real == curuid)
43754+ break;
43755+ if (effective == curuid)
43756+ break;
43757+ if (fs == curuid)
43758+ break;
43759+ }
43760+ /* not in deny list */
43761+ if (i == num) {
43762+ realok = 1;
43763+ effectiveok = 1;
43764+ fsok = 1;
43765+ }
43766+ }
43767+
43768+ if (realok && effectiveok && fsok)
43769+ return 0;
43770+ else {
43771+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43772+ return 1;
43773+ }
43774+}
43775+
43776+int
43777+gr_check_group_change(int real, int effective, int fs)
43778+{
43779+ unsigned int i;
43780+ __u16 num;
43781+ gid_t *gidlist;
43782+ int curgid;
43783+ int realok = 0;
43784+ int effectiveok = 0;
43785+ int fsok = 0;
43786+
43787+ if (unlikely(!(gr_status & GR_READY)))
43788+ return 0;
43789+
43790+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43791+ gr_log_learn_id_change('g', real, effective, fs);
43792+
43793+ num = current->acl->group_trans_num;
43794+ gidlist = current->acl->group_transitions;
43795+
43796+ if (gidlist == NULL)
43797+ return 0;
43798+
43799+ if (real == -1)
43800+ realok = 1;
43801+ if (effective == -1)
43802+ effectiveok = 1;
43803+ if (fs == -1)
43804+ fsok = 1;
43805+
43806+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
43807+ for (i = 0; i < num; i++) {
43808+ curgid = (int)gidlist[i];
43809+ if (real == curgid)
43810+ realok = 1;
43811+ if (effective == curgid)
43812+ effectiveok = 1;
43813+ if (fs == curgid)
43814+ fsok = 1;
43815+ }
43816+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
43817+ for (i = 0; i < num; i++) {
43818+ curgid = (int)gidlist[i];
43819+ if (real == curgid)
43820+ break;
43821+ if (effective == curgid)
43822+ break;
43823+ if (fs == curgid)
43824+ break;
43825+ }
43826+ /* not in deny list */
43827+ if (i == num) {
43828+ realok = 1;
43829+ effectiveok = 1;
43830+ fsok = 1;
43831+ }
43832+ }
43833+
43834+ if (realok && effectiveok && fsok)
43835+ return 0;
43836+ else {
43837+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43838+ return 1;
43839+ }
43840+}
43841+
43842+void
43843+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43844+{
43845+ struct acl_role_label *role = task->role;
43846+ struct acl_subject_label *subj = NULL;
43847+ struct acl_object_label *obj;
43848+ struct file *filp;
43849+
43850+ if (unlikely(!(gr_status & GR_READY)))
43851+ return;
43852+
43853+ filp = task->exec_file;
43854+
43855+ /* kernel process, we'll give them the kernel role */
43856+ if (unlikely(!filp)) {
43857+ task->role = kernel_role;
43858+ task->acl = kernel_role->root_label;
43859+ return;
43860+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43861+ role = lookup_acl_role_label(task, uid, gid);
43862+
43863+ /* perform subject lookup in possibly new role
43864+ we can use this result below in the case where role == task->role
43865+ */
43866+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43867+
43868+ /* if we changed uid/gid, but result in the same role
43869+ and are using inheritance, don't lose the inherited subject
43870+ if current subject is other than what normal lookup
43871+ would result in, we arrived via inheritance, don't
43872+ lose subject
43873+ */
43874+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43875+ (subj == task->acl)))
43876+ task->acl = subj;
43877+
43878+ task->role = role;
43879+
43880+ task->is_writable = 0;
43881+
43882+ /* ignore additional mmap checks for processes that are writable
43883+ by the default ACL */
43884+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43885+ if (unlikely(obj->mode & GR_WRITE))
43886+ task->is_writable = 1;
43887+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43888+ if (unlikely(obj->mode & GR_WRITE))
43889+ task->is_writable = 1;
43890+
43891+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43892+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43893+#endif
43894+
43895+ gr_set_proc_res(task);
43896+
43897+ return;
43898+}
43899+
43900+int
43901+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43902+ const int unsafe_share)
43903+{
43904+ struct task_struct *task = current;
43905+ struct acl_subject_label *newacl;
43906+ struct acl_object_label *obj;
43907+ __u32 retmode;
43908+
43909+ if (unlikely(!(gr_status & GR_READY)))
43910+ return 0;
43911+
43912+ newacl = chk_subj_label(dentry, mnt, task->role);
43913+
43914+ task_lock(task);
43915+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43916+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43917+ !(task->role->roletype & GR_ROLE_GOD) &&
43918+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43919+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43920+ task_unlock(task);
43921+ if (unsafe_share)
43922+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43923+ else
43924+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43925+ return -EACCES;
43926+ }
43927+ task_unlock(task);
43928+
43929+ obj = chk_obj_label(dentry, mnt, task->acl);
43930+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43931+
43932+ if (!(task->acl->mode & GR_INHERITLEARN) &&
43933+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43934+ if (obj->nested)
43935+ task->acl = obj->nested;
43936+ else
43937+ task->acl = newacl;
43938+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43939+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43940+
43941+ task->is_writable = 0;
43942+
43943+ /* ignore additional mmap checks for processes that are writable
43944+ by the default ACL */
43945+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
43946+ if (unlikely(obj->mode & GR_WRITE))
43947+ task->is_writable = 1;
43948+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
43949+ if (unlikely(obj->mode & GR_WRITE))
43950+ task->is_writable = 1;
43951+
43952+ gr_set_proc_res(task);
43953+
43954+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43955+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43956+#endif
43957+ return 0;
43958+}
43959+
43960+/* always called with valid inodev ptr */
43961+static void
43962+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43963+{
43964+ struct acl_object_label *matchpo;
43965+ struct acl_subject_label *matchps;
43966+ struct acl_subject_label *subj;
43967+ struct acl_role_label *role;
43968+ unsigned int x;
43969+
43970+ FOR_EACH_ROLE_START(role)
43971+ FOR_EACH_SUBJECT_START(role, subj, x)
43972+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43973+ matchpo->mode |= GR_DELETED;
43974+ FOR_EACH_SUBJECT_END(subj,x)
43975+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
43976+ if (subj->inode == ino && subj->device == dev)
43977+ subj->mode |= GR_DELETED;
43978+ FOR_EACH_NESTED_SUBJECT_END(subj)
43979+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43980+ matchps->mode |= GR_DELETED;
43981+ FOR_EACH_ROLE_END(role)
43982+
43983+ inodev->nentry->deleted = 1;
43984+
43985+ return;
43986+}
43987+
43988+void
43989+gr_handle_delete(const ino_t ino, const dev_t dev)
43990+{
43991+ struct inodev_entry *inodev;
43992+
43993+ if (unlikely(!(gr_status & GR_READY)))
43994+ return;
43995+
43996+ write_lock(&gr_inode_lock);
43997+ inodev = lookup_inodev_entry(ino, dev);
43998+ if (inodev != NULL)
43999+ do_handle_delete(inodev, ino, dev);
44000+ write_unlock(&gr_inode_lock);
44001+
44002+ return;
44003+}
44004+
44005+static void
44006+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44007+ const ino_t newinode, const dev_t newdevice,
44008+ struct acl_subject_label *subj)
44009+{
44010+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44011+ struct acl_object_label *match;
44012+
44013+ match = subj->obj_hash[index];
44014+
44015+ while (match && (match->inode != oldinode ||
44016+ match->device != olddevice ||
44017+ !(match->mode & GR_DELETED)))
44018+ match = match->next;
44019+
44020+ if (match && (match->inode == oldinode)
44021+ && (match->device == olddevice)
44022+ && (match->mode & GR_DELETED)) {
44023+ if (match->prev == NULL) {
44024+ subj->obj_hash[index] = match->next;
44025+ if (match->next != NULL)
44026+ match->next->prev = NULL;
44027+ } else {
44028+ match->prev->next = match->next;
44029+ if (match->next != NULL)
44030+ match->next->prev = match->prev;
44031+ }
44032+ match->prev = NULL;
44033+ match->next = NULL;
44034+ match->inode = newinode;
44035+ match->device = newdevice;
44036+ match->mode &= ~GR_DELETED;
44037+
44038+ insert_acl_obj_label(match, subj);
44039+ }
44040+
44041+ return;
44042+}
44043+
44044+static void
44045+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44046+ const ino_t newinode, const dev_t newdevice,
44047+ struct acl_role_label *role)
44048+{
44049+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44050+ struct acl_subject_label *match;
44051+
44052+ match = role->subj_hash[index];
44053+
44054+ while (match && (match->inode != oldinode ||
44055+ match->device != olddevice ||
44056+ !(match->mode & GR_DELETED)))
44057+ match = match->next;
44058+
44059+ if (match && (match->inode == oldinode)
44060+ && (match->device == olddevice)
44061+ && (match->mode & GR_DELETED)) {
44062+ if (match->prev == NULL) {
44063+ role->subj_hash[index] = match->next;
44064+ if (match->next != NULL)
44065+ match->next->prev = NULL;
44066+ } else {
44067+ match->prev->next = match->next;
44068+ if (match->next != NULL)
44069+ match->next->prev = match->prev;
44070+ }
44071+ match->prev = NULL;
44072+ match->next = NULL;
44073+ match->inode = newinode;
44074+ match->device = newdevice;
44075+ match->mode &= ~GR_DELETED;
44076+
44077+ insert_acl_subj_label(match, role);
44078+ }
44079+
44080+ return;
44081+}
44082+
44083+static void
44084+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44085+ const ino_t newinode, const dev_t newdevice)
44086+{
44087+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44088+ struct inodev_entry *match;
44089+
44090+ match = inodev_set.i_hash[index];
44091+
44092+ while (match && (match->nentry->inode != oldinode ||
44093+ match->nentry->device != olddevice || !match->nentry->deleted))
44094+ match = match->next;
44095+
44096+ if (match && (match->nentry->inode == oldinode)
44097+ && (match->nentry->device == olddevice) &&
44098+ match->nentry->deleted) {
44099+ if (match->prev == NULL) {
44100+ inodev_set.i_hash[index] = match->next;
44101+ if (match->next != NULL)
44102+ match->next->prev = NULL;
44103+ } else {
44104+ match->prev->next = match->next;
44105+ if (match->next != NULL)
44106+ match->next->prev = match->prev;
44107+ }
44108+ match->prev = NULL;
44109+ match->next = NULL;
44110+ match->nentry->inode = newinode;
44111+ match->nentry->device = newdevice;
44112+ match->nentry->deleted = 0;
44113+
44114+ insert_inodev_entry(match);
44115+ }
44116+
44117+ return;
44118+}
44119+
44120+static void
44121+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44122+ const struct vfsmount *mnt)
44123+{
44124+ struct acl_subject_label *subj;
44125+ struct acl_role_label *role;
44126+ unsigned int x;
44127+ ino_t ino = dentry->d_inode->i_ino;
44128+ dev_t dev = __get_dev(dentry);
44129+
44130+ FOR_EACH_ROLE_START(role)
44131+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44132+
44133+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
44134+ if ((subj->inode == ino) && (subj->device == dev)) {
44135+ subj->inode = ino;
44136+ subj->device = dev;
44137+ }
44138+ FOR_EACH_NESTED_SUBJECT_END(subj)
44139+ FOR_EACH_SUBJECT_START(role, subj, x)
44140+ update_acl_obj_label(matchn->inode, matchn->device,
44141+ ino, dev, subj);
44142+ FOR_EACH_SUBJECT_END(subj,x)
44143+ FOR_EACH_ROLE_END(role)
44144+
44145+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44146+
44147+ return;
44148+}
44149+
44150+void
44151+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44152+{
44153+ struct name_entry *matchn;
44154+
44155+ if (unlikely(!(gr_status & GR_READY)))
44156+ return;
44157+
44158+ preempt_disable();
44159+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44160+
44161+ if (unlikely((unsigned long)matchn)) {
44162+ write_lock(&gr_inode_lock);
44163+ do_handle_create(matchn, dentry, mnt);
44164+ write_unlock(&gr_inode_lock);
44165+ }
44166+ preempt_enable();
44167+
44168+ return;
44169+}
44170+
44171+void
44172+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44173+ struct dentry *old_dentry,
44174+ struct dentry *new_dentry,
44175+ struct vfsmount *mnt, const __u8 replace)
44176+{
44177+ struct name_entry *matchn;
44178+ struct inodev_entry *inodev;
44179+ ino_t old_ino = old_dentry->d_inode->i_ino;
44180+ dev_t old_dev = __get_dev(old_dentry);
44181+
44182+ /* vfs_rename swaps the name and parent link for old_dentry and
44183+ new_dentry
44184+ at this point, old_dentry has the new name, parent link, and inode
44185+ for the renamed file
44186+ if a file is being replaced by a rename, new_dentry has the inode
44187+ and name for the replaced file
44188+ */
44189+
44190+ if (unlikely(!(gr_status & GR_READY)))
44191+ return;
44192+
44193+ preempt_disable();
44194+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44195+
44196+ /* we wouldn't have to check d_inode if it weren't for
44197+ NFS silly-renaming
44198+ */
44199+
44200+ write_lock(&gr_inode_lock);
44201+ if (unlikely(replace && new_dentry->d_inode)) {
44202+ ino_t new_ino = new_dentry->d_inode->i_ino;
44203+ dev_t new_dev = __get_dev(new_dentry);
44204+
44205+ inodev = lookup_inodev_entry(new_ino, new_dev);
44206+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44207+ do_handle_delete(inodev, new_ino, new_dev);
44208+ }
44209+
44210+ inodev = lookup_inodev_entry(old_ino, old_dev);
44211+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44212+ do_handle_delete(inodev, old_ino, old_dev);
44213+
44214+ if (unlikely((unsigned long)matchn))
44215+ do_handle_create(matchn, old_dentry, mnt);
44216+
44217+ write_unlock(&gr_inode_lock);
44218+ preempt_enable();
44219+
44220+ return;
44221+}
44222+
44223+static int
44224+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44225+ unsigned char **sum)
44226+{
44227+ struct acl_role_label *r;
44228+ struct role_allowed_ip *ipp;
44229+ struct role_transition *trans;
44230+ unsigned int i;
44231+ int found = 0;
44232+ u32 curr_ip = current->signal->curr_ip;
44233+
44234+ current->signal->saved_ip = curr_ip;
44235+
44236+ /* check transition table */
44237+
44238+ for (trans = current->role->transitions; trans; trans = trans->next) {
44239+ if (!strcmp(rolename, trans->rolename)) {
44240+ found = 1;
44241+ break;
44242+ }
44243+ }
44244+
44245+ if (!found)
44246+ return 0;
44247+
44248+ /* handle special roles that do not require authentication
44249+ and check ip */
44250+
44251+ FOR_EACH_ROLE_START(r)
44252+ if (!strcmp(rolename, r->rolename) &&
44253+ (r->roletype & GR_ROLE_SPECIAL)) {
44254+ found = 0;
44255+ if (r->allowed_ips != NULL) {
44256+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44257+ if ((ntohl(curr_ip) & ipp->netmask) ==
44258+ (ntohl(ipp->addr) & ipp->netmask))
44259+ found = 1;
44260+ }
44261+ } else
44262+ found = 2;
44263+ if (!found)
44264+ return 0;
44265+
44266+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44267+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44268+ *salt = NULL;
44269+ *sum = NULL;
44270+ return 1;
44271+ }
44272+ }
44273+ FOR_EACH_ROLE_END(r)
44274+
44275+ for (i = 0; i < num_sprole_pws; i++) {
44276+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44277+ *salt = acl_special_roles[i]->salt;
44278+ *sum = acl_special_roles[i]->sum;
44279+ return 1;
44280+ }
44281+ }
44282+
44283+ return 0;
44284+}
44285+
44286+static void
44287+assign_special_role(char *rolename)
44288+{
44289+ struct acl_object_label *obj;
44290+ struct acl_role_label *r;
44291+ struct acl_role_label *assigned = NULL;
44292+ struct task_struct *tsk;
44293+ struct file *filp;
44294+
44295+ FOR_EACH_ROLE_START(r)
44296+ if (!strcmp(rolename, r->rolename) &&
44297+ (r->roletype & GR_ROLE_SPECIAL)) {
44298+ assigned = r;
44299+ break;
44300+ }
44301+ FOR_EACH_ROLE_END(r)
44302+
44303+ if (!assigned)
44304+ return;
44305+
44306+ read_lock(&tasklist_lock);
44307+ read_lock(&grsec_exec_file_lock);
44308+
44309+ tsk = current->real_parent;
44310+ if (tsk == NULL)
44311+ goto out_unlock;
44312+
44313+ filp = tsk->exec_file;
44314+ if (filp == NULL)
44315+ goto out_unlock;
44316+
44317+ tsk->is_writable = 0;
44318+
44319+ tsk->acl_sp_role = 1;
44320+ tsk->acl_role_id = ++acl_sp_role_value;
44321+ tsk->role = assigned;
44322+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44323+
44324+ /* ignore additional mmap checks for processes that are writable
44325+ by the default ACL */
44326+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44327+ if (unlikely(obj->mode & GR_WRITE))
44328+ tsk->is_writable = 1;
44329+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44330+ if (unlikely(obj->mode & GR_WRITE))
44331+ tsk->is_writable = 1;
44332+
44333+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44334+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44335+#endif
44336+
44337+out_unlock:
44338+ read_unlock(&grsec_exec_file_lock);
44339+ read_unlock(&tasklist_lock);
44340+ return;
44341+}
44342+
44343+int gr_check_secure_terminal(struct task_struct *task)
44344+{
44345+ struct task_struct *p, *p2, *p3;
44346+ struct files_struct *files;
44347+ struct fdtable *fdt;
44348+ struct file *our_file = NULL, *file;
44349+ int i;
44350+
44351+ if (task->signal->tty == NULL)
44352+ return 1;
44353+
44354+ files = get_files_struct(task);
44355+ if (files != NULL) {
44356+ rcu_read_lock();
44357+ fdt = files_fdtable(files);
44358+ for (i=0; i < fdt->max_fds; i++) {
44359+ file = fcheck_files(files, i);
44360+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44361+ get_file(file);
44362+ our_file = file;
44363+ }
44364+ }
44365+ rcu_read_unlock();
44366+ put_files_struct(files);
44367+ }
44368+
44369+ if (our_file == NULL)
44370+ return 1;
44371+
44372+ read_lock(&tasklist_lock);
44373+ do_each_thread(p2, p) {
44374+ files = get_files_struct(p);
44375+ if (files == NULL ||
44376+ (p->signal && p->signal->tty == task->signal->tty)) {
44377+ if (files != NULL)
44378+ put_files_struct(files);
44379+ continue;
44380+ }
44381+ rcu_read_lock();
44382+ fdt = files_fdtable(files);
44383+ for (i=0; i < fdt->max_fds; i++) {
44384+ file = fcheck_files(files, i);
44385+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44386+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44387+ p3 = task;
44388+ while (p3->pid > 0) {
44389+ if (p3 == p)
44390+ break;
44391+ p3 = p3->real_parent;
44392+ }
44393+ if (p3 == p)
44394+ break;
44395+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44396+ gr_handle_alertkill(p);
44397+ rcu_read_unlock();
44398+ put_files_struct(files);
44399+ read_unlock(&tasklist_lock);
44400+ fput(our_file);
44401+ return 0;
44402+ }
44403+ }
44404+ rcu_read_unlock();
44405+ put_files_struct(files);
44406+ } while_each_thread(p2, p);
44407+ read_unlock(&tasklist_lock);
44408+
44409+ fput(our_file);
44410+ return 1;
44411+}
44412+
44413+ssize_t
44414+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44415+{
44416+ struct gr_arg_wrapper uwrap;
44417+ unsigned char *sprole_salt = NULL;
44418+ unsigned char *sprole_sum = NULL;
44419+ int error = sizeof (struct gr_arg_wrapper);
44420+ int error2 = 0;
44421+
44422+ mutex_lock(&gr_dev_mutex);
44423+
44424+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44425+ error = -EPERM;
44426+ goto out;
44427+ }
44428+
44429+ if (count != sizeof (struct gr_arg_wrapper)) {
44430+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44431+ error = -EINVAL;
44432+ goto out;
44433+ }
44434+
44435+
44436+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44437+ gr_auth_expires = 0;
44438+ gr_auth_attempts = 0;
44439+ }
44440+
44441+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44442+ error = -EFAULT;
44443+ goto out;
44444+ }
44445+
44446+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44447+ error = -EINVAL;
44448+ goto out;
44449+ }
44450+
44451+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44452+ error = -EFAULT;
44453+ goto out;
44454+ }
44455+
44456+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44457+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44458+ time_after(gr_auth_expires, get_seconds())) {
44459+ error = -EBUSY;
44460+ goto out;
44461+ }
44462+
44463+ /* if non-root trying to do anything other than use a special role,
44464+ do not attempt authentication, do not count towards authentication
44465+ locking
44466+ */
44467+
44468+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44469+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44470+ current_uid()) {
44471+ error = -EPERM;
44472+ goto out;
44473+ }
44474+
44475+ /* ensure pw and special role name are null terminated */
44476+
44477+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44478+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44479+
44480+ /* Okay.
44481+ * We have our enough of the argument structure..(we have yet
44482+ * to copy_from_user the tables themselves) . Copy the tables
44483+ * only if we need them, i.e. for loading operations. */
44484+
44485+ switch (gr_usermode->mode) {
44486+ case GR_STATUS:
44487+ if (gr_status & GR_READY) {
44488+ error = 1;
44489+ if (!gr_check_secure_terminal(current))
44490+ error = 3;
44491+ } else
44492+ error = 2;
44493+ goto out;
44494+ case GR_SHUTDOWN:
44495+ if ((gr_status & GR_READY)
44496+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44497+ pax_open_kernel();
44498+ gr_status &= ~GR_READY;
44499+ pax_close_kernel();
44500+
44501+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44502+ free_variables();
44503+ memset(gr_usermode, 0, sizeof (struct gr_arg));
44504+ memset(gr_system_salt, 0, GR_SALT_LEN);
44505+ memset(gr_system_sum, 0, GR_SHA_LEN);
44506+ } else if (gr_status & GR_READY) {
44507+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44508+ error = -EPERM;
44509+ } else {
44510+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44511+ error = -EAGAIN;
44512+ }
44513+ break;
44514+ case GR_ENABLE:
44515+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44516+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44517+ else {
44518+ if (gr_status & GR_READY)
44519+ error = -EAGAIN;
44520+ else
44521+ error = error2;
44522+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44523+ }
44524+ break;
44525+ case GR_RELOAD:
44526+ if (!(gr_status & GR_READY)) {
44527+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44528+ error = -EAGAIN;
44529+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44530+ preempt_disable();
44531+
44532+ pax_open_kernel();
44533+ gr_status &= ~GR_READY;
44534+ pax_close_kernel();
44535+
44536+ free_variables();
44537+ if (!(error2 = gracl_init(gr_usermode))) {
44538+ preempt_enable();
44539+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44540+ } else {
44541+ preempt_enable();
44542+ error = error2;
44543+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44544+ }
44545+ } else {
44546+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44547+ error = -EPERM;
44548+ }
44549+ break;
44550+ case GR_SEGVMOD:
44551+ if (unlikely(!(gr_status & GR_READY))) {
44552+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44553+ error = -EAGAIN;
44554+ break;
44555+ }
44556+
44557+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44558+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44559+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44560+ struct acl_subject_label *segvacl;
44561+ segvacl =
44562+ lookup_acl_subj_label(gr_usermode->segv_inode,
44563+ gr_usermode->segv_device,
44564+ current->role);
44565+ if (segvacl) {
44566+ segvacl->crashes = 0;
44567+ segvacl->expires = 0;
44568+ }
44569+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44570+ gr_remove_uid(gr_usermode->segv_uid);
44571+ }
44572+ } else {
44573+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44574+ error = -EPERM;
44575+ }
44576+ break;
44577+ case GR_SPROLE:
44578+ case GR_SPROLEPAM:
44579+ if (unlikely(!(gr_status & GR_READY))) {
44580+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44581+ error = -EAGAIN;
44582+ break;
44583+ }
44584+
44585+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44586+ current->role->expires = 0;
44587+ current->role->auth_attempts = 0;
44588+ }
44589+
44590+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44591+ time_after(current->role->expires, get_seconds())) {
44592+ error = -EBUSY;
44593+ goto out;
44594+ }
44595+
44596+ if (lookup_special_role_auth
44597+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44598+ && ((!sprole_salt && !sprole_sum)
44599+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44600+ char *p = "";
44601+ assign_special_role(gr_usermode->sp_role);
44602+ read_lock(&tasklist_lock);
44603+ if (current->real_parent)
44604+ p = current->real_parent->role->rolename;
44605+ read_unlock(&tasklist_lock);
44606+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44607+ p, acl_sp_role_value);
44608+ } else {
44609+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44610+ error = -EPERM;
44611+ if(!(current->role->auth_attempts++))
44612+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44613+
44614+ goto out;
44615+ }
44616+ break;
44617+ case GR_UNSPROLE:
44618+ if (unlikely(!(gr_status & GR_READY))) {
44619+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44620+ error = -EAGAIN;
44621+ break;
44622+ }
44623+
44624+ if (current->role->roletype & GR_ROLE_SPECIAL) {
44625+ char *p = "";
44626+ int i = 0;
44627+
44628+ read_lock(&tasklist_lock);
44629+ if (current->real_parent) {
44630+ p = current->real_parent->role->rolename;
44631+ i = current->real_parent->acl_role_id;
44632+ }
44633+ read_unlock(&tasklist_lock);
44634+
44635+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44636+ gr_set_acls(1);
44637+ } else {
44638+ error = -EPERM;
44639+ goto out;
44640+ }
44641+ break;
44642+ default:
44643+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44644+ error = -EINVAL;
44645+ break;
44646+ }
44647+
44648+ if (error != -EPERM)
44649+ goto out;
44650+
44651+ if(!(gr_auth_attempts++))
44652+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44653+
44654+ out:
44655+ mutex_unlock(&gr_dev_mutex);
44656+ return error;
44657+}
44658+
44659+/* must be called with
44660+ rcu_read_lock();
44661+ read_lock(&tasklist_lock);
44662+ read_lock(&grsec_exec_file_lock);
44663+*/
44664+int gr_apply_subject_to_task(struct task_struct *task)
44665+{
44666+ struct acl_object_label *obj;
44667+ char *tmpname;
44668+ struct acl_subject_label *tmpsubj;
44669+ struct file *filp;
44670+ struct name_entry *nmatch;
44671+
44672+ filp = task->exec_file;
44673+ if (filp == NULL)
44674+ return 0;
44675+
44676+ /* the following is to apply the correct subject
44677+ on binaries running when the RBAC system
44678+ is enabled, when the binaries have been
44679+ replaced or deleted since their execution
44680+ -----
44681+ when the RBAC system starts, the inode/dev
44682+ from exec_file will be one the RBAC system
44683+ is unaware of. It only knows the inode/dev
44684+ of the present file on disk, or the absence
44685+ of it.
44686+ */
44687+ preempt_disable();
44688+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44689+
44690+ nmatch = lookup_name_entry(tmpname);
44691+ preempt_enable();
44692+ tmpsubj = NULL;
44693+ if (nmatch) {
44694+ if (nmatch->deleted)
44695+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44696+ else
44697+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44698+ if (tmpsubj != NULL)
44699+ task->acl = tmpsubj;
44700+ }
44701+ if (tmpsubj == NULL)
44702+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44703+ task->role);
44704+ if (task->acl) {
44705+ task->is_writable = 0;
44706+ /* ignore additional mmap checks for processes that are writable
44707+ by the default ACL */
44708+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44709+ if (unlikely(obj->mode & GR_WRITE))
44710+ task->is_writable = 1;
44711+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44712+ if (unlikely(obj->mode & GR_WRITE))
44713+ task->is_writable = 1;
44714+
44715+ gr_set_proc_res(task);
44716+
44717+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44718+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44719+#endif
44720+ } else {
44721+ return 1;
44722+ }
44723+
44724+ return 0;
44725+}
44726+
44727+int
44728+gr_set_acls(const int type)
44729+{
44730+ struct task_struct *task, *task2;
44731+ struct acl_role_label *role = current->role;
44732+ __u16 acl_role_id = current->acl_role_id;
44733+ const struct cred *cred;
44734+ int ret;
44735+
44736+ rcu_read_lock();
44737+ read_lock(&tasklist_lock);
44738+ read_lock(&grsec_exec_file_lock);
44739+ do_each_thread(task2, task) {
44740+ /* check to see if we're called from the exit handler,
44741+ if so, only replace ACLs that have inherited the admin
44742+ ACL */
44743+
44744+ if (type && (task->role != role ||
44745+ task->acl_role_id != acl_role_id))
44746+ continue;
44747+
44748+ task->acl_role_id = 0;
44749+ task->acl_sp_role = 0;
44750+
44751+ if (task->exec_file) {
44752+ cred = __task_cred(task);
44753+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44754+ ret = gr_apply_subject_to_task(task);
44755+ if (ret) {
44756+ read_unlock(&grsec_exec_file_lock);
44757+ read_unlock(&tasklist_lock);
44758+ rcu_read_unlock();
44759+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44760+ return ret;
44761+ }
44762+ } else {
44763+ // it's a kernel process
44764+ task->role = kernel_role;
44765+ task->acl = kernel_role->root_label;
44766+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44767+ task->acl->mode &= ~GR_PROCFIND;
44768+#endif
44769+ }
44770+ } while_each_thread(task2, task);
44771+ read_unlock(&grsec_exec_file_lock);
44772+ read_unlock(&tasklist_lock);
44773+ rcu_read_unlock();
44774+
44775+ return 0;
44776+}
44777+
44778+void
44779+gr_learn_resource(const struct task_struct *task,
44780+ const int res, const unsigned long wanted, const int gt)
44781+{
44782+ struct acl_subject_label *acl;
44783+ const struct cred *cred;
44784+
44785+ if (unlikely((gr_status & GR_READY) &&
44786+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44787+ goto skip_reslog;
44788+
44789+#ifdef CONFIG_GRKERNSEC_RESLOG
44790+ gr_log_resource(task, res, wanted, gt);
44791+#endif
44792+ skip_reslog:
44793+
44794+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44795+ return;
44796+
44797+ acl = task->acl;
44798+
44799+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44800+ !(acl->resmask & (1 << (unsigned short) res))))
44801+ return;
44802+
44803+ if (wanted >= acl->res[res].rlim_cur) {
44804+ unsigned long res_add;
44805+
44806+ res_add = wanted;
44807+ switch (res) {
44808+ case RLIMIT_CPU:
44809+ res_add += GR_RLIM_CPU_BUMP;
44810+ break;
44811+ case RLIMIT_FSIZE:
44812+ res_add += GR_RLIM_FSIZE_BUMP;
44813+ break;
44814+ case RLIMIT_DATA:
44815+ res_add += GR_RLIM_DATA_BUMP;
44816+ break;
44817+ case RLIMIT_STACK:
44818+ res_add += GR_RLIM_STACK_BUMP;
44819+ break;
44820+ case RLIMIT_CORE:
44821+ res_add += GR_RLIM_CORE_BUMP;
44822+ break;
44823+ case RLIMIT_RSS:
44824+ res_add += GR_RLIM_RSS_BUMP;
44825+ break;
44826+ case RLIMIT_NPROC:
44827+ res_add += GR_RLIM_NPROC_BUMP;
44828+ break;
44829+ case RLIMIT_NOFILE:
44830+ res_add += GR_RLIM_NOFILE_BUMP;
44831+ break;
44832+ case RLIMIT_MEMLOCK:
44833+ res_add += GR_RLIM_MEMLOCK_BUMP;
44834+ break;
44835+ case RLIMIT_AS:
44836+ res_add += GR_RLIM_AS_BUMP;
44837+ break;
44838+ case RLIMIT_LOCKS:
44839+ res_add += GR_RLIM_LOCKS_BUMP;
44840+ break;
44841+ case RLIMIT_SIGPENDING:
44842+ res_add += GR_RLIM_SIGPENDING_BUMP;
44843+ break;
44844+ case RLIMIT_MSGQUEUE:
44845+ res_add += GR_RLIM_MSGQUEUE_BUMP;
44846+ break;
44847+ case RLIMIT_NICE:
44848+ res_add += GR_RLIM_NICE_BUMP;
44849+ break;
44850+ case RLIMIT_RTPRIO:
44851+ res_add += GR_RLIM_RTPRIO_BUMP;
44852+ break;
44853+ case RLIMIT_RTTIME:
44854+ res_add += GR_RLIM_RTTIME_BUMP;
44855+ break;
44856+ }
44857+
44858+ acl->res[res].rlim_cur = res_add;
44859+
44860+ if (wanted > acl->res[res].rlim_max)
44861+ acl->res[res].rlim_max = res_add;
44862+
44863+ /* only log the subject filename, since resource logging is supported for
44864+ single-subject learning only */
44865+ rcu_read_lock();
44866+ cred = __task_cred(task);
44867+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44868+ task->role->roletype, cred->uid, cred->gid, acl->filename,
44869+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44870+ "", (unsigned long) res, &task->signal->saved_ip);
44871+ rcu_read_unlock();
44872+ }
44873+
44874+ return;
44875+}
44876+
44877+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44878+void
44879+pax_set_initial_flags(struct linux_binprm *bprm)
44880+{
44881+ struct task_struct *task = current;
44882+ struct acl_subject_label *proc;
44883+ unsigned long flags;
44884+
44885+ if (unlikely(!(gr_status & GR_READY)))
44886+ return;
44887+
44888+ flags = pax_get_flags(task);
44889+
44890+ proc = task->acl;
44891+
44892+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44893+ flags &= ~MF_PAX_PAGEEXEC;
44894+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44895+ flags &= ~MF_PAX_SEGMEXEC;
44896+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44897+ flags &= ~MF_PAX_RANDMMAP;
44898+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44899+ flags &= ~MF_PAX_EMUTRAMP;
44900+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44901+ flags &= ~MF_PAX_MPROTECT;
44902+
44903+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44904+ flags |= MF_PAX_PAGEEXEC;
44905+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44906+ flags |= MF_PAX_SEGMEXEC;
44907+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44908+ flags |= MF_PAX_RANDMMAP;
44909+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44910+ flags |= MF_PAX_EMUTRAMP;
44911+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44912+ flags |= MF_PAX_MPROTECT;
44913+
44914+ pax_set_flags(task, flags);
44915+
44916+ return;
44917+}
44918+#endif
44919+
44920+#ifdef CONFIG_SYSCTL
44921+/* Eric Biederman likes breaking userland ABI and every inode-based security
44922+ system to save 35kb of memory */
44923+
44924+/* we modify the passed in filename, but adjust it back before returning */
44925+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44926+{
44927+ struct name_entry *nmatch;
44928+ char *p, *lastp = NULL;
44929+ struct acl_object_label *obj = NULL, *tmp;
44930+ struct acl_subject_label *tmpsubj;
44931+ char c = '\0';
44932+
44933+ read_lock(&gr_inode_lock);
44934+
44935+ p = name + len - 1;
44936+ do {
44937+ nmatch = lookup_name_entry(name);
44938+ if (lastp != NULL)
44939+ *lastp = c;
44940+
44941+ if (nmatch == NULL)
44942+ goto next_component;
44943+ tmpsubj = current->acl;
44944+ do {
44945+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44946+ if (obj != NULL) {
44947+ tmp = obj->globbed;
44948+ while (tmp) {
44949+ if (!glob_match(tmp->filename, name)) {
44950+ obj = tmp;
44951+ goto found_obj;
44952+ }
44953+ tmp = tmp->next;
44954+ }
44955+ goto found_obj;
44956+ }
44957+ } while ((tmpsubj = tmpsubj->parent_subject));
44958+next_component:
44959+ /* end case */
44960+ if (p == name)
44961+ break;
44962+
44963+ while (*p != '/')
44964+ p--;
44965+ if (p == name)
44966+ lastp = p + 1;
44967+ else {
44968+ lastp = p;
44969+ p--;
44970+ }
44971+ c = *lastp;
44972+ *lastp = '\0';
44973+ } while (1);
44974+found_obj:
44975+ read_unlock(&gr_inode_lock);
44976+ /* obj returned will always be non-null */
44977+ return obj;
44978+}
44979+
44980+/* returns 0 when allowing, non-zero on error
44981+ op of 0 is used for readdir, so we don't log the names of hidden files
44982+*/
44983+__u32
44984+gr_handle_sysctl(const struct ctl_table *table, const int op)
44985+{
44986+ struct ctl_table *tmp;
44987+ const char *proc_sys = "/proc/sys";
44988+ char *path;
44989+ struct acl_object_label *obj;
44990+ unsigned short len = 0, pos = 0, depth = 0, i;
44991+ __u32 err = 0;
44992+ __u32 mode = 0;
44993+
44994+ if (unlikely(!(gr_status & GR_READY)))
44995+ return 0;
44996+
44997+ /* for now, ignore operations on non-sysctl entries if it's not a
44998+ readdir*/
44999+ if (table->child != NULL && op != 0)
45000+ return 0;
45001+
45002+ mode |= GR_FIND;
45003+ /* it's only a read if it's an entry, read on dirs is for readdir */
45004+ if (op & MAY_READ)
45005+ mode |= GR_READ;
45006+ if (op & MAY_WRITE)
45007+ mode |= GR_WRITE;
45008+
45009+ preempt_disable();
45010+
45011+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45012+
45013+ /* it's only a read/write if it's an actual entry, not a dir
45014+ (which are opened for readdir)
45015+ */
45016+
45017+ /* convert the requested sysctl entry into a pathname */
45018+
45019+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45020+ len += strlen(tmp->procname);
45021+ len++;
45022+ depth++;
45023+ }
45024+
45025+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45026+ /* deny */
45027+ goto out;
45028+ }
45029+
45030+ memset(path, 0, PAGE_SIZE);
45031+
45032+ memcpy(path, proc_sys, strlen(proc_sys));
45033+
45034+ pos += strlen(proc_sys);
45035+
45036+ for (; depth > 0; depth--) {
45037+ path[pos] = '/';
45038+ pos++;
45039+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45040+ if (depth == i) {
45041+ memcpy(path + pos, tmp->procname,
45042+ strlen(tmp->procname));
45043+ pos += strlen(tmp->procname);
45044+ }
45045+ i++;
45046+ }
45047+ }
45048+
45049+ obj = gr_lookup_by_name(path, pos);
45050+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45051+
45052+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45053+ ((err & mode) != mode))) {
45054+ __u32 new_mode = mode;
45055+
45056+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45057+
45058+ err = 0;
45059+ gr_log_learn_sysctl(path, new_mode);
45060+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45061+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45062+ err = -ENOENT;
45063+ } else if (!(err & GR_FIND)) {
45064+ err = -ENOENT;
45065+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45066+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45067+ path, (mode & GR_READ) ? " reading" : "",
45068+ (mode & GR_WRITE) ? " writing" : "");
45069+ err = -EACCES;
45070+ } else if ((err & mode) != mode) {
45071+ err = -EACCES;
45072+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45073+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45074+ path, (mode & GR_READ) ? " reading" : "",
45075+ (mode & GR_WRITE) ? " writing" : "");
45076+ err = 0;
45077+ } else
45078+ err = 0;
45079+
45080+ out:
45081+ preempt_enable();
45082+
45083+ return err;
45084+}
45085+#endif
45086+
45087+int
45088+gr_handle_proc_ptrace(struct task_struct *task)
45089+{
45090+ struct file *filp;
45091+ struct task_struct *tmp = task;
45092+ struct task_struct *curtemp = current;
45093+ __u32 retmode;
45094+
45095+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45096+ if (unlikely(!(gr_status & GR_READY)))
45097+ return 0;
45098+#endif
45099+
45100+ read_lock(&tasklist_lock);
45101+ read_lock(&grsec_exec_file_lock);
45102+ filp = task->exec_file;
45103+
45104+ while (tmp->pid > 0) {
45105+ if (tmp == curtemp)
45106+ break;
45107+ tmp = tmp->real_parent;
45108+ }
45109+
45110+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45111+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45112+ read_unlock(&grsec_exec_file_lock);
45113+ read_unlock(&tasklist_lock);
45114+ return 1;
45115+ }
45116+
45117+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45118+ if (!(gr_status & GR_READY)) {
45119+ read_unlock(&grsec_exec_file_lock);
45120+ read_unlock(&tasklist_lock);
45121+ return 0;
45122+ }
45123+#endif
45124+
45125+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45126+ read_unlock(&grsec_exec_file_lock);
45127+ read_unlock(&tasklist_lock);
45128+
45129+ if (retmode & GR_NOPTRACE)
45130+ return 1;
45131+
45132+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45133+ && (current->acl != task->acl || (current->acl != current->role->root_label
45134+ && current->pid != task->pid)))
45135+ return 1;
45136+
45137+ return 0;
45138+}
45139+
45140+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45141+{
45142+ if (unlikely(!(gr_status & GR_READY)))
45143+ return;
45144+
45145+ if (!(current->role->roletype & GR_ROLE_GOD))
45146+ return;
45147+
45148+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45149+ p->role->rolename, gr_task_roletype_to_char(p),
45150+ p->acl->filename);
45151+}
45152+
45153+int
45154+gr_handle_ptrace(struct task_struct *task, const long request)
45155+{
45156+ struct task_struct *tmp = task;
45157+ struct task_struct *curtemp = current;
45158+ __u32 retmode;
45159+
45160+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45161+ if (unlikely(!(gr_status & GR_READY)))
45162+ return 0;
45163+#endif
45164+
45165+ read_lock(&tasklist_lock);
45166+ while (tmp->pid > 0) {
45167+ if (tmp == curtemp)
45168+ break;
45169+ tmp = tmp->real_parent;
45170+ }
45171+
45172+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45173+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45174+ read_unlock(&tasklist_lock);
45175+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45176+ return 1;
45177+ }
45178+ read_unlock(&tasklist_lock);
45179+
45180+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45181+ if (!(gr_status & GR_READY))
45182+ return 0;
45183+#endif
45184+
45185+ read_lock(&grsec_exec_file_lock);
45186+ if (unlikely(!task->exec_file)) {
45187+ read_unlock(&grsec_exec_file_lock);
45188+ return 0;
45189+ }
45190+
45191+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45192+ read_unlock(&grsec_exec_file_lock);
45193+
45194+ if (retmode & GR_NOPTRACE) {
45195+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45196+ return 1;
45197+ }
45198+
45199+ if (retmode & GR_PTRACERD) {
45200+ switch (request) {
45201+ case PTRACE_POKETEXT:
45202+ case PTRACE_POKEDATA:
45203+ case PTRACE_POKEUSR:
45204+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45205+ case PTRACE_SETREGS:
45206+ case PTRACE_SETFPREGS:
45207+#endif
45208+#ifdef CONFIG_X86
45209+ case PTRACE_SETFPXREGS:
45210+#endif
45211+#ifdef CONFIG_ALTIVEC
45212+ case PTRACE_SETVRREGS:
45213+#endif
45214+ return 1;
45215+ default:
45216+ return 0;
45217+ }
45218+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
45219+ !(current->role->roletype & GR_ROLE_GOD) &&
45220+ (current->acl != task->acl)) {
45221+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45222+ return 1;
45223+ }
45224+
45225+ return 0;
45226+}
45227+
45228+static int is_writable_mmap(const struct file *filp)
45229+{
45230+ struct task_struct *task = current;
45231+ struct acl_object_label *obj, *obj2;
45232+
45233+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45234+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45235+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45236+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45237+ task->role->root_label);
45238+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45239+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45240+ return 1;
45241+ }
45242+ }
45243+ return 0;
45244+}
45245+
45246+int
45247+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45248+{
45249+ __u32 mode;
45250+
45251+ if (unlikely(!file || !(prot & PROT_EXEC)))
45252+ return 1;
45253+
45254+ if (is_writable_mmap(file))
45255+ return 0;
45256+
45257+ mode =
45258+ gr_search_file(file->f_path.dentry,
45259+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45260+ file->f_path.mnt);
45261+
45262+ if (!gr_tpe_allow(file))
45263+ return 0;
45264+
45265+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45266+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45267+ return 0;
45268+ } else if (unlikely(!(mode & GR_EXEC))) {
45269+ return 0;
45270+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45271+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45272+ return 1;
45273+ }
45274+
45275+ return 1;
45276+}
45277+
45278+int
45279+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45280+{
45281+ __u32 mode;
45282+
45283+ if (unlikely(!file || !(prot & PROT_EXEC)))
45284+ return 1;
45285+
45286+ if (is_writable_mmap(file))
45287+ return 0;
45288+
45289+ mode =
45290+ gr_search_file(file->f_path.dentry,
45291+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45292+ file->f_path.mnt);
45293+
45294+ if (!gr_tpe_allow(file))
45295+ return 0;
45296+
45297+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45298+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45299+ return 0;
45300+ } else if (unlikely(!(mode & GR_EXEC))) {
45301+ return 0;
45302+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45303+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45304+ return 1;
45305+ }
45306+
45307+ return 1;
45308+}
45309+
45310+void
45311+gr_acl_handle_psacct(struct task_struct *task, const long code)
45312+{
45313+ unsigned long runtime;
45314+ unsigned long cputime;
45315+ unsigned int wday, cday;
45316+ __u8 whr, chr;
45317+ __u8 wmin, cmin;
45318+ __u8 wsec, csec;
45319+ struct timespec timeval;
45320+
45321+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45322+ !(task->acl->mode & GR_PROCACCT)))
45323+ return;
45324+
45325+ do_posix_clock_monotonic_gettime(&timeval);
45326+ runtime = timeval.tv_sec - task->start_time.tv_sec;
45327+ wday = runtime / (3600 * 24);
45328+ runtime -= wday * (3600 * 24);
45329+ whr = runtime / 3600;
45330+ runtime -= whr * 3600;
45331+ wmin = runtime / 60;
45332+ runtime -= wmin * 60;
45333+ wsec = runtime;
45334+
45335+ cputime = (task->utime + task->stime) / HZ;
45336+ cday = cputime / (3600 * 24);
45337+ cputime -= cday * (3600 * 24);
45338+ chr = cputime / 3600;
45339+ cputime -= chr * 3600;
45340+ cmin = cputime / 60;
45341+ cputime -= cmin * 60;
45342+ csec = cputime;
45343+
45344+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45345+
45346+ return;
45347+}
45348+
45349+void gr_set_kernel_label(struct task_struct *task)
45350+{
45351+ if (gr_status & GR_READY) {
45352+ task->role = kernel_role;
45353+ task->acl = kernel_role->root_label;
45354+ }
45355+ return;
45356+}
45357+
45358+#ifdef CONFIG_TASKSTATS
45359+int gr_is_taskstats_denied(int pid)
45360+{
45361+ struct task_struct *task;
45362+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45363+ const struct cred *cred;
45364+#endif
45365+ int ret = 0;
45366+
45367+ /* restrict taskstats viewing to un-chrooted root users
45368+ who have the 'view' subject flag if the RBAC system is enabled
45369+ */
45370+
45371+ rcu_read_lock();
45372+ read_lock(&tasklist_lock);
45373+ task = find_task_by_vpid(pid);
45374+ if (task) {
45375+#ifdef CONFIG_GRKERNSEC_CHROOT
45376+ if (proc_is_chrooted(task))
45377+ ret = -EACCES;
45378+#endif
45379+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45380+ cred = __task_cred(task);
45381+#ifdef CONFIG_GRKERNSEC_PROC_USER
45382+ if (cred->uid != 0)
45383+ ret = -EACCES;
45384+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45385+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45386+ ret = -EACCES;
45387+#endif
45388+#endif
45389+ if (gr_status & GR_READY) {
45390+ if (!(task->acl->mode & GR_VIEW))
45391+ ret = -EACCES;
45392+ }
45393+ } else
45394+ ret = -ENOENT;
45395+
45396+ read_unlock(&tasklist_lock);
45397+ rcu_read_unlock();
45398+
45399+ return ret;
45400+}
45401+#endif
45402+
45403+/* AUXV entries are filled via a descendant of search_binary_handler
45404+ after we've already applied the subject for the target
45405+*/
45406+int gr_acl_enable_at_secure(void)
45407+{
45408+ if (unlikely(!(gr_status & GR_READY)))
45409+ return 0;
45410+
45411+ if (current->acl->mode & GR_ATSECURE)
45412+ return 1;
45413+
45414+ return 0;
45415+}
45416+
45417+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45418+{
45419+ struct task_struct *task = current;
45420+ struct dentry *dentry = file->f_path.dentry;
45421+ struct vfsmount *mnt = file->f_path.mnt;
45422+ struct acl_object_label *obj, *tmp;
45423+ struct acl_subject_label *subj;
45424+ unsigned int bufsize;
45425+ int is_not_root;
45426+ char *path;
45427+ dev_t dev = __get_dev(dentry);
45428+
45429+ if (unlikely(!(gr_status & GR_READY)))
45430+ return 1;
45431+
45432+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45433+ return 1;
45434+
45435+ /* ignore Eric Biederman */
45436+ if (IS_PRIVATE(dentry->d_inode))
45437+ return 1;
45438+
45439+ subj = task->acl;
45440+ do {
45441+ obj = lookup_acl_obj_label(ino, dev, subj);
45442+ if (obj != NULL)
45443+ return (obj->mode & GR_FIND) ? 1 : 0;
45444+ } while ((subj = subj->parent_subject));
45445+
45446+ /* this is purely an optimization since we're looking for an object
45447+ for the directory we're doing a readdir on
45448+ if it's possible for any globbed object to match the entry we're
45449+ filling into the directory, then the object we find here will be
45450+ an anchor point with attached globbed objects
45451+ */
45452+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45453+ if (obj->globbed == NULL)
45454+ return (obj->mode & GR_FIND) ? 1 : 0;
45455+
45456+ is_not_root = ((obj->filename[0] == '/') &&
45457+ (obj->filename[1] == '\0')) ? 0 : 1;
45458+ bufsize = PAGE_SIZE - namelen - is_not_root;
45459+
45460+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
45461+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45462+ return 1;
45463+
45464+ preempt_disable();
45465+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45466+ bufsize);
45467+
45468+ bufsize = strlen(path);
45469+
45470+ /* if base is "/", don't append an additional slash */
45471+ if (is_not_root)
45472+ *(path + bufsize) = '/';
45473+ memcpy(path + bufsize + is_not_root, name, namelen);
45474+ *(path + bufsize + namelen + is_not_root) = '\0';
45475+
45476+ tmp = obj->globbed;
45477+ while (tmp) {
45478+ if (!glob_match(tmp->filename, path)) {
45479+ preempt_enable();
45480+ return (tmp->mode & GR_FIND) ? 1 : 0;
45481+ }
45482+ tmp = tmp->next;
45483+ }
45484+ preempt_enable();
45485+ return (obj->mode & GR_FIND) ? 1 : 0;
45486+}
45487+
45488+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45489+EXPORT_SYMBOL(gr_acl_is_enabled);
45490+#endif
45491+EXPORT_SYMBOL(gr_learn_resource);
45492+EXPORT_SYMBOL(gr_set_kernel_label);
45493+#ifdef CONFIG_SECURITY
45494+EXPORT_SYMBOL(gr_check_user_change);
45495+EXPORT_SYMBOL(gr_check_group_change);
45496+#endif
45497+
45498diff -urNp linux-3.0.3/grsecurity/gracl_cap.c linux-3.0.3/grsecurity/gracl_cap.c
45499--- linux-3.0.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45500+++ linux-3.0.3/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45501@@ -0,0 +1,139 @@
45502+#include <linux/kernel.h>
45503+#include <linux/module.h>
45504+#include <linux/sched.h>
45505+#include <linux/gracl.h>
45506+#include <linux/grsecurity.h>
45507+#include <linux/grinternal.h>
45508+
45509+static const char *captab_log[] = {
45510+ "CAP_CHOWN",
45511+ "CAP_DAC_OVERRIDE",
45512+ "CAP_DAC_READ_SEARCH",
45513+ "CAP_FOWNER",
45514+ "CAP_FSETID",
45515+ "CAP_KILL",
45516+ "CAP_SETGID",
45517+ "CAP_SETUID",
45518+ "CAP_SETPCAP",
45519+ "CAP_LINUX_IMMUTABLE",
45520+ "CAP_NET_BIND_SERVICE",
45521+ "CAP_NET_BROADCAST",
45522+ "CAP_NET_ADMIN",
45523+ "CAP_NET_RAW",
45524+ "CAP_IPC_LOCK",
45525+ "CAP_IPC_OWNER",
45526+ "CAP_SYS_MODULE",
45527+ "CAP_SYS_RAWIO",
45528+ "CAP_SYS_CHROOT",
45529+ "CAP_SYS_PTRACE",
45530+ "CAP_SYS_PACCT",
45531+ "CAP_SYS_ADMIN",
45532+ "CAP_SYS_BOOT",
45533+ "CAP_SYS_NICE",
45534+ "CAP_SYS_RESOURCE",
45535+ "CAP_SYS_TIME",
45536+ "CAP_SYS_TTY_CONFIG",
45537+ "CAP_MKNOD",
45538+ "CAP_LEASE",
45539+ "CAP_AUDIT_WRITE",
45540+ "CAP_AUDIT_CONTROL",
45541+ "CAP_SETFCAP",
45542+ "CAP_MAC_OVERRIDE",
45543+ "CAP_MAC_ADMIN",
45544+ "CAP_SYSLOG"
45545+};
45546+
45547+EXPORT_SYMBOL(gr_is_capable);
45548+EXPORT_SYMBOL(gr_is_capable_nolog);
45549+
45550+int
45551+gr_is_capable(const int cap)
45552+{
45553+ struct task_struct *task = current;
45554+ const struct cred *cred = current_cred();
45555+ struct acl_subject_label *curracl;
45556+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45557+ kernel_cap_t cap_audit = __cap_empty_set;
45558+
45559+ if (!gr_acl_is_enabled())
45560+ return 1;
45561+
45562+ curracl = task->acl;
45563+
45564+ cap_drop = curracl->cap_lower;
45565+ cap_mask = curracl->cap_mask;
45566+ cap_audit = curracl->cap_invert_audit;
45567+
45568+ while ((curracl = curracl->parent_subject)) {
45569+ /* if the cap isn't specified in the current computed mask but is specified in the
45570+ current level subject, and is lowered in the current level subject, then add
45571+ it to the set of dropped capabilities
45572+ otherwise, add the current level subject's mask to the current computed mask
45573+ */
45574+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45575+ cap_raise(cap_mask, cap);
45576+ if (cap_raised(curracl->cap_lower, cap))
45577+ cap_raise(cap_drop, cap);
45578+ if (cap_raised(curracl->cap_invert_audit, cap))
45579+ cap_raise(cap_audit, cap);
45580+ }
45581+ }
45582+
45583+ if (!cap_raised(cap_drop, cap)) {
45584+ if (cap_raised(cap_audit, cap))
45585+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45586+ return 1;
45587+ }
45588+
45589+ curracl = task->acl;
45590+
45591+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45592+ && cap_raised(cred->cap_effective, cap)) {
45593+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45594+ task->role->roletype, cred->uid,
45595+ cred->gid, task->exec_file ?
45596+ gr_to_filename(task->exec_file->f_path.dentry,
45597+ task->exec_file->f_path.mnt) : curracl->filename,
45598+ curracl->filename, 0UL,
45599+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45600+ return 1;
45601+ }
45602+
45603+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45604+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45605+ return 0;
45606+}
45607+
45608+int
45609+gr_is_capable_nolog(const int cap)
45610+{
45611+ struct acl_subject_label *curracl;
45612+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45613+
45614+ if (!gr_acl_is_enabled())
45615+ return 1;
45616+
45617+ curracl = current->acl;
45618+
45619+ cap_drop = curracl->cap_lower;
45620+ cap_mask = curracl->cap_mask;
45621+
45622+ while ((curracl = curracl->parent_subject)) {
45623+ /* if the cap isn't specified in the current computed mask but is specified in the
45624+ current level subject, and is lowered in the current level subject, then add
45625+ it to the set of dropped capabilities
45626+ otherwise, add the current level subject's mask to the current computed mask
45627+ */
45628+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45629+ cap_raise(cap_mask, cap);
45630+ if (cap_raised(curracl->cap_lower, cap))
45631+ cap_raise(cap_drop, cap);
45632+ }
45633+ }
45634+
45635+ if (!cap_raised(cap_drop, cap))
45636+ return 1;
45637+
45638+ return 0;
45639+}
45640+
45641diff -urNp linux-3.0.3/grsecurity/gracl_fs.c linux-3.0.3/grsecurity/gracl_fs.c
45642--- linux-3.0.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45643+++ linux-3.0.3/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45644@@ -0,0 +1,431 @@
45645+#include <linux/kernel.h>
45646+#include <linux/sched.h>
45647+#include <linux/types.h>
45648+#include <linux/fs.h>
45649+#include <linux/file.h>
45650+#include <linux/stat.h>
45651+#include <linux/grsecurity.h>
45652+#include <linux/grinternal.h>
45653+#include <linux/gracl.h>
45654+
45655+__u32
45656+gr_acl_handle_hidden_file(const struct dentry * dentry,
45657+ const struct vfsmount * mnt)
45658+{
45659+ __u32 mode;
45660+
45661+ if (unlikely(!dentry->d_inode))
45662+ return GR_FIND;
45663+
45664+ mode =
45665+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45666+
45667+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45668+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45669+ return mode;
45670+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45671+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45672+ return 0;
45673+ } else if (unlikely(!(mode & GR_FIND)))
45674+ return 0;
45675+
45676+ return GR_FIND;
45677+}
45678+
45679+__u32
45680+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45681+ const int fmode)
45682+{
45683+ __u32 reqmode = GR_FIND;
45684+ __u32 mode;
45685+
45686+ if (unlikely(!dentry->d_inode))
45687+ return reqmode;
45688+
45689+ if (unlikely(fmode & O_APPEND))
45690+ reqmode |= GR_APPEND;
45691+ else if (unlikely(fmode & FMODE_WRITE))
45692+ reqmode |= GR_WRITE;
45693+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45694+ reqmode |= GR_READ;
45695+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45696+ reqmode &= ~GR_READ;
45697+ mode =
45698+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45699+ mnt);
45700+
45701+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45702+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45703+ reqmode & GR_READ ? " reading" : "",
45704+ reqmode & GR_WRITE ? " writing" : reqmode &
45705+ GR_APPEND ? " appending" : "");
45706+ return reqmode;
45707+ } else
45708+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45709+ {
45710+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45711+ reqmode & GR_READ ? " reading" : "",
45712+ reqmode & GR_WRITE ? " writing" : reqmode &
45713+ GR_APPEND ? " appending" : "");
45714+ return 0;
45715+ } else if (unlikely((mode & reqmode) != reqmode))
45716+ return 0;
45717+
45718+ return reqmode;
45719+}
45720+
45721+__u32
45722+gr_acl_handle_creat(const struct dentry * dentry,
45723+ const struct dentry * p_dentry,
45724+ const struct vfsmount * p_mnt, const int fmode,
45725+ const int imode)
45726+{
45727+ __u32 reqmode = GR_WRITE | GR_CREATE;
45728+ __u32 mode;
45729+
45730+ if (unlikely(fmode & O_APPEND))
45731+ reqmode |= GR_APPEND;
45732+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45733+ reqmode |= GR_READ;
45734+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45735+ reqmode |= GR_SETID;
45736+
45737+ mode =
45738+ gr_check_create(dentry, p_dentry, p_mnt,
45739+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45740+
45741+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45742+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45743+ reqmode & GR_READ ? " reading" : "",
45744+ reqmode & GR_WRITE ? " writing" : reqmode &
45745+ GR_APPEND ? " appending" : "");
45746+ return reqmode;
45747+ } else
45748+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45749+ {
45750+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45751+ reqmode & GR_READ ? " reading" : "",
45752+ reqmode & GR_WRITE ? " writing" : reqmode &
45753+ GR_APPEND ? " appending" : "");
45754+ return 0;
45755+ } else if (unlikely((mode & reqmode) != reqmode))
45756+ return 0;
45757+
45758+ return reqmode;
45759+}
45760+
45761+__u32
45762+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45763+ const int fmode)
45764+{
45765+ __u32 mode, reqmode = GR_FIND;
45766+
45767+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45768+ reqmode |= GR_EXEC;
45769+ if (fmode & S_IWOTH)
45770+ reqmode |= GR_WRITE;
45771+ if (fmode & S_IROTH)
45772+ reqmode |= GR_READ;
45773+
45774+ mode =
45775+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45776+ mnt);
45777+
45778+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45779+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45780+ reqmode & GR_READ ? " reading" : "",
45781+ reqmode & GR_WRITE ? " writing" : "",
45782+ reqmode & GR_EXEC ? " executing" : "");
45783+ return reqmode;
45784+ } else
45785+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45786+ {
45787+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45788+ reqmode & GR_READ ? " reading" : "",
45789+ reqmode & GR_WRITE ? " writing" : "",
45790+ reqmode & GR_EXEC ? " executing" : "");
45791+ return 0;
45792+ } else if (unlikely((mode & reqmode) != reqmode))
45793+ return 0;
45794+
45795+ return reqmode;
45796+}
45797+
45798+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45799+{
45800+ __u32 mode;
45801+
45802+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45803+
45804+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45805+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45806+ return mode;
45807+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45808+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45809+ return 0;
45810+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45811+ return 0;
45812+
45813+ return (reqmode);
45814+}
45815+
45816+__u32
45817+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45818+{
45819+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45820+}
45821+
45822+__u32
45823+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45824+{
45825+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45826+}
45827+
45828+__u32
45829+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45830+{
45831+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45832+}
45833+
45834+__u32
45835+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45836+{
45837+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45838+}
45839+
45840+__u32
45841+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45842+ mode_t mode)
45843+{
45844+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45845+ return 1;
45846+
45847+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45848+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45849+ GR_FCHMOD_ACL_MSG);
45850+ } else {
45851+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45852+ }
45853+}
45854+
45855+__u32
45856+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45857+ mode_t mode)
45858+{
45859+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45860+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45861+ GR_CHMOD_ACL_MSG);
45862+ } else {
45863+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45864+ }
45865+}
45866+
45867+__u32
45868+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45869+{
45870+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45871+}
45872+
45873+__u32
45874+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45875+{
45876+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45877+}
45878+
45879+__u32
45880+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45881+{
45882+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45883+}
45884+
45885+__u32
45886+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45887+{
45888+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45889+ GR_UNIXCONNECT_ACL_MSG);
45890+}
45891+
45892+/* hardlinks require at minimum create permission,
45893+ any additional privilege required is based on the
45894+ privilege of the file being linked to
45895+*/
45896+__u32
45897+gr_acl_handle_link(const struct dentry * new_dentry,
45898+ const struct dentry * parent_dentry,
45899+ const struct vfsmount * parent_mnt,
45900+ const struct dentry * old_dentry,
45901+ const struct vfsmount * old_mnt, const char *to)
45902+{
45903+ __u32 mode;
45904+ __u32 needmode = GR_CREATE | GR_LINK;
45905+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45906+
45907+ mode =
45908+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45909+ old_mnt);
45910+
45911+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45912+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45913+ return mode;
45914+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45915+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45916+ return 0;
45917+ } else if (unlikely((mode & needmode) != needmode))
45918+ return 0;
45919+
45920+ return 1;
45921+}
45922+
45923+__u32
45924+gr_acl_handle_symlink(const struct dentry * new_dentry,
45925+ const struct dentry * parent_dentry,
45926+ const struct vfsmount * parent_mnt, const char *from)
45927+{
45928+ __u32 needmode = GR_WRITE | GR_CREATE;
45929+ __u32 mode;
45930+
45931+ mode =
45932+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
45933+ GR_CREATE | GR_AUDIT_CREATE |
45934+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45935+
45936+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45937+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45938+ return mode;
45939+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45940+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45941+ return 0;
45942+ } else if (unlikely((mode & needmode) != needmode))
45943+ return 0;
45944+
45945+ return (GR_WRITE | GR_CREATE);
45946+}
45947+
45948+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45949+{
45950+ __u32 mode;
45951+
45952+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45953+
45954+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45955+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45956+ return mode;
45957+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45958+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45959+ return 0;
45960+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
45961+ return 0;
45962+
45963+ return (reqmode);
45964+}
45965+
45966+__u32
45967+gr_acl_handle_mknod(const struct dentry * new_dentry,
45968+ const struct dentry * parent_dentry,
45969+ const struct vfsmount * parent_mnt,
45970+ const int mode)
45971+{
45972+ __u32 reqmode = GR_WRITE | GR_CREATE;
45973+ if (unlikely(mode & (S_ISUID | S_ISGID)))
45974+ reqmode |= GR_SETID;
45975+
45976+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45977+ reqmode, GR_MKNOD_ACL_MSG);
45978+}
45979+
45980+__u32
45981+gr_acl_handle_mkdir(const struct dentry *new_dentry,
45982+ const struct dentry *parent_dentry,
45983+ const struct vfsmount *parent_mnt)
45984+{
45985+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45986+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45987+}
45988+
45989+#define RENAME_CHECK_SUCCESS(old, new) \
45990+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45991+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45992+
45993+int
45994+gr_acl_handle_rename(struct dentry *new_dentry,
45995+ struct dentry *parent_dentry,
45996+ const struct vfsmount *parent_mnt,
45997+ struct dentry *old_dentry,
45998+ struct inode *old_parent_inode,
45999+ struct vfsmount *old_mnt, const char *newname)
46000+{
46001+ __u32 comp1, comp2;
46002+ int error = 0;
46003+
46004+ if (unlikely(!gr_acl_is_enabled()))
46005+ return 0;
46006+
46007+ if (!new_dentry->d_inode) {
46008+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46009+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46010+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46011+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46012+ GR_DELETE | GR_AUDIT_DELETE |
46013+ GR_AUDIT_READ | GR_AUDIT_WRITE |
46014+ GR_SUPPRESS, old_mnt);
46015+ } else {
46016+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46017+ GR_CREATE | GR_DELETE |
46018+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46019+ GR_AUDIT_READ | GR_AUDIT_WRITE |
46020+ GR_SUPPRESS, parent_mnt);
46021+ comp2 =
46022+ gr_search_file(old_dentry,
46023+ GR_READ | GR_WRITE | GR_AUDIT_READ |
46024+ GR_DELETE | GR_AUDIT_DELETE |
46025+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46026+ }
46027+
46028+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46029+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46030+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46031+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46032+ && !(comp2 & GR_SUPPRESS)) {
46033+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46034+ error = -EACCES;
46035+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46036+ error = -EACCES;
46037+
46038+ return error;
46039+}
46040+
46041+void
46042+gr_acl_handle_exit(void)
46043+{
46044+ u16 id;
46045+ char *rolename;
46046+ struct file *exec_file;
46047+
46048+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46049+ !(current->role->roletype & GR_ROLE_PERSIST))) {
46050+ id = current->acl_role_id;
46051+ rolename = current->role->rolename;
46052+ gr_set_acls(1);
46053+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46054+ }
46055+
46056+ write_lock(&grsec_exec_file_lock);
46057+ exec_file = current->exec_file;
46058+ current->exec_file = NULL;
46059+ write_unlock(&grsec_exec_file_lock);
46060+
46061+ if (exec_file)
46062+ fput(exec_file);
46063+}
46064+
46065+int
46066+gr_acl_handle_procpidmem(const struct task_struct *task)
46067+{
46068+ if (unlikely(!gr_acl_is_enabled()))
46069+ return 0;
46070+
46071+ if (task != current && task->acl->mode & GR_PROTPROCFD)
46072+ return -EACCES;
46073+
46074+ return 0;
46075+}
46076diff -urNp linux-3.0.3/grsecurity/gracl_ip.c linux-3.0.3/grsecurity/gracl_ip.c
46077--- linux-3.0.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46078+++ linux-3.0.3/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
46079@@ -0,0 +1,381 @@
46080+#include <linux/kernel.h>
46081+#include <asm/uaccess.h>
46082+#include <asm/errno.h>
46083+#include <net/sock.h>
46084+#include <linux/file.h>
46085+#include <linux/fs.h>
46086+#include <linux/net.h>
46087+#include <linux/in.h>
46088+#include <linux/skbuff.h>
46089+#include <linux/ip.h>
46090+#include <linux/udp.h>
46091+#include <linux/types.h>
46092+#include <linux/sched.h>
46093+#include <linux/netdevice.h>
46094+#include <linux/inetdevice.h>
46095+#include <linux/gracl.h>
46096+#include <linux/grsecurity.h>
46097+#include <linux/grinternal.h>
46098+
46099+#define GR_BIND 0x01
46100+#define GR_CONNECT 0x02
46101+#define GR_INVERT 0x04
46102+#define GR_BINDOVERRIDE 0x08
46103+#define GR_CONNECTOVERRIDE 0x10
46104+#define GR_SOCK_FAMILY 0x20
46105+
46106+static const char * gr_protocols[IPPROTO_MAX] = {
46107+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46108+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46109+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46110+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46111+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46112+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46113+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46114+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46115+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46116+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46117+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46118+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46119+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46120+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46121+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46122+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46123+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46124+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46125+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46126+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46127+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46128+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46129+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46130+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46131+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46132+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46133+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46134+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46135+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46136+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46137+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46138+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46139+ };
46140+
46141+static const char * gr_socktypes[SOCK_MAX] = {
46142+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46143+ "unknown:7", "unknown:8", "unknown:9", "packet"
46144+ };
46145+
46146+static const char * gr_sockfamilies[AF_MAX+1] = {
46147+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46148+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46149+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46150+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46151+ };
46152+
46153+const char *
46154+gr_proto_to_name(unsigned char proto)
46155+{
46156+ return gr_protocols[proto];
46157+}
46158+
46159+const char *
46160+gr_socktype_to_name(unsigned char type)
46161+{
46162+ return gr_socktypes[type];
46163+}
46164+
46165+const char *
46166+gr_sockfamily_to_name(unsigned char family)
46167+{
46168+ return gr_sockfamilies[family];
46169+}
46170+
46171+int
46172+gr_search_socket(const int domain, const int type, const int protocol)
46173+{
46174+ struct acl_subject_label *curr;
46175+ const struct cred *cred = current_cred();
46176+
46177+ if (unlikely(!gr_acl_is_enabled()))
46178+ goto exit;
46179+
46180+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
46181+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46182+ goto exit; // let the kernel handle it
46183+
46184+ curr = current->acl;
46185+
46186+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46187+ /* the family is allowed, if this is PF_INET allow it only if
46188+ the extra sock type/protocol checks pass */
46189+ if (domain == PF_INET)
46190+ goto inet_check;
46191+ goto exit;
46192+ } else {
46193+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46194+ __u32 fakeip = 0;
46195+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46196+ current->role->roletype, cred->uid,
46197+ cred->gid, current->exec_file ?
46198+ gr_to_filename(current->exec_file->f_path.dentry,
46199+ current->exec_file->f_path.mnt) :
46200+ curr->filename, curr->filename,
46201+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46202+ &current->signal->saved_ip);
46203+ goto exit;
46204+ }
46205+ goto exit_fail;
46206+ }
46207+
46208+inet_check:
46209+ /* the rest of this checking is for IPv4 only */
46210+ if (!curr->ips)
46211+ goto exit;
46212+
46213+ if ((curr->ip_type & (1 << type)) &&
46214+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46215+ goto exit;
46216+
46217+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46218+ /* we don't place acls on raw sockets , and sometimes
46219+ dgram/ip sockets are opened for ioctl and not
46220+ bind/connect, so we'll fake a bind learn log */
46221+ if (type == SOCK_RAW || type == SOCK_PACKET) {
46222+ __u32 fakeip = 0;
46223+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46224+ current->role->roletype, cred->uid,
46225+ cred->gid, current->exec_file ?
46226+ gr_to_filename(current->exec_file->f_path.dentry,
46227+ current->exec_file->f_path.mnt) :
46228+ curr->filename, curr->filename,
46229+ &fakeip, 0, type,
46230+ protocol, GR_CONNECT, &current->signal->saved_ip);
46231+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46232+ __u32 fakeip = 0;
46233+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46234+ current->role->roletype, cred->uid,
46235+ cred->gid, current->exec_file ?
46236+ gr_to_filename(current->exec_file->f_path.dentry,
46237+ current->exec_file->f_path.mnt) :
46238+ curr->filename, curr->filename,
46239+ &fakeip, 0, type,
46240+ protocol, GR_BIND, &current->signal->saved_ip);
46241+ }
46242+ /* we'll log when they use connect or bind */
46243+ goto exit;
46244+ }
46245+
46246+exit_fail:
46247+ if (domain == PF_INET)
46248+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46249+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
46250+ else
46251+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46252+ gr_socktype_to_name(type), protocol);
46253+
46254+ return 0;
46255+exit:
46256+ return 1;
46257+}
46258+
46259+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46260+{
46261+ if ((ip->mode & mode) &&
46262+ (ip_port >= ip->low) &&
46263+ (ip_port <= ip->high) &&
46264+ ((ntohl(ip_addr) & our_netmask) ==
46265+ (ntohl(our_addr) & our_netmask))
46266+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46267+ && (ip->type & (1 << type))) {
46268+ if (ip->mode & GR_INVERT)
46269+ return 2; // specifically denied
46270+ else
46271+ return 1; // allowed
46272+ }
46273+
46274+ return 0; // not specifically allowed, may continue parsing
46275+}
46276+
46277+static int
46278+gr_search_connectbind(const int full_mode, struct sock *sk,
46279+ struct sockaddr_in *addr, const int type)
46280+{
46281+ char iface[IFNAMSIZ] = {0};
46282+ struct acl_subject_label *curr;
46283+ struct acl_ip_label *ip;
46284+ struct inet_sock *isk;
46285+ struct net_device *dev;
46286+ struct in_device *idev;
46287+ unsigned long i;
46288+ int ret;
46289+ int mode = full_mode & (GR_BIND | GR_CONNECT);
46290+ __u32 ip_addr = 0;
46291+ __u32 our_addr;
46292+ __u32 our_netmask;
46293+ char *p;
46294+ __u16 ip_port = 0;
46295+ const struct cred *cred = current_cred();
46296+
46297+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46298+ return 0;
46299+
46300+ curr = current->acl;
46301+ isk = inet_sk(sk);
46302+
46303+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46304+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46305+ addr->sin_addr.s_addr = curr->inaddr_any_override;
46306+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46307+ struct sockaddr_in saddr;
46308+ int err;
46309+
46310+ saddr.sin_family = AF_INET;
46311+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
46312+ saddr.sin_port = isk->inet_sport;
46313+
46314+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46315+ if (err)
46316+ return err;
46317+
46318+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46319+ if (err)
46320+ return err;
46321+ }
46322+
46323+ if (!curr->ips)
46324+ return 0;
46325+
46326+ ip_addr = addr->sin_addr.s_addr;
46327+ ip_port = ntohs(addr->sin_port);
46328+
46329+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46330+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46331+ current->role->roletype, cred->uid,
46332+ cred->gid, current->exec_file ?
46333+ gr_to_filename(current->exec_file->f_path.dentry,
46334+ current->exec_file->f_path.mnt) :
46335+ curr->filename, curr->filename,
46336+ &ip_addr, ip_port, type,
46337+ sk->sk_protocol, mode, &current->signal->saved_ip);
46338+ return 0;
46339+ }
46340+
46341+ for (i = 0; i < curr->ip_num; i++) {
46342+ ip = *(curr->ips + i);
46343+ if (ip->iface != NULL) {
46344+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
46345+ p = strchr(iface, ':');
46346+ if (p != NULL)
46347+ *p = '\0';
46348+ dev = dev_get_by_name(sock_net(sk), iface);
46349+ if (dev == NULL)
46350+ continue;
46351+ idev = in_dev_get(dev);
46352+ if (idev == NULL) {
46353+ dev_put(dev);
46354+ continue;
46355+ }
46356+ rcu_read_lock();
46357+ for_ifa(idev) {
46358+ if (!strcmp(ip->iface, ifa->ifa_label)) {
46359+ our_addr = ifa->ifa_address;
46360+ our_netmask = 0xffffffff;
46361+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46362+ if (ret == 1) {
46363+ rcu_read_unlock();
46364+ in_dev_put(idev);
46365+ dev_put(dev);
46366+ return 0;
46367+ } else if (ret == 2) {
46368+ rcu_read_unlock();
46369+ in_dev_put(idev);
46370+ dev_put(dev);
46371+ goto denied;
46372+ }
46373+ }
46374+ } endfor_ifa(idev);
46375+ rcu_read_unlock();
46376+ in_dev_put(idev);
46377+ dev_put(dev);
46378+ } else {
46379+ our_addr = ip->addr;
46380+ our_netmask = ip->netmask;
46381+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46382+ if (ret == 1)
46383+ return 0;
46384+ else if (ret == 2)
46385+ goto denied;
46386+ }
46387+ }
46388+
46389+denied:
46390+ if (mode == GR_BIND)
46391+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46392+ else if (mode == GR_CONNECT)
46393+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46394+
46395+ return -EACCES;
46396+}
46397+
46398+int
46399+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46400+{
46401+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46402+}
46403+
46404+int
46405+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46406+{
46407+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46408+}
46409+
46410+int gr_search_listen(struct socket *sock)
46411+{
46412+ struct sock *sk = sock->sk;
46413+ struct sockaddr_in addr;
46414+
46415+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46416+ addr.sin_port = inet_sk(sk)->inet_sport;
46417+
46418+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46419+}
46420+
46421+int gr_search_accept(struct socket *sock)
46422+{
46423+ struct sock *sk = sock->sk;
46424+ struct sockaddr_in addr;
46425+
46426+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46427+ addr.sin_port = inet_sk(sk)->inet_sport;
46428+
46429+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46430+}
46431+
46432+int
46433+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46434+{
46435+ if (addr)
46436+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46437+ else {
46438+ struct sockaddr_in sin;
46439+ const struct inet_sock *inet = inet_sk(sk);
46440+
46441+ sin.sin_addr.s_addr = inet->inet_daddr;
46442+ sin.sin_port = inet->inet_dport;
46443+
46444+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46445+ }
46446+}
46447+
46448+int
46449+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46450+{
46451+ struct sockaddr_in sin;
46452+
46453+ if (unlikely(skb->len < sizeof (struct udphdr)))
46454+ return 0; // skip this packet
46455+
46456+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46457+ sin.sin_port = udp_hdr(skb)->source;
46458+
46459+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46460+}
46461diff -urNp linux-3.0.3/grsecurity/gracl_learn.c linux-3.0.3/grsecurity/gracl_learn.c
46462--- linux-3.0.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46463+++ linux-3.0.3/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46464@@ -0,0 +1,207 @@
46465+#include <linux/kernel.h>
46466+#include <linux/mm.h>
46467+#include <linux/sched.h>
46468+#include <linux/poll.h>
46469+#include <linux/string.h>
46470+#include <linux/file.h>
46471+#include <linux/types.h>
46472+#include <linux/vmalloc.h>
46473+#include <linux/grinternal.h>
46474+
46475+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46476+ size_t count, loff_t *ppos);
46477+extern int gr_acl_is_enabled(void);
46478+
46479+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46480+static int gr_learn_attached;
46481+
46482+/* use a 512k buffer */
46483+#define LEARN_BUFFER_SIZE (512 * 1024)
46484+
46485+static DEFINE_SPINLOCK(gr_learn_lock);
46486+static DEFINE_MUTEX(gr_learn_user_mutex);
46487+
46488+/* we need to maintain two buffers, so that the kernel context of grlearn
46489+ uses a semaphore around the userspace copying, and the other kernel contexts
46490+ use a spinlock when copying into the buffer, since they cannot sleep
46491+*/
46492+static char *learn_buffer;
46493+static char *learn_buffer_user;
46494+static int learn_buffer_len;
46495+static int learn_buffer_user_len;
46496+
46497+static ssize_t
46498+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46499+{
46500+ DECLARE_WAITQUEUE(wait, current);
46501+ ssize_t retval = 0;
46502+
46503+ add_wait_queue(&learn_wait, &wait);
46504+ set_current_state(TASK_INTERRUPTIBLE);
46505+ do {
46506+ mutex_lock(&gr_learn_user_mutex);
46507+ spin_lock(&gr_learn_lock);
46508+ if (learn_buffer_len)
46509+ break;
46510+ spin_unlock(&gr_learn_lock);
46511+ mutex_unlock(&gr_learn_user_mutex);
46512+ if (file->f_flags & O_NONBLOCK) {
46513+ retval = -EAGAIN;
46514+ goto out;
46515+ }
46516+ if (signal_pending(current)) {
46517+ retval = -ERESTARTSYS;
46518+ goto out;
46519+ }
46520+
46521+ schedule();
46522+ } while (1);
46523+
46524+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46525+ learn_buffer_user_len = learn_buffer_len;
46526+ retval = learn_buffer_len;
46527+ learn_buffer_len = 0;
46528+
46529+ spin_unlock(&gr_learn_lock);
46530+
46531+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46532+ retval = -EFAULT;
46533+
46534+ mutex_unlock(&gr_learn_user_mutex);
46535+out:
46536+ set_current_state(TASK_RUNNING);
46537+ remove_wait_queue(&learn_wait, &wait);
46538+ return retval;
46539+}
46540+
46541+static unsigned int
46542+poll_learn(struct file * file, poll_table * wait)
46543+{
46544+ poll_wait(file, &learn_wait, wait);
46545+
46546+ if (learn_buffer_len)
46547+ return (POLLIN | POLLRDNORM);
46548+
46549+ return 0;
46550+}
46551+
46552+void
46553+gr_clear_learn_entries(void)
46554+{
46555+ char *tmp;
46556+
46557+ mutex_lock(&gr_learn_user_mutex);
46558+ spin_lock(&gr_learn_lock);
46559+ tmp = learn_buffer;
46560+ learn_buffer = NULL;
46561+ spin_unlock(&gr_learn_lock);
46562+ if (tmp)
46563+ vfree(tmp);
46564+ if (learn_buffer_user != NULL) {
46565+ vfree(learn_buffer_user);
46566+ learn_buffer_user = NULL;
46567+ }
46568+ learn_buffer_len = 0;
46569+ mutex_unlock(&gr_learn_user_mutex);
46570+
46571+ return;
46572+}
46573+
46574+void
46575+gr_add_learn_entry(const char *fmt, ...)
46576+{
46577+ va_list args;
46578+ unsigned int len;
46579+
46580+ if (!gr_learn_attached)
46581+ return;
46582+
46583+ spin_lock(&gr_learn_lock);
46584+
46585+ /* leave a gap at the end so we know when it's "full" but don't have to
46586+ compute the exact length of the string we're trying to append
46587+ */
46588+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46589+ spin_unlock(&gr_learn_lock);
46590+ wake_up_interruptible(&learn_wait);
46591+ return;
46592+ }
46593+ if (learn_buffer == NULL) {
46594+ spin_unlock(&gr_learn_lock);
46595+ return;
46596+ }
46597+
46598+ va_start(args, fmt);
46599+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46600+ va_end(args);
46601+
46602+ learn_buffer_len += len + 1;
46603+
46604+ spin_unlock(&gr_learn_lock);
46605+ wake_up_interruptible(&learn_wait);
46606+
46607+ return;
46608+}
46609+
46610+static int
46611+open_learn(struct inode *inode, struct file *file)
46612+{
46613+ if (file->f_mode & FMODE_READ && gr_learn_attached)
46614+ return -EBUSY;
46615+ if (file->f_mode & FMODE_READ) {
46616+ int retval = 0;
46617+ mutex_lock(&gr_learn_user_mutex);
46618+ if (learn_buffer == NULL)
46619+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46620+ if (learn_buffer_user == NULL)
46621+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46622+ if (learn_buffer == NULL) {
46623+ retval = -ENOMEM;
46624+ goto out_error;
46625+ }
46626+ if (learn_buffer_user == NULL) {
46627+ retval = -ENOMEM;
46628+ goto out_error;
46629+ }
46630+ learn_buffer_len = 0;
46631+ learn_buffer_user_len = 0;
46632+ gr_learn_attached = 1;
46633+out_error:
46634+ mutex_unlock(&gr_learn_user_mutex);
46635+ return retval;
46636+ }
46637+ return 0;
46638+}
46639+
46640+static int
46641+close_learn(struct inode *inode, struct file *file)
46642+{
46643+ if (file->f_mode & FMODE_READ) {
46644+ char *tmp = NULL;
46645+ mutex_lock(&gr_learn_user_mutex);
46646+ spin_lock(&gr_learn_lock);
46647+ tmp = learn_buffer;
46648+ learn_buffer = NULL;
46649+ spin_unlock(&gr_learn_lock);
46650+ if (tmp)
46651+ vfree(tmp);
46652+ if (learn_buffer_user != NULL) {
46653+ vfree(learn_buffer_user);
46654+ learn_buffer_user = NULL;
46655+ }
46656+ learn_buffer_len = 0;
46657+ learn_buffer_user_len = 0;
46658+ gr_learn_attached = 0;
46659+ mutex_unlock(&gr_learn_user_mutex);
46660+ }
46661+
46662+ return 0;
46663+}
46664+
46665+const struct file_operations grsec_fops = {
46666+ .read = read_learn,
46667+ .write = write_grsec_handler,
46668+ .open = open_learn,
46669+ .release = close_learn,
46670+ .poll = poll_learn,
46671+};
46672diff -urNp linux-3.0.3/grsecurity/gracl_res.c linux-3.0.3/grsecurity/gracl_res.c
46673--- linux-3.0.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46674+++ linux-3.0.3/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46675@@ -0,0 +1,68 @@
46676+#include <linux/kernel.h>
46677+#include <linux/sched.h>
46678+#include <linux/gracl.h>
46679+#include <linux/grinternal.h>
46680+
46681+static const char *restab_log[] = {
46682+ [RLIMIT_CPU] = "RLIMIT_CPU",
46683+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46684+ [RLIMIT_DATA] = "RLIMIT_DATA",
46685+ [RLIMIT_STACK] = "RLIMIT_STACK",
46686+ [RLIMIT_CORE] = "RLIMIT_CORE",
46687+ [RLIMIT_RSS] = "RLIMIT_RSS",
46688+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
46689+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46690+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46691+ [RLIMIT_AS] = "RLIMIT_AS",
46692+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46693+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46694+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46695+ [RLIMIT_NICE] = "RLIMIT_NICE",
46696+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46697+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46698+ [GR_CRASH_RES] = "RLIMIT_CRASH"
46699+};
46700+
46701+void
46702+gr_log_resource(const struct task_struct *task,
46703+ const int res, const unsigned long wanted, const int gt)
46704+{
46705+ const struct cred *cred;
46706+ unsigned long rlim;
46707+
46708+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
46709+ return;
46710+
46711+ // not yet supported resource
46712+ if (unlikely(!restab_log[res]))
46713+ return;
46714+
46715+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46716+ rlim = task_rlimit_max(task, res);
46717+ else
46718+ rlim = task_rlimit(task, res);
46719+
46720+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46721+ return;
46722+
46723+ rcu_read_lock();
46724+ cred = __task_cred(task);
46725+
46726+ if (res == RLIMIT_NPROC &&
46727+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46728+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46729+ goto out_rcu_unlock;
46730+ else if (res == RLIMIT_MEMLOCK &&
46731+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46732+ goto out_rcu_unlock;
46733+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46734+ goto out_rcu_unlock;
46735+ rcu_read_unlock();
46736+
46737+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46738+
46739+ return;
46740+out_rcu_unlock:
46741+ rcu_read_unlock();
46742+ return;
46743+}
46744diff -urNp linux-3.0.3/grsecurity/gracl_segv.c linux-3.0.3/grsecurity/gracl_segv.c
46745--- linux-3.0.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46746+++ linux-3.0.3/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46747@@ -0,0 +1,299 @@
46748+#include <linux/kernel.h>
46749+#include <linux/mm.h>
46750+#include <asm/uaccess.h>
46751+#include <asm/errno.h>
46752+#include <asm/mman.h>
46753+#include <net/sock.h>
46754+#include <linux/file.h>
46755+#include <linux/fs.h>
46756+#include <linux/net.h>
46757+#include <linux/in.h>
46758+#include <linux/slab.h>
46759+#include <linux/types.h>
46760+#include <linux/sched.h>
46761+#include <linux/timer.h>
46762+#include <linux/gracl.h>
46763+#include <linux/grsecurity.h>
46764+#include <linux/grinternal.h>
46765+
46766+static struct crash_uid *uid_set;
46767+static unsigned short uid_used;
46768+static DEFINE_SPINLOCK(gr_uid_lock);
46769+extern rwlock_t gr_inode_lock;
46770+extern struct acl_subject_label *
46771+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46772+ struct acl_role_label *role);
46773+
46774+#ifdef CONFIG_BTRFS_FS
46775+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46776+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46777+#endif
46778+
46779+static inline dev_t __get_dev(const struct dentry *dentry)
46780+{
46781+#ifdef CONFIG_BTRFS_FS
46782+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46783+ return get_btrfs_dev_from_inode(dentry->d_inode);
46784+ else
46785+#endif
46786+ return dentry->d_inode->i_sb->s_dev;
46787+}
46788+
46789+int
46790+gr_init_uidset(void)
46791+{
46792+ uid_set =
46793+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46794+ uid_used = 0;
46795+
46796+ return uid_set ? 1 : 0;
46797+}
46798+
46799+void
46800+gr_free_uidset(void)
46801+{
46802+ if (uid_set)
46803+ kfree(uid_set);
46804+
46805+ return;
46806+}
46807+
46808+int
46809+gr_find_uid(const uid_t uid)
46810+{
46811+ struct crash_uid *tmp = uid_set;
46812+ uid_t buid;
46813+ int low = 0, high = uid_used - 1, mid;
46814+
46815+ while (high >= low) {
46816+ mid = (low + high) >> 1;
46817+ buid = tmp[mid].uid;
46818+ if (buid == uid)
46819+ return mid;
46820+ if (buid > uid)
46821+ high = mid - 1;
46822+ if (buid < uid)
46823+ low = mid + 1;
46824+ }
46825+
46826+ return -1;
46827+}
46828+
46829+static __inline__ void
46830+gr_insertsort(void)
46831+{
46832+ unsigned short i, j;
46833+ struct crash_uid index;
46834+
46835+ for (i = 1; i < uid_used; i++) {
46836+ index = uid_set[i];
46837+ j = i;
46838+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46839+ uid_set[j] = uid_set[j - 1];
46840+ j--;
46841+ }
46842+ uid_set[j] = index;
46843+ }
46844+
46845+ return;
46846+}
46847+
46848+static __inline__ void
46849+gr_insert_uid(const uid_t uid, const unsigned long expires)
46850+{
46851+ int loc;
46852+
46853+ if (uid_used == GR_UIDTABLE_MAX)
46854+ return;
46855+
46856+ loc = gr_find_uid(uid);
46857+
46858+ if (loc >= 0) {
46859+ uid_set[loc].expires = expires;
46860+ return;
46861+ }
46862+
46863+ uid_set[uid_used].uid = uid;
46864+ uid_set[uid_used].expires = expires;
46865+ uid_used++;
46866+
46867+ gr_insertsort();
46868+
46869+ return;
46870+}
46871+
46872+void
46873+gr_remove_uid(const unsigned short loc)
46874+{
46875+ unsigned short i;
46876+
46877+ for (i = loc + 1; i < uid_used; i++)
46878+ uid_set[i - 1] = uid_set[i];
46879+
46880+ uid_used--;
46881+
46882+ return;
46883+}
46884+
46885+int
46886+gr_check_crash_uid(const uid_t uid)
46887+{
46888+ int loc;
46889+ int ret = 0;
46890+
46891+ if (unlikely(!gr_acl_is_enabled()))
46892+ return 0;
46893+
46894+ spin_lock(&gr_uid_lock);
46895+ loc = gr_find_uid(uid);
46896+
46897+ if (loc < 0)
46898+ goto out_unlock;
46899+
46900+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
46901+ gr_remove_uid(loc);
46902+ else
46903+ ret = 1;
46904+
46905+out_unlock:
46906+ spin_unlock(&gr_uid_lock);
46907+ return ret;
46908+}
46909+
46910+static __inline__ int
46911+proc_is_setxid(const struct cred *cred)
46912+{
46913+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
46914+ cred->uid != cred->fsuid)
46915+ return 1;
46916+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46917+ cred->gid != cred->fsgid)
46918+ return 1;
46919+
46920+ return 0;
46921+}
46922+
46923+extern int gr_fake_force_sig(int sig, struct task_struct *t);
46924+
46925+void
46926+gr_handle_crash(struct task_struct *task, const int sig)
46927+{
46928+ struct acl_subject_label *curr;
46929+ struct acl_subject_label *curr2;
46930+ struct task_struct *tsk, *tsk2;
46931+ const struct cred *cred;
46932+ const struct cred *cred2;
46933+
46934+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46935+ return;
46936+
46937+ if (unlikely(!gr_acl_is_enabled()))
46938+ return;
46939+
46940+ curr = task->acl;
46941+
46942+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
46943+ return;
46944+
46945+ if (time_before_eq(curr->expires, get_seconds())) {
46946+ curr->expires = 0;
46947+ curr->crashes = 0;
46948+ }
46949+
46950+ curr->crashes++;
46951+
46952+ if (!curr->expires)
46953+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46954+
46955+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46956+ time_after(curr->expires, get_seconds())) {
46957+ rcu_read_lock();
46958+ cred = __task_cred(task);
46959+ if (cred->uid && proc_is_setxid(cred)) {
46960+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46961+ spin_lock(&gr_uid_lock);
46962+ gr_insert_uid(cred->uid, curr->expires);
46963+ spin_unlock(&gr_uid_lock);
46964+ curr->expires = 0;
46965+ curr->crashes = 0;
46966+ read_lock(&tasklist_lock);
46967+ do_each_thread(tsk2, tsk) {
46968+ cred2 = __task_cred(tsk);
46969+ if (tsk != task && cred2->uid == cred->uid)
46970+ gr_fake_force_sig(SIGKILL, tsk);
46971+ } while_each_thread(tsk2, tsk);
46972+ read_unlock(&tasklist_lock);
46973+ } else {
46974+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46975+ read_lock(&tasklist_lock);
46976+ do_each_thread(tsk2, tsk) {
46977+ if (likely(tsk != task)) {
46978+ curr2 = tsk->acl;
46979+
46980+ if (curr2->device == curr->device &&
46981+ curr2->inode == curr->inode)
46982+ gr_fake_force_sig(SIGKILL, tsk);
46983+ }
46984+ } while_each_thread(tsk2, tsk);
46985+ read_unlock(&tasklist_lock);
46986+ }
46987+ rcu_read_unlock();
46988+ }
46989+
46990+ return;
46991+}
46992+
46993+int
46994+gr_check_crash_exec(const struct file *filp)
46995+{
46996+ struct acl_subject_label *curr;
46997+
46998+ if (unlikely(!gr_acl_is_enabled()))
46999+ return 0;
47000+
47001+ read_lock(&gr_inode_lock);
47002+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47003+ __get_dev(filp->f_path.dentry),
47004+ current->role);
47005+ read_unlock(&gr_inode_lock);
47006+
47007+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47008+ (!curr->crashes && !curr->expires))
47009+ return 0;
47010+
47011+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47012+ time_after(curr->expires, get_seconds()))
47013+ return 1;
47014+ else if (time_before_eq(curr->expires, get_seconds())) {
47015+ curr->crashes = 0;
47016+ curr->expires = 0;
47017+ }
47018+
47019+ return 0;
47020+}
47021+
47022+void
47023+gr_handle_alertkill(struct task_struct *task)
47024+{
47025+ struct acl_subject_label *curracl;
47026+ __u32 curr_ip;
47027+ struct task_struct *p, *p2;
47028+
47029+ if (unlikely(!gr_acl_is_enabled()))
47030+ return;
47031+
47032+ curracl = task->acl;
47033+ curr_ip = task->signal->curr_ip;
47034+
47035+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47036+ read_lock(&tasklist_lock);
47037+ do_each_thread(p2, p) {
47038+ if (p->signal->curr_ip == curr_ip)
47039+ gr_fake_force_sig(SIGKILL, p);
47040+ } while_each_thread(p2, p);
47041+ read_unlock(&tasklist_lock);
47042+ } else if (curracl->mode & GR_KILLPROC)
47043+ gr_fake_force_sig(SIGKILL, task);
47044+
47045+ return;
47046+}
47047diff -urNp linux-3.0.3/grsecurity/gracl_shm.c linux-3.0.3/grsecurity/gracl_shm.c
47048--- linux-3.0.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47049+++ linux-3.0.3/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
47050@@ -0,0 +1,40 @@
47051+#include <linux/kernel.h>
47052+#include <linux/mm.h>
47053+#include <linux/sched.h>
47054+#include <linux/file.h>
47055+#include <linux/ipc.h>
47056+#include <linux/gracl.h>
47057+#include <linux/grsecurity.h>
47058+#include <linux/grinternal.h>
47059+
47060+int
47061+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47062+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47063+{
47064+ struct task_struct *task;
47065+
47066+ if (!gr_acl_is_enabled())
47067+ return 1;
47068+
47069+ rcu_read_lock();
47070+ read_lock(&tasklist_lock);
47071+
47072+ task = find_task_by_vpid(shm_cprid);
47073+
47074+ if (unlikely(!task))
47075+ task = find_task_by_vpid(shm_lapid);
47076+
47077+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47078+ (task->pid == shm_lapid)) &&
47079+ (task->acl->mode & GR_PROTSHM) &&
47080+ (task->acl != current->acl))) {
47081+ read_unlock(&tasklist_lock);
47082+ rcu_read_unlock();
47083+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47084+ return 0;
47085+ }
47086+ read_unlock(&tasklist_lock);
47087+ rcu_read_unlock();
47088+
47089+ return 1;
47090+}
47091diff -urNp linux-3.0.3/grsecurity/grsec_chdir.c linux-3.0.3/grsecurity/grsec_chdir.c
47092--- linux-3.0.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47093+++ linux-3.0.3/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
47094@@ -0,0 +1,19 @@
47095+#include <linux/kernel.h>
47096+#include <linux/sched.h>
47097+#include <linux/fs.h>
47098+#include <linux/file.h>
47099+#include <linux/grsecurity.h>
47100+#include <linux/grinternal.h>
47101+
47102+void
47103+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47104+{
47105+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47106+ if ((grsec_enable_chdir && grsec_enable_group &&
47107+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47108+ !grsec_enable_group)) {
47109+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47110+ }
47111+#endif
47112+ return;
47113+}
47114diff -urNp linux-3.0.3/grsecurity/grsec_chroot.c linux-3.0.3/grsecurity/grsec_chroot.c
47115--- linux-3.0.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47116+++ linux-3.0.3/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
47117@@ -0,0 +1,349 @@
47118+#include <linux/kernel.h>
47119+#include <linux/module.h>
47120+#include <linux/sched.h>
47121+#include <linux/file.h>
47122+#include <linux/fs.h>
47123+#include <linux/mount.h>
47124+#include <linux/types.h>
47125+#include <linux/pid_namespace.h>
47126+#include <linux/grsecurity.h>
47127+#include <linux/grinternal.h>
47128+
47129+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47130+{
47131+#ifdef CONFIG_GRKERNSEC
47132+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47133+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47134+ task->gr_is_chrooted = 1;
47135+ else
47136+ task->gr_is_chrooted = 0;
47137+
47138+ task->gr_chroot_dentry = path->dentry;
47139+#endif
47140+ return;
47141+}
47142+
47143+void gr_clear_chroot_entries(struct task_struct *task)
47144+{
47145+#ifdef CONFIG_GRKERNSEC
47146+ task->gr_is_chrooted = 0;
47147+ task->gr_chroot_dentry = NULL;
47148+#endif
47149+ return;
47150+}
47151+
47152+int
47153+gr_handle_chroot_unix(const pid_t pid)
47154+{
47155+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47156+ struct task_struct *p;
47157+
47158+ if (unlikely(!grsec_enable_chroot_unix))
47159+ return 1;
47160+
47161+ if (likely(!proc_is_chrooted(current)))
47162+ return 1;
47163+
47164+ rcu_read_lock();
47165+ read_lock(&tasklist_lock);
47166+ p = find_task_by_vpid_unrestricted(pid);
47167+ if (unlikely(p && !have_same_root(current, p))) {
47168+ read_unlock(&tasklist_lock);
47169+ rcu_read_unlock();
47170+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47171+ return 0;
47172+ }
47173+ read_unlock(&tasklist_lock);
47174+ rcu_read_unlock();
47175+#endif
47176+ return 1;
47177+}
47178+
47179+int
47180+gr_handle_chroot_nice(void)
47181+{
47182+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47183+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47184+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47185+ return -EPERM;
47186+ }
47187+#endif
47188+ return 0;
47189+}
47190+
47191+int
47192+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47193+{
47194+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47195+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47196+ && proc_is_chrooted(current)) {
47197+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47198+ return -EACCES;
47199+ }
47200+#endif
47201+ return 0;
47202+}
47203+
47204+int
47205+gr_handle_chroot_rawio(const struct inode *inode)
47206+{
47207+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47208+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47209+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47210+ return 1;
47211+#endif
47212+ return 0;
47213+}
47214+
47215+int
47216+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47217+{
47218+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47219+ struct task_struct *p;
47220+ int ret = 0;
47221+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47222+ return ret;
47223+
47224+ read_lock(&tasklist_lock);
47225+ do_each_pid_task(pid, type, p) {
47226+ if (!have_same_root(current, p)) {
47227+ ret = 1;
47228+ goto out;
47229+ }
47230+ } while_each_pid_task(pid, type, p);
47231+out:
47232+ read_unlock(&tasklist_lock);
47233+ return ret;
47234+#endif
47235+ return 0;
47236+}
47237+
47238+int
47239+gr_pid_is_chrooted(struct task_struct *p)
47240+{
47241+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47242+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47243+ return 0;
47244+
47245+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47246+ !have_same_root(current, p)) {
47247+ return 1;
47248+ }
47249+#endif
47250+ return 0;
47251+}
47252+
47253+EXPORT_SYMBOL(gr_pid_is_chrooted);
47254+
47255+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47256+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47257+{
47258+ struct path path, currentroot;
47259+ int ret = 0;
47260+
47261+ path.dentry = (struct dentry *)u_dentry;
47262+ path.mnt = (struct vfsmount *)u_mnt;
47263+ get_fs_root(current->fs, &currentroot);
47264+ if (path_is_under(&path, &currentroot))
47265+ ret = 1;
47266+ path_put(&currentroot);
47267+
47268+ return ret;
47269+}
47270+#endif
47271+
47272+int
47273+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47274+{
47275+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47276+ if (!grsec_enable_chroot_fchdir)
47277+ return 1;
47278+
47279+ if (!proc_is_chrooted(current))
47280+ return 1;
47281+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47282+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47283+ return 0;
47284+ }
47285+#endif
47286+ return 1;
47287+}
47288+
47289+int
47290+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47291+ const time_t shm_createtime)
47292+{
47293+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47294+ struct task_struct *p;
47295+ time_t starttime;
47296+
47297+ if (unlikely(!grsec_enable_chroot_shmat))
47298+ return 1;
47299+
47300+ if (likely(!proc_is_chrooted(current)))
47301+ return 1;
47302+
47303+ rcu_read_lock();
47304+ read_lock(&tasklist_lock);
47305+
47306+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47307+ starttime = p->start_time.tv_sec;
47308+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47309+ if (have_same_root(current, p)) {
47310+ goto allow;
47311+ } else {
47312+ read_unlock(&tasklist_lock);
47313+ rcu_read_unlock();
47314+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47315+ return 0;
47316+ }
47317+ }
47318+ /* creator exited, pid reuse, fall through to next check */
47319+ }
47320+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47321+ if (unlikely(!have_same_root(current, p))) {
47322+ read_unlock(&tasklist_lock);
47323+ rcu_read_unlock();
47324+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47325+ return 0;
47326+ }
47327+ }
47328+
47329+allow:
47330+ read_unlock(&tasklist_lock);
47331+ rcu_read_unlock();
47332+#endif
47333+ return 1;
47334+}
47335+
47336+void
47337+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47338+{
47339+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47340+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47341+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47342+#endif
47343+ return;
47344+}
47345+
47346+int
47347+gr_handle_chroot_mknod(const struct dentry *dentry,
47348+ const struct vfsmount *mnt, const int mode)
47349+{
47350+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47351+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47352+ proc_is_chrooted(current)) {
47353+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47354+ return -EPERM;
47355+ }
47356+#endif
47357+ return 0;
47358+}
47359+
47360+int
47361+gr_handle_chroot_mount(const struct dentry *dentry,
47362+ const struct vfsmount *mnt, const char *dev_name)
47363+{
47364+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47365+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47366+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47367+ return -EPERM;
47368+ }
47369+#endif
47370+ return 0;
47371+}
47372+
47373+int
47374+gr_handle_chroot_pivot(void)
47375+{
47376+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47377+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47378+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47379+ return -EPERM;
47380+ }
47381+#endif
47382+ return 0;
47383+}
47384+
47385+int
47386+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47387+{
47388+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47389+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47390+ !gr_is_outside_chroot(dentry, mnt)) {
47391+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47392+ return -EPERM;
47393+ }
47394+#endif
47395+ return 0;
47396+}
47397+
47398+int
47399+gr_handle_chroot_caps(struct path *path)
47400+{
47401+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47402+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47403+ (init_task.fs->root.dentry != path->dentry) &&
47404+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47405+
47406+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47407+ const struct cred *old = current_cred();
47408+ struct cred *new = prepare_creds();
47409+ if (new == NULL)
47410+ return 1;
47411+
47412+ new->cap_permitted = cap_drop(old->cap_permitted,
47413+ chroot_caps);
47414+ new->cap_inheritable = cap_drop(old->cap_inheritable,
47415+ chroot_caps);
47416+ new->cap_effective = cap_drop(old->cap_effective,
47417+ chroot_caps);
47418+
47419+ commit_creds(new);
47420+
47421+ return 0;
47422+ }
47423+#endif
47424+ return 0;
47425+}
47426+
47427+int
47428+gr_handle_chroot_sysctl(const int op)
47429+{
47430+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47431+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47432+ proc_is_chrooted(current))
47433+ return -EACCES;
47434+#endif
47435+ return 0;
47436+}
47437+
47438+void
47439+gr_handle_chroot_chdir(struct path *path)
47440+{
47441+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47442+ if (grsec_enable_chroot_chdir)
47443+ set_fs_pwd(current->fs, path);
47444+#endif
47445+ return;
47446+}
47447+
47448+int
47449+gr_handle_chroot_chmod(const struct dentry *dentry,
47450+ const struct vfsmount *mnt, const int mode)
47451+{
47452+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47453+ /* allow chmod +s on directories, but not files */
47454+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47455+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47456+ proc_is_chrooted(current)) {
47457+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47458+ return -EPERM;
47459+ }
47460+#endif
47461+ return 0;
47462+}
47463+
47464+#ifdef CONFIG_SECURITY
47465+EXPORT_SYMBOL(gr_handle_chroot_caps);
47466+#endif
47467diff -urNp linux-3.0.3/grsecurity/grsec_disabled.c linux-3.0.3/grsecurity/grsec_disabled.c
47468--- linux-3.0.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47469+++ linux-3.0.3/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47470@@ -0,0 +1,447 @@
47471+#include <linux/kernel.h>
47472+#include <linux/module.h>
47473+#include <linux/sched.h>
47474+#include <linux/file.h>
47475+#include <linux/fs.h>
47476+#include <linux/kdev_t.h>
47477+#include <linux/net.h>
47478+#include <linux/in.h>
47479+#include <linux/ip.h>
47480+#include <linux/skbuff.h>
47481+#include <linux/sysctl.h>
47482+
47483+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47484+void
47485+pax_set_initial_flags(struct linux_binprm *bprm)
47486+{
47487+ return;
47488+}
47489+#endif
47490+
47491+#ifdef CONFIG_SYSCTL
47492+__u32
47493+gr_handle_sysctl(const struct ctl_table * table, const int op)
47494+{
47495+ return 0;
47496+}
47497+#endif
47498+
47499+#ifdef CONFIG_TASKSTATS
47500+int gr_is_taskstats_denied(int pid)
47501+{
47502+ return 0;
47503+}
47504+#endif
47505+
47506+int
47507+gr_acl_is_enabled(void)
47508+{
47509+ return 0;
47510+}
47511+
47512+int
47513+gr_handle_rawio(const struct inode *inode)
47514+{
47515+ return 0;
47516+}
47517+
47518+void
47519+gr_acl_handle_psacct(struct task_struct *task, const long code)
47520+{
47521+ return;
47522+}
47523+
47524+int
47525+gr_handle_ptrace(struct task_struct *task, const long request)
47526+{
47527+ return 0;
47528+}
47529+
47530+int
47531+gr_handle_proc_ptrace(struct task_struct *task)
47532+{
47533+ return 0;
47534+}
47535+
47536+void
47537+gr_learn_resource(const struct task_struct *task,
47538+ const int res, const unsigned long wanted, const int gt)
47539+{
47540+ return;
47541+}
47542+
47543+int
47544+gr_set_acls(const int type)
47545+{
47546+ return 0;
47547+}
47548+
47549+int
47550+gr_check_hidden_task(const struct task_struct *tsk)
47551+{
47552+ return 0;
47553+}
47554+
47555+int
47556+gr_check_protected_task(const struct task_struct *task)
47557+{
47558+ return 0;
47559+}
47560+
47561+int
47562+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47563+{
47564+ return 0;
47565+}
47566+
47567+void
47568+gr_copy_label(struct task_struct *tsk)
47569+{
47570+ return;
47571+}
47572+
47573+void
47574+gr_set_pax_flags(struct task_struct *task)
47575+{
47576+ return;
47577+}
47578+
47579+int
47580+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47581+ const int unsafe_share)
47582+{
47583+ return 0;
47584+}
47585+
47586+void
47587+gr_handle_delete(const ino_t ino, const dev_t dev)
47588+{
47589+ return;
47590+}
47591+
47592+void
47593+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47594+{
47595+ return;
47596+}
47597+
47598+void
47599+gr_handle_crash(struct task_struct *task, const int sig)
47600+{
47601+ return;
47602+}
47603+
47604+int
47605+gr_check_crash_exec(const struct file *filp)
47606+{
47607+ return 0;
47608+}
47609+
47610+int
47611+gr_check_crash_uid(const uid_t uid)
47612+{
47613+ return 0;
47614+}
47615+
47616+void
47617+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47618+ struct dentry *old_dentry,
47619+ struct dentry *new_dentry,
47620+ struct vfsmount *mnt, const __u8 replace)
47621+{
47622+ return;
47623+}
47624+
47625+int
47626+gr_search_socket(const int family, const int type, const int protocol)
47627+{
47628+ return 1;
47629+}
47630+
47631+int
47632+gr_search_connectbind(const int mode, const struct socket *sock,
47633+ const struct sockaddr_in *addr)
47634+{
47635+ return 0;
47636+}
47637+
47638+int
47639+gr_is_capable(const int cap)
47640+{
47641+ return 1;
47642+}
47643+
47644+int
47645+gr_is_capable_nolog(const int cap)
47646+{
47647+ return 1;
47648+}
47649+
47650+void
47651+gr_handle_alertkill(struct task_struct *task)
47652+{
47653+ return;
47654+}
47655+
47656+__u32
47657+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47658+{
47659+ return 1;
47660+}
47661+
47662+__u32
47663+gr_acl_handle_hidden_file(const struct dentry * dentry,
47664+ const struct vfsmount * mnt)
47665+{
47666+ return 1;
47667+}
47668+
47669+__u32
47670+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47671+ const int fmode)
47672+{
47673+ return 1;
47674+}
47675+
47676+__u32
47677+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47678+{
47679+ return 1;
47680+}
47681+
47682+__u32
47683+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47684+{
47685+ return 1;
47686+}
47687+
47688+int
47689+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47690+ unsigned int *vm_flags)
47691+{
47692+ return 1;
47693+}
47694+
47695+__u32
47696+gr_acl_handle_truncate(const struct dentry * dentry,
47697+ const struct vfsmount * mnt)
47698+{
47699+ return 1;
47700+}
47701+
47702+__u32
47703+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47704+{
47705+ return 1;
47706+}
47707+
47708+__u32
47709+gr_acl_handle_access(const struct dentry * dentry,
47710+ const struct vfsmount * mnt, const int fmode)
47711+{
47712+ return 1;
47713+}
47714+
47715+__u32
47716+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47717+ mode_t mode)
47718+{
47719+ return 1;
47720+}
47721+
47722+__u32
47723+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47724+ mode_t mode)
47725+{
47726+ return 1;
47727+}
47728+
47729+__u32
47730+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47731+{
47732+ return 1;
47733+}
47734+
47735+__u32
47736+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47737+{
47738+ return 1;
47739+}
47740+
47741+void
47742+grsecurity_init(void)
47743+{
47744+ return;
47745+}
47746+
47747+__u32
47748+gr_acl_handle_mknod(const struct dentry * new_dentry,
47749+ const struct dentry * parent_dentry,
47750+ const struct vfsmount * parent_mnt,
47751+ const int mode)
47752+{
47753+ return 1;
47754+}
47755+
47756+__u32
47757+gr_acl_handle_mkdir(const struct dentry * new_dentry,
47758+ const struct dentry * parent_dentry,
47759+ const struct vfsmount * parent_mnt)
47760+{
47761+ return 1;
47762+}
47763+
47764+__u32
47765+gr_acl_handle_symlink(const struct dentry * new_dentry,
47766+ const struct dentry * parent_dentry,
47767+ const struct vfsmount * parent_mnt, const char *from)
47768+{
47769+ return 1;
47770+}
47771+
47772+__u32
47773+gr_acl_handle_link(const struct dentry * new_dentry,
47774+ const struct dentry * parent_dentry,
47775+ const struct vfsmount * parent_mnt,
47776+ const struct dentry * old_dentry,
47777+ const struct vfsmount * old_mnt, const char *to)
47778+{
47779+ return 1;
47780+}
47781+
47782+int
47783+gr_acl_handle_rename(const struct dentry *new_dentry,
47784+ const struct dentry *parent_dentry,
47785+ const struct vfsmount *parent_mnt,
47786+ const struct dentry *old_dentry,
47787+ const struct inode *old_parent_inode,
47788+ const struct vfsmount *old_mnt, const char *newname)
47789+{
47790+ return 0;
47791+}
47792+
47793+int
47794+gr_acl_handle_filldir(const struct file *file, const char *name,
47795+ const int namelen, const ino_t ino)
47796+{
47797+ return 1;
47798+}
47799+
47800+int
47801+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47802+ const time_t shm_createtime, const uid_t cuid, const int shmid)
47803+{
47804+ return 1;
47805+}
47806+
47807+int
47808+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47809+{
47810+ return 0;
47811+}
47812+
47813+int
47814+gr_search_accept(const struct socket *sock)
47815+{
47816+ return 0;
47817+}
47818+
47819+int
47820+gr_search_listen(const struct socket *sock)
47821+{
47822+ return 0;
47823+}
47824+
47825+int
47826+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47827+{
47828+ return 0;
47829+}
47830+
47831+__u32
47832+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47833+{
47834+ return 1;
47835+}
47836+
47837+__u32
47838+gr_acl_handle_creat(const struct dentry * dentry,
47839+ const struct dentry * p_dentry,
47840+ const struct vfsmount * p_mnt, const int fmode,
47841+ const int imode)
47842+{
47843+ return 1;
47844+}
47845+
47846+void
47847+gr_acl_handle_exit(void)
47848+{
47849+ return;
47850+}
47851+
47852+int
47853+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47854+{
47855+ return 1;
47856+}
47857+
47858+void
47859+gr_set_role_label(const uid_t uid, const gid_t gid)
47860+{
47861+ return;
47862+}
47863+
47864+int
47865+gr_acl_handle_procpidmem(const struct task_struct *task)
47866+{
47867+ return 0;
47868+}
47869+
47870+int
47871+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47872+{
47873+ return 0;
47874+}
47875+
47876+int
47877+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47878+{
47879+ return 0;
47880+}
47881+
47882+void
47883+gr_set_kernel_label(struct task_struct *task)
47884+{
47885+ return;
47886+}
47887+
47888+int
47889+gr_check_user_change(int real, int effective, int fs)
47890+{
47891+ return 0;
47892+}
47893+
47894+int
47895+gr_check_group_change(int real, int effective, int fs)
47896+{
47897+ return 0;
47898+}
47899+
47900+int gr_acl_enable_at_secure(void)
47901+{
47902+ return 0;
47903+}
47904+
47905+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47906+{
47907+ return dentry->d_inode->i_sb->s_dev;
47908+}
47909+
47910+EXPORT_SYMBOL(gr_is_capable);
47911+EXPORT_SYMBOL(gr_is_capable_nolog);
47912+EXPORT_SYMBOL(gr_learn_resource);
47913+EXPORT_SYMBOL(gr_set_kernel_label);
47914+#ifdef CONFIG_SECURITY
47915+EXPORT_SYMBOL(gr_check_user_change);
47916+EXPORT_SYMBOL(gr_check_group_change);
47917+#endif
47918diff -urNp linux-3.0.3/grsecurity/grsec_exec.c linux-3.0.3/grsecurity/grsec_exec.c
47919--- linux-3.0.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47920+++ linux-3.0.3/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
47921@@ -0,0 +1,72 @@
47922+#include <linux/kernel.h>
47923+#include <linux/sched.h>
47924+#include <linux/file.h>
47925+#include <linux/binfmts.h>
47926+#include <linux/fs.h>
47927+#include <linux/types.h>
47928+#include <linux/grdefs.h>
47929+#include <linux/grsecurity.h>
47930+#include <linux/grinternal.h>
47931+#include <linux/capability.h>
47932+
47933+#include <asm/uaccess.h>
47934+
47935+#ifdef CONFIG_GRKERNSEC_EXECLOG
47936+static char gr_exec_arg_buf[132];
47937+static DEFINE_MUTEX(gr_exec_arg_mutex);
47938+#endif
47939+
47940+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47941+
47942+void
47943+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47944+{
47945+#ifdef CONFIG_GRKERNSEC_EXECLOG
47946+ char *grarg = gr_exec_arg_buf;
47947+ unsigned int i, x, execlen = 0;
47948+ char c;
47949+
47950+ if (!((grsec_enable_execlog && grsec_enable_group &&
47951+ in_group_p(grsec_audit_gid))
47952+ || (grsec_enable_execlog && !grsec_enable_group)))
47953+ return;
47954+
47955+ mutex_lock(&gr_exec_arg_mutex);
47956+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
47957+
47958+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
47959+ const char __user *p;
47960+ unsigned int len;
47961+
47962+ p = get_user_arg_ptr(argv, i);
47963+ if (IS_ERR(p))
47964+ goto log;
47965+
47966+ len = strnlen_user(p, 128 - execlen);
47967+ if (len > 128 - execlen)
47968+ len = 128 - execlen;
47969+ else if (len > 0)
47970+ len--;
47971+ if (copy_from_user(grarg + execlen, p, len))
47972+ goto log;
47973+
47974+ /* rewrite unprintable characters */
47975+ for (x = 0; x < len; x++) {
47976+ c = *(grarg + execlen + x);
47977+ if (c < 32 || c > 126)
47978+ *(grarg + execlen + x) = ' ';
47979+ }
47980+
47981+ execlen += len;
47982+ *(grarg + execlen) = ' ';
47983+ *(grarg + execlen + 1) = '\0';
47984+ execlen++;
47985+ }
47986+
47987+ log:
47988+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47989+ bprm->file->f_path.mnt, grarg);
47990+ mutex_unlock(&gr_exec_arg_mutex);
47991+#endif
47992+ return;
47993+}
47994diff -urNp linux-3.0.3/grsecurity/grsec_fifo.c linux-3.0.3/grsecurity/grsec_fifo.c
47995--- linux-3.0.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
47996+++ linux-3.0.3/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
47997@@ -0,0 +1,24 @@
47998+#include <linux/kernel.h>
47999+#include <linux/sched.h>
48000+#include <linux/fs.h>
48001+#include <linux/file.h>
48002+#include <linux/grinternal.h>
48003+
48004+int
48005+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48006+ const struct dentry *dir, const int flag, const int acc_mode)
48007+{
48008+#ifdef CONFIG_GRKERNSEC_FIFO
48009+ const struct cred *cred = current_cred();
48010+
48011+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48012+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48013+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48014+ (cred->fsuid != dentry->d_inode->i_uid)) {
48015+ if (!inode_permission(dentry->d_inode, acc_mode))
48016+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48017+ return -EACCES;
48018+ }
48019+#endif
48020+ return 0;
48021+}
48022diff -urNp linux-3.0.3/grsecurity/grsec_fork.c linux-3.0.3/grsecurity/grsec_fork.c
48023--- linux-3.0.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48024+++ linux-3.0.3/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
48025@@ -0,0 +1,23 @@
48026+#include <linux/kernel.h>
48027+#include <linux/sched.h>
48028+#include <linux/grsecurity.h>
48029+#include <linux/grinternal.h>
48030+#include <linux/errno.h>
48031+
48032+void
48033+gr_log_forkfail(const int retval)
48034+{
48035+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48036+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48037+ switch (retval) {
48038+ case -EAGAIN:
48039+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48040+ break;
48041+ case -ENOMEM:
48042+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48043+ break;
48044+ }
48045+ }
48046+#endif
48047+ return;
48048+}
48049diff -urNp linux-3.0.3/grsecurity/grsec_init.c linux-3.0.3/grsecurity/grsec_init.c
48050--- linux-3.0.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48051+++ linux-3.0.3/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
48052@@ -0,0 +1,269 @@
48053+#include <linux/kernel.h>
48054+#include <linux/sched.h>
48055+#include <linux/mm.h>
48056+#include <linux/gracl.h>
48057+#include <linux/slab.h>
48058+#include <linux/vmalloc.h>
48059+#include <linux/percpu.h>
48060+#include <linux/module.h>
48061+
48062+int grsec_enable_brute;
48063+int grsec_enable_link;
48064+int grsec_enable_dmesg;
48065+int grsec_enable_harden_ptrace;
48066+int grsec_enable_fifo;
48067+int grsec_enable_execlog;
48068+int grsec_enable_signal;
48069+int grsec_enable_forkfail;
48070+int grsec_enable_audit_ptrace;
48071+int grsec_enable_time;
48072+int grsec_enable_audit_textrel;
48073+int grsec_enable_group;
48074+int grsec_audit_gid;
48075+int grsec_enable_chdir;
48076+int grsec_enable_mount;
48077+int grsec_enable_rofs;
48078+int grsec_enable_chroot_findtask;
48079+int grsec_enable_chroot_mount;
48080+int grsec_enable_chroot_shmat;
48081+int grsec_enable_chroot_fchdir;
48082+int grsec_enable_chroot_double;
48083+int grsec_enable_chroot_pivot;
48084+int grsec_enable_chroot_chdir;
48085+int grsec_enable_chroot_chmod;
48086+int grsec_enable_chroot_mknod;
48087+int grsec_enable_chroot_nice;
48088+int grsec_enable_chroot_execlog;
48089+int grsec_enable_chroot_caps;
48090+int grsec_enable_chroot_sysctl;
48091+int grsec_enable_chroot_unix;
48092+int grsec_enable_tpe;
48093+int grsec_tpe_gid;
48094+int grsec_enable_blackhole;
48095+#ifdef CONFIG_IPV6_MODULE
48096+EXPORT_SYMBOL(grsec_enable_blackhole);
48097+#endif
48098+int grsec_lastack_retries;
48099+int grsec_enable_tpe_all;
48100+int grsec_enable_tpe_invert;
48101+int grsec_enable_socket_all;
48102+int grsec_socket_all_gid;
48103+int grsec_enable_socket_client;
48104+int grsec_socket_client_gid;
48105+int grsec_enable_socket_server;
48106+int grsec_socket_server_gid;
48107+int grsec_resource_logging;
48108+int grsec_disable_privio;
48109+int grsec_enable_log_rwxmaps;
48110+int grsec_lock;
48111+
48112+DEFINE_SPINLOCK(grsec_alert_lock);
48113+unsigned long grsec_alert_wtime = 0;
48114+unsigned long grsec_alert_fyet = 0;
48115+
48116+DEFINE_SPINLOCK(grsec_audit_lock);
48117+
48118+DEFINE_RWLOCK(grsec_exec_file_lock);
48119+
48120+char *gr_shared_page[4];
48121+
48122+char *gr_alert_log_fmt;
48123+char *gr_audit_log_fmt;
48124+char *gr_alert_log_buf;
48125+char *gr_audit_log_buf;
48126+
48127+extern struct gr_arg *gr_usermode;
48128+extern unsigned char *gr_system_salt;
48129+extern unsigned char *gr_system_sum;
48130+
48131+void __init
48132+grsecurity_init(void)
48133+{
48134+ int j;
48135+ /* create the per-cpu shared pages */
48136+
48137+#ifdef CONFIG_X86
48138+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48139+#endif
48140+
48141+ for (j = 0; j < 4; j++) {
48142+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48143+ if (gr_shared_page[j] == NULL) {
48144+ panic("Unable to allocate grsecurity shared page");
48145+ return;
48146+ }
48147+ }
48148+
48149+ /* allocate log buffers */
48150+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48151+ if (!gr_alert_log_fmt) {
48152+ panic("Unable to allocate grsecurity alert log format buffer");
48153+ return;
48154+ }
48155+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48156+ if (!gr_audit_log_fmt) {
48157+ panic("Unable to allocate grsecurity audit log format buffer");
48158+ return;
48159+ }
48160+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48161+ if (!gr_alert_log_buf) {
48162+ panic("Unable to allocate grsecurity alert log buffer");
48163+ return;
48164+ }
48165+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48166+ if (!gr_audit_log_buf) {
48167+ panic("Unable to allocate grsecurity audit log buffer");
48168+ return;
48169+ }
48170+
48171+ /* allocate memory for authentication structure */
48172+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48173+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48174+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48175+
48176+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48177+ panic("Unable to allocate grsecurity authentication structure");
48178+ return;
48179+ }
48180+
48181+
48182+#ifdef CONFIG_GRKERNSEC_IO
48183+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48184+ grsec_disable_privio = 1;
48185+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48186+ grsec_disable_privio = 1;
48187+#else
48188+ grsec_disable_privio = 0;
48189+#endif
48190+#endif
48191+
48192+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48193+ /* for backward compatibility, tpe_invert always defaults to on if
48194+ enabled in the kernel
48195+ */
48196+ grsec_enable_tpe_invert = 1;
48197+#endif
48198+
48199+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48200+#ifndef CONFIG_GRKERNSEC_SYSCTL
48201+ grsec_lock = 1;
48202+#endif
48203+
48204+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48205+ grsec_enable_audit_textrel = 1;
48206+#endif
48207+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48208+ grsec_enable_log_rwxmaps = 1;
48209+#endif
48210+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48211+ grsec_enable_group = 1;
48212+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48213+#endif
48214+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48215+ grsec_enable_chdir = 1;
48216+#endif
48217+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48218+ grsec_enable_harden_ptrace = 1;
48219+#endif
48220+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48221+ grsec_enable_mount = 1;
48222+#endif
48223+#ifdef CONFIG_GRKERNSEC_LINK
48224+ grsec_enable_link = 1;
48225+#endif
48226+#ifdef CONFIG_GRKERNSEC_BRUTE
48227+ grsec_enable_brute = 1;
48228+#endif
48229+#ifdef CONFIG_GRKERNSEC_DMESG
48230+ grsec_enable_dmesg = 1;
48231+#endif
48232+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48233+ grsec_enable_blackhole = 1;
48234+ grsec_lastack_retries = 4;
48235+#endif
48236+#ifdef CONFIG_GRKERNSEC_FIFO
48237+ grsec_enable_fifo = 1;
48238+#endif
48239+#ifdef CONFIG_GRKERNSEC_EXECLOG
48240+ grsec_enable_execlog = 1;
48241+#endif
48242+#ifdef CONFIG_GRKERNSEC_SIGNAL
48243+ grsec_enable_signal = 1;
48244+#endif
48245+#ifdef CONFIG_GRKERNSEC_FORKFAIL
48246+ grsec_enable_forkfail = 1;
48247+#endif
48248+#ifdef CONFIG_GRKERNSEC_TIME
48249+ grsec_enable_time = 1;
48250+#endif
48251+#ifdef CONFIG_GRKERNSEC_RESLOG
48252+ grsec_resource_logging = 1;
48253+#endif
48254+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48255+ grsec_enable_chroot_findtask = 1;
48256+#endif
48257+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48258+ grsec_enable_chroot_unix = 1;
48259+#endif
48260+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48261+ grsec_enable_chroot_mount = 1;
48262+#endif
48263+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48264+ grsec_enable_chroot_fchdir = 1;
48265+#endif
48266+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48267+ grsec_enable_chroot_shmat = 1;
48268+#endif
48269+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48270+ grsec_enable_audit_ptrace = 1;
48271+#endif
48272+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48273+ grsec_enable_chroot_double = 1;
48274+#endif
48275+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48276+ grsec_enable_chroot_pivot = 1;
48277+#endif
48278+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48279+ grsec_enable_chroot_chdir = 1;
48280+#endif
48281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48282+ grsec_enable_chroot_chmod = 1;
48283+#endif
48284+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48285+ grsec_enable_chroot_mknod = 1;
48286+#endif
48287+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48288+ grsec_enable_chroot_nice = 1;
48289+#endif
48290+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48291+ grsec_enable_chroot_execlog = 1;
48292+#endif
48293+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48294+ grsec_enable_chroot_caps = 1;
48295+#endif
48296+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48297+ grsec_enable_chroot_sysctl = 1;
48298+#endif
48299+#ifdef CONFIG_GRKERNSEC_TPE
48300+ grsec_enable_tpe = 1;
48301+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48302+#ifdef CONFIG_GRKERNSEC_TPE_ALL
48303+ grsec_enable_tpe_all = 1;
48304+#endif
48305+#endif
48306+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48307+ grsec_enable_socket_all = 1;
48308+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48309+#endif
48310+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48311+ grsec_enable_socket_client = 1;
48312+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48313+#endif
48314+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48315+ grsec_enable_socket_server = 1;
48316+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48317+#endif
48318+#endif
48319+
48320+ return;
48321+}
48322diff -urNp linux-3.0.3/grsecurity/grsec_link.c linux-3.0.3/grsecurity/grsec_link.c
48323--- linux-3.0.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48324+++ linux-3.0.3/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48325@@ -0,0 +1,43 @@
48326+#include <linux/kernel.h>
48327+#include <linux/sched.h>
48328+#include <linux/fs.h>
48329+#include <linux/file.h>
48330+#include <linux/grinternal.h>
48331+
48332+int
48333+gr_handle_follow_link(const struct inode *parent,
48334+ const struct inode *inode,
48335+ const struct dentry *dentry, const struct vfsmount *mnt)
48336+{
48337+#ifdef CONFIG_GRKERNSEC_LINK
48338+ const struct cred *cred = current_cred();
48339+
48340+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48341+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48342+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48343+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48344+ return -EACCES;
48345+ }
48346+#endif
48347+ return 0;
48348+}
48349+
48350+int
48351+gr_handle_hardlink(const struct dentry *dentry,
48352+ const struct vfsmount *mnt,
48353+ struct inode *inode, const int mode, const char *to)
48354+{
48355+#ifdef CONFIG_GRKERNSEC_LINK
48356+ const struct cred *cred = current_cred();
48357+
48358+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48359+ (!S_ISREG(mode) || (mode & S_ISUID) ||
48360+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48361+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48362+ !capable(CAP_FOWNER) && cred->uid) {
48363+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48364+ return -EPERM;
48365+ }
48366+#endif
48367+ return 0;
48368+}
48369diff -urNp linux-3.0.3/grsecurity/grsec_log.c linux-3.0.3/grsecurity/grsec_log.c
48370--- linux-3.0.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48371+++ linux-3.0.3/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48372@@ -0,0 +1,310 @@
48373+#include <linux/kernel.h>
48374+#include <linux/sched.h>
48375+#include <linux/file.h>
48376+#include <linux/tty.h>
48377+#include <linux/fs.h>
48378+#include <linux/grinternal.h>
48379+
48380+#ifdef CONFIG_TREE_PREEMPT_RCU
48381+#define DISABLE_PREEMPT() preempt_disable()
48382+#define ENABLE_PREEMPT() preempt_enable()
48383+#else
48384+#define DISABLE_PREEMPT()
48385+#define ENABLE_PREEMPT()
48386+#endif
48387+
48388+#define BEGIN_LOCKS(x) \
48389+ DISABLE_PREEMPT(); \
48390+ rcu_read_lock(); \
48391+ read_lock(&tasklist_lock); \
48392+ read_lock(&grsec_exec_file_lock); \
48393+ if (x != GR_DO_AUDIT) \
48394+ spin_lock(&grsec_alert_lock); \
48395+ else \
48396+ spin_lock(&grsec_audit_lock)
48397+
48398+#define END_LOCKS(x) \
48399+ if (x != GR_DO_AUDIT) \
48400+ spin_unlock(&grsec_alert_lock); \
48401+ else \
48402+ spin_unlock(&grsec_audit_lock); \
48403+ read_unlock(&grsec_exec_file_lock); \
48404+ read_unlock(&tasklist_lock); \
48405+ rcu_read_unlock(); \
48406+ ENABLE_PREEMPT(); \
48407+ if (x == GR_DONT_AUDIT) \
48408+ gr_handle_alertkill(current)
48409+
48410+enum {
48411+ FLOODING,
48412+ NO_FLOODING
48413+};
48414+
48415+extern char *gr_alert_log_fmt;
48416+extern char *gr_audit_log_fmt;
48417+extern char *gr_alert_log_buf;
48418+extern char *gr_audit_log_buf;
48419+
48420+static int gr_log_start(int audit)
48421+{
48422+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48423+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48424+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48425+
48426+ if (audit == GR_DO_AUDIT)
48427+ goto set_fmt;
48428+
48429+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48430+ grsec_alert_wtime = jiffies;
48431+ grsec_alert_fyet = 0;
48432+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48433+ grsec_alert_fyet++;
48434+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48435+ grsec_alert_wtime = jiffies;
48436+ grsec_alert_fyet++;
48437+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48438+ return FLOODING;
48439+ } else return FLOODING;
48440+
48441+set_fmt:
48442+ memset(buf, 0, PAGE_SIZE);
48443+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
48444+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48445+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48446+ } else if (current->signal->curr_ip) {
48447+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48448+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48449+ } else if (gr_acl_is_enabled()) {
48450+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48451+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48452+ } else {
48453+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
48454+ strcpy(buf, fmt);
48455+ }
48456+
48457+ return NO_FLOODING;
48458+}
48459+
48460+static void gr_log_middle(int audit, const char *msg, va_list ap)
48461+ __attribute__ ((format (printf, 2, 0)));
48462+
48463+static void gr_log_middle(int audit, const char *msg, va_list ap)
48464+{
48465+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48466+ unsigned int len = strlen(buf);
48467+
48468+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48469+
48470+ return;
48471+}
48472+
48473+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48474+ __attribute__ ((format (printf, 2, 3)));
48475+
48476+static void gr_log_middle_varargs(int audit, const char *msg, ...)
48477+{
48478+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48479+ unsigned int len = strlen(buf);
48480+ va_list ap;
48481+
48482+ va_start(ap, msg);
48483+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48484+ va_end(ap);
48485+
48486+ return;
48487+}
48488+
48489+static void gr_log_end(int audit)
48490+{
48491+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48492+ unsigned int len = strlen(buf);
48493+
48494+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48495+ printk("%s\n", buf);
48496+
48497+ return;
48498+}
48499+
48500+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48501+{
48502+ int logtype;
48503+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48504+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48505+ void *voidptr = NULL;
48506+ int num1 = 0, num2 = 0;
48507+ unsigned long ulong1 = 0, ulong2 = 0;
48508+ struct dentry *dentry = NULL;
48509+ struct vfsmount *mnt = NULL;
48510+ struct file *file = NULL;
48511+ struct task_struct *task = NULL;
48512+ const struct cred *cred, *pcred;
48513+ va_list ap;
48514+
48515+ BEGIN_LOCKS(audit);
48516+ logtype = gr_log_start(audit);
48517+ if (logtype == FLOODING) {
48518+ END_LOCKS(audit);
48519+ return;
48520+ }
48521+ va_start(ap, argtypes);
48522+ switch (argtypes) {
48523+ case GR_TTYSNIFF:
48524+ task = va_arg(ap, struct task_struct *);
48525+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48526+ break;
48527+ case GR_SYSCTL_HIDDEN:
48528+ str1 = va_arg(ap, char *);
48529+ gr_log_middle_varargs(audit, msg, result, str1);
48530+ break;
48531+ case GR_RBAC:
48532+ dentry = va_arg(ap, struct dentry *);
48533+ mnt = va_arg(ap, struct vfsmount *);
48534+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48535+ break;
48536+ case GR_RBAC_STR:
48537+ dentry = va_arg(ap, struct dentry *);
48538+ mnt = va_arg(ap, struct vfsmount *);
48539+ str1 = va_arg(ap, char *);
48540+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48541+ break;
48542+ case GR_STR_RBAC:
48543+ str1 = va_arg(ap, char *);
48544+ dentry = va_arg(ap, struct dentry *);
48545+ mnt = va_arg(ap, struct vfsmount *);
48546+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48547+ break;
48548+ case GR_RBAC_MODE2:
48549+ dentry = va_arg(ap, struct dentry *);
48550+ mnt = va_arg(ap, struct vfsmount *);
48551+ str1 = va_arg(ap, char *);
48552+ str2 = va_arg(ap, char *);
48553+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48554+ break;
48555+ case GR_RBAC_MODE3:
48556+ dentry = va_arg(ap, struct dentry *);
48557+ mnt = va_arg(ap, struct vfsmount *);
48558+ str1 = va_arg(ap, char *);
48559+ str2 = va_arg(ap, char *);
48560+ str3 = va_arg(ap, char *);
48561+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48562+ break;
48563+ case GR_FILENAME:
48564+ dentry = va_arg(ap, struct dentry *);
48565+ mnt = va_arg(ap, struct vfsmount *);
48566+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48567+ break;
48568+ case GR_STR_FILENAME:
48569+ str1 = va_arg(ap, char *);
48570+ dentry = va_arg(ap, struct dentry *);
48571+ mnt = va_arg(ap, struct vfsmount *);
48572+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48573+ break;
48574+ case GR_FILENAME_STR:
48575+ dentry = va_arg(ap, struct dentry *);
48576+ mnt = va_arg(ap, struct vfsmount *);
48577+ str1 = va_arg(ap, char *);
48578+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48579+ break;
48580+ case GR_FILENAME_TWO_INT:
48581+ dentry = va_arg(ap, struct dentry *);
48582+ mnt = va_arg(ap, struct vfsmount *);
48583+ num1 = va_arg(ap, int);
48584+ num2 = va_arg(ap, int);
48585+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48586+ break;
48587+ case GR_FILENAME_TWO_INT_STR:
48588+ dentry = va_arg(ap, struct dentry *);
48589+ mnt = va_arg(ap, struct vfsmount *);
48590+ num1 = va_arg(ap, int);
48591+ num2 = va_arg(ap, int);
48592+ str1 = va_arg(ap, char *);
48593+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48594+ break;
48595+ case GR_TEXTREL:
48596+ file = va_arg(ap, struct file *);
48597+ ulong1 = va_arg(ap, unsigned long);
48598+ ulong2 = va_arg(ap, unsigned long);
48599+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48600+ break;
48601+ case GR_PTRACE:
48602+ task = va_arg(ap, struct task_struct *);
48603+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48604+ break;
48605+ case GR_RESOURCE:
48606+ task = va_arg(ap, struct task_struct *);
48607+ cred = __task_cred(task);
48608+ pcred = __task_cred(task->real_parent);
48609+ ulong1 = va_arg(ap, unsigned long);
48610+ str1 = va_arg(ap, char *);
48611+ ulong2 = va_arg(ap, unsigned long);
48612+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48613+ break;
48614+ case GR_CAP:
48615+ task = va_arg(ap, struct task_struct *);
48616+ cred = __task_cred(task);
48617+ pcred = __task_cred(task->real_parent);
48618+ str1 = va_arg(ap, char *);
48619+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48620+ break;
48621+ case GR_SIG:
48622+ str1 = va_arg(ap, char *);
48623+ voidptr = va_arg(ap, void *);
48624+ gr_log_middle_varargs(audit, msg, str1, voidptr);
48625+ break;
48626+ case GR_SIG2:
48627+ task = va_arg(ap, struct task_struct *);
48628+ cred = __task_cred(task);
48629+ pcred = __task_cred(task->real_parent);
48630+ num1 = va_arg(ap, int);
48631+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48632+ break;
48633+ case GR_CRASH1:
48634+ task = va_arg(ap, struct task_struct *);
48635+ cred = __task_cred(task);
48636+ pcred = __task_cred(task->real_parent);
48637+ ulong1 = va_arg(ap, unsigned long);
48638+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48639+ break;
48640+ case GR_CRASH2:
48641+ task = va_arg(ap, struct task_struct *);
48642+ cred = __task_cred(task);
48643+ pcred = __task_cred(task->real_parent);
48644+ ulong1 = va_arg(ap, unsigned long);
48645+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48646+ break;
48647+ case GR_RWXMAP:
48648+ file = va_arg(ap, struct file *);
48649+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48650+ break;
48651+ case GR_PSACCT:
48652+ {
48653+ unsigned int wday, cday;
48654+ __u8 whr, chr;
48655+ __u8 wmin, cmin;
48656+ __u8 wsec, csec;
48657+ char cur_tty[64] = { 0 };
48658+ char parent_tty[64] = { 0 };
48659+
48660+ task = va_arg(ap, struct task_struct *);
48661+ wday = va_arg(ap, unsigned int);
48662+ cday = va_arg(ap, unsigned int);
48663+ whr = va_arg(ap, int);
48664+ chr = va_arg(ap, int);
48665+ wmin = va_arg(ap, int);
48666+ cmin = va_arg(ap, int);
48667+ wsec = va_arg(ap, int);
48668+ csec = va_arg(ap, int);
48669+ ulong1 = va_arg(ap, unsigned long);
48670+ cred = __task_cred(task);
48671+ pcred = __task_cred(task->real_parent);
48672+
48673+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48674+ }
48675+ break;
48676+ default:
48677+ gr_log_middle(audit, msg, ap);
48678+ }
48679+ va_end(ap);
48680+ gr_log_end(audit);
48681+ END_LOCKS(audit);
48682+}
48683diff -urNp linux-3.0.3/grsecurity/grsec_mem.c linux-3.0.3/grsecurity/grsec_mem.c
48684--- linux-3.0.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48685+++ linux-3.0.3/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48686@@ -0,0 +1,33 @@
48687+#include <linux/kernel.h>
48688+#include <linux/sched.h>
48689+#include <linux/mm.h>
48690+#include <linux/mman.h>
48691+#include <linux/grinternal.h>
48692+
48693+void
48694+gr_handle_ioperm(void)
48695+{
48696+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48697+ return;
48698+}
48699+
48700+void
48701+gr_handle_iopl(void)
48702+{
48703+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48704+ return;
48705+}
48706+
48707+void
48708+gr_handle_mem_readwrite(u64 from, u64 to)
48709+{
48710+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48711+ return;
48712+}
48713+
48714+void
48715+gr_handle_vm86(void)
48716+{
48717+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48718+ return;
48719+}
48720diff -urNp linux-3.0.3/grsecurity/grsec_mount.c linux-3.0.3/grsecurity/grsec_mount.c
48721--- linux-3.0.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48722+++ linux-3.0.3/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48723@@ -0,0 +1,62 @@
48724+#include <linux/kernel.h>
48725+#include <linux/sched.h>
48726+#include <linux/mount.h>
48727+#include <linux/grsecurity.h>
48728+#include <linux/grinternal.h>
48729+
48730+void
48731+gr_log_remount(const char *devname, const int retval)
48732+{
48733+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48734+ if (grsec_enable_mount && (retval >= 0))
48735+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48736+#endif
48737+ return;
48738+}
48739+
48740+void
48741+gr_log_unmount(const char *devname, const int retval)
48742+{
48743+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48744+ if (grsec_enable_mount && (retval >= 0))
48745+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48746+#endif
48747+ return;
48748+}
48749+
48750+void
48751+gr_log_mount(const char *from, const char *to, const int retval)
48752+{
48753+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48754+ if (grsec_enable_mount && (retval >= 0))
48755+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48756+#endif
48757+ return;
48758+}
48759+
48760+int
48761+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48762+{
48763+#ifdef CONFIG_GRKERNSEC_ROFS
48764+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48765+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48766+ return -EPERM;
48767+ } else
48768+ return 0;
48769+#endif
48770+ return 0;
48771+}
48772+
48773+int
48774+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48775+{
48776+#ifdef CONFIG_GRKERNSEC_ROFS
48777+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48778+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48779+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48780+ return -EPERM;
48781+ } else
48782+ return 0;
48783+#endif
48784+ return 0;
48785+}
48786diff -urNp linux-3.0.3/grsecurity/grsec_pax.c linux-3.0.3/grsecurity/grsec_pax.c
48787--- linux-3.0.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48788+++ linux-3.0.3/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48789@@ -0,0 +1,36 @@
48790+#include <linux/kernel.h>
48791+#include <linux/sched.h>
48792+#include <linux/mm.h>
48793+#include <linux/file.h>
48794+#include <linux/grinternal.h>
48795+#include <linux/grsecurity.h>
48796+
48797+void
48798+gr_log_textrel(struct vm_area_struct * vma)
48799+{
48800+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48801+ if (grsec_enable_audit_textrel)
48802+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48803+#endif
48804+ return;
48805+}
48806+
48807+void
48808+gr_log_rwxmmap(struct file *file)
48809+{
48810+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48811+ if (grsec_enable_log_rwxmaps)
48812+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48813+#endif
48814+ return;
48815+}
48816+
48817+void
48818+gr_log_rwxmprotect(struct file *file)
48819+{
48820+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48821+ if (grsec_enable_log_rwxmaps)
48822+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48823+#endif
48824+ return;
48825+}
48826diff -urNp linux-3.0.3/grsecurity/grsec_ptrace.c linux-3.0.3/grsecurity/grsec_ptrace.c
48827--- linux-3.0.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48828+++ linux-3.0.3/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48829@@ -0,0 +1,14 @@
48830+#include <linux/kernel.h>
48831+#include <linux/sched.h>
48832+#include <linux/grinternal.h>
48833+#include <linux/grsecurity.h>
48834+
48835+void
48836+gr_audit_ptrace(struct task_struct *task)
48837+{
48838+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48839+ if (grsec_enable_audit_ptrace)
48840+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48841+#endif
48842+ return;
48843+}
48844diff -urNp linux-3.0.3/grsecurity/grsec_sig.c linux-3.0.3/grsecurity/grsec_sig.c
48845--- linux-3.0.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48846+++ linux-3.0.3/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48847@@ -0,0 +1,206 @@
48848+#include <linux/kernel.h>
48849+#include <linux/sched.h>
48850+#include <linux/delay.h>
48851+#include <linux/grsecurity.h>
48852+#include <linux/grinternal.h>
48853+#include <linux/hardirq.h>
48854+
48855+char *signames[] = {
48856+ [SIGSEGV] = "Segmentation fault",
48857+ [SIGILL] = "Illegal instruction",
48858+ [SIGABRT] = "Abort",
48859+ [SIGBUS] = "Invalid alignment/Bus error"
48860+};
48861+
48862+void
48863+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48864+{
48865+#ifdef CONFIG_GRKERNSEC_SIGNAL
48866+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48867+ (sig == SIGABRT) || (sig == SIGBUS))) {
48868+ if (t->pid == current->pid) {
48869+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48870+ } else {
48871+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48872+ }
48873+ }
48874+#endif
48875+ return;
48876+}
48877+
48878+int
48879+gr_handle_signal(const struct task_struct *p, const int sig)
48880+{
48881+#ifdef CONFIG_GRKERNSEC
48882+ if (current->pid > 1 && gr_check_protected_task(p)) {
48883+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48884+ return -EPERM;
48885+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48886+ return -EPERM;
48887+ }
48888+#endif
48889+ return 0;
48890+}
48891+
48892+#ifdef CONFIG_GRKERNSEC
48893+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48894+
48895+int gr_fake_force_sig(int sig, struct task_struct *t)
48896+{
48897+ unsigned long int flags;
48898+ int ret, blocked, ignored;
48899+ struct k_sigaction *action;
48900+
48901+ spin_lock_irqsave(&t->sighand->siglock, flags);
48902+ action = &t->sighand->action[sig-1];
48903+ ignored = action->sa.sa_handler == SIG_IGN;
48904+ blocked = sigismember(&t->blocked, sig);
48905+ if (blocked || ignored) {
48906+ action->sa.sa_handler = SIG_DFL;
48907+ if (blocked) {
48908+ sigdelset(&t->blocked, sig);
48909+ recalc_sigpending_and_wake(t);
48910+ }
48911+ }
48912+ if (action->sa.sa_handler == SIG_DFL)
48913+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
48914+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48915+
48916+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
48917+
48918+ return ret;
48919+}
48920+#endif
48921+
48922+#ifdef CONFIG_GRKERNSEC_BRUTE
48923+#define GR_USER_BAN_TIME (15 * 60)
48924+
48925+static int __get_dumpable(unsigned long mm_flags)
48926+{
48927+ int ret;
48928+
48929+ ret = mm_flags & MMF_DUMPABLE_MASK;
48930+ return (ret >= 2) ? 2 : ret;
48931+}
48932+#endif
48933+
48934+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48935+{
48936+#ifdef CONFIG_GRKERNSEC_BRUTE
48937+ uid_t uid = 0;
48938+
48939+ if (!grsec_enable_brute)
48940+ return;
48941+
48942+ rcu_read_lock();
48943+ read_lock(&tasklist_lock);
48944+ read_lock(&grsec_exec_file_lock);
48945+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48946+ p->real_parent->brute = 1;
48947+ else {
48948+ const struct cred *cred = __task_cred(p), *cred2;
48949+ struct task_struct *tsk, *tsk2;
48950+
48951+ if (!__get_dumpable(mm_flags) && cred->uid) {
48952+ struct user_struct *user;
48953+
48954+ uid = cred->uid;
48955+
48956+ /* this is put upon execution past expiration */
48957+ user = find_user(uid);
48958+ if (user == NULL)
48959+ goto unlock;
48960+ user->banned = 1;
48961+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48962+ if (user->ban_expires == ~0UL)
48963+ user->ban_expires--;
48964+
48965+ do_each_thread(tsk2, tsk) {
48966+ cred2 = __task_cred(tsk);
48967+ if (tsk != p && cred2->uid == uid)
48968+ gr_fake_force_sig(SIGKILL, tsk);
48969+ } while_each_thread(tsk2, tsk);
48970+ }
48971+ }
48972+unlock:
48973+ read_unlock(&grsec_exec_file_lock);
48974+ read_unlock(&tasklist_lock);
48975+ rcu_read_unlock();
48976+
48977+ if (uid)
48978+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48979+
48980+#endif
48981+ return;
48982+}
48983+
48984+void gr_handle_brute_check(void)
48985+{
48986+#ifdef CONFIG_GRKERNSEC_BRUTE
48987+ if (current->brute)
48988+ msleep(30 * 1000);
48989+#endif
48990+ return;
48991+}
48992+
48993+void gr_handle_kernel_exploit(void)
48994+{
48995+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
48996+ const struct cred *cred;
48997+ struct task_struct *tsk, *tsk2;
48998+ struct user_struct *user;
48999+ uid_t uid;
49000+
49001+ if (in_irq() || in_serving_softirq() || in_nmi())
49002+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49003+
49004+ uid = current_uid();
49005+
49006+ if (uid == 0)
49007+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
49008+ else {
49009+ /* kill all the processes of this user, hold a reference
49010+ to their creds struct, and prevent them from creating
49011+ another process until system reset
49012+ */
49013+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49014+ /* we intentionally leak this ref */
49015+ user = get_uid(current->cred->user);
49016+ if (user) {
49017+ user->banned = 1;
49018+ user->ban_expires = ~0UL;
49019+ }
49020+
49021+ read_lock(&tasklist_lock);
49022+ do_each_thread(tsk2, tsk) {
49023+ cred = __task_cred(tsk);
49024+ if (cred->uid == uid)
49025+ gr_fake_force_sig(SIGKILL, tsk);
49026+ } while_each_thread(tsk2, tsk);
49027+ read_unlock(&tasklist_lock);
49028+ }
49029+#endif
49030+}
49031+
49032+int __gr_process_user_ban(struct user_struct *user)
49033+{
49034+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49035+ if (unlikely(user->banned)) {
49036+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49037+ user->banned = 0;
49038+ user->ban_expires = 0;
49039+ free_uid(user);
49040+ } else
49041+ return -EPERM;
49042+ }
49043+#endif
49044+ return 0;
49045+}
49046+
49047+int gr_process_user_ban(void)
49048+{
49049+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49050+ return __gr_process_user_ban(current->cred->user);
49051+#endif
49052+ return 0;
49053+}
49054diff -urNp linux-3.0.3/grsecurity/grsec_sock.c linux-3.0.3/grsecurity/grsec_sock.c
49055--- linux-3.0.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49056+++ linux-3.0.3/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
49057@@ -0,0 +1,244 @@
49058+#include <linux/kernel.h>
49059+#include <linux/module.h>
49060+#include <linux/sched.h>
49061+#include <linux/file.h>
49062+#include <linux/net.h>
49063+#include <linux/in.h>
49064+#include <linux/ip.h>
49065+#include <net/sock.h>
49066+#include <net/inet_sock.h>
49067+#include <linux/grsecurity.h>
49068+#include <linux/grinternal.h>
49069+#include <linux/gracl.h>
49070+
49071+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49072+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49073+
49074+EXPORT_SYMBOL(gr_search_udp_recvmsg);
49075+EXPORT_SYMBOL(gr_search_udp_sendmsg);
49076+
49077+#ifdef CONFIG_UNIX_MODULE
49078+EXPORT_SYMBOL(gr_acl_handle_unix);
49079+EXPORT_SYMBOL(gr_acl_handle_mknod);
49080+EXPORT_SYMBOL(gr_handle_chroot_unix);
49081+EXPORT_SYMBOL(gr_handle_create);
49082+#endif
49083+
49084+#ifdef CONFIG_GRKERNSEC
49085+#define gr_conn_table_size 32749
49086+struct conn_table_entry {
49087+ struct conn_table_entry *next;
49088+ struct signal_struct *sig;
49089+};
49090+
49091+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49092+DEFINE_SPINLOCK(gr_conn_table_lock);
49093+
49094+extern const char * gr_socktype_to_name(unsigned char type);
49095+extern const char * gr_proto_to_name(unsigned char proto);
49096+extern const char * gr_sockfamily_to_name(unsigned char family);
49097+
49098+static __inline__ int
49099+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49100+{
49101+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49102+}
49103+
49104+static __inline__ int
49105+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49106+ __u16 sport, __u16 dport)
49107+{
49108+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49109+ sig->gr_sport == sport && sig->gr_dport == dport))
49110+ return 1;
49111+ else
49112+ return 0;
49113+}
49114+
49115+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49116+{
49117+ struct conn_table_entry **match;
49118+ unsigned int index;
49119+
49120+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49121+ sig->gr_sport, sig->gr_dport,
49122+ gr_conn_table_size);
49123+
49124+ newent->sig = sig;
49125+
49126+ match = &gr_conn_table[index];
49127+ newent->next = *match;
49128+ *match = newent;
49129+
49130+ return;
49131+}
49132+
49133+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49134+{
49135+ struct conn_table_entry *match, *last = NULL;
49136+ unsigned int index;
49137+
49138+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49139+ sig->gr_sport, sig->gr_dport,
49140+ gr_conn_table_size);
49141+
49142+ match = gr_conn_table[index];
49143+ while (match && !conn_match(match->sig,
49144+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49145+ sig->gr_dport)) {
49146+ last = match;
49147+ match = match->next;
49148+ }
49149+
49150+ if (match) {
49151+ if (last)
49152+ last->next = match->next;
49153+ else
49154+ gr_conn_table[index] = NULL;
49155+ kfree(match);
49156+ }
49157+
49158+ return;
49159+}
49160+
49161+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49162+ __u16 sport, __u16 dport)
49163+{
49164+ struct conn_table_entry *match;
49165+ unsigned int index;
49166+
49167+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49168+
49169+ match = gr_conn_table[index];
49170+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49171+ match = match->next;
49172+
49173+ if (match)
49174+ return match->sig;
49175+ else
49176+ return NULL;
49177+}
49178+
49179+#endif
49180+
49181+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49182+{
49183+#ifdef CONFIG_GRKERNSEC
49184+ struct signal_struct *sig = task->signal;
49185+ struct conn_table_entry *newent;
49186+
49187+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49188+ if (newent == NULL)
49189+ return;
49190+ /* no bh lock needed since we are called with bh disabled */
49191+ spin_lock(&gr_conn_table_lock);
49192+ gr_del_task_from_ip_table_nolock(sig);
49193+ sig->gr_saddr = inet->inet_rcv_saddr;
49194+ sig->gr_daddr = inet->inet_daddr;
49195+ sig->gr_sport = inet->inet_sport;
49196+ sig->gr_dport = inet->inet_dport;
49197+ gr_add_to_task_ip_table_nolock(sig, newent);
49198+ spin_unlock(&gr_conn_table_lock);
49199+#endif
49200+ return;
49201+}
49202+
49203+void gr_del_task_from_ip_table(struct task_struct *task)
49204+{
49205+#ifdef CONFIG_GRKERNSEC
49206+ spin_lock_bh(&gr_conn_table_lock);
49207+ gr_del_task_from_ip_table_nolock(task->signal);
49208+ spin_unlock_bh(&gr_conn_table_lock);
49209+#endif
49210+ return;
49211+}
49212+
49213+void
49214+gr_attach_curr_ip(const struct sock *sk)
49215+{
49216+#ifdef CONFIG_GRKERNSEC
49217+ struct signal_struct *p, *set;
49218+ const struct inet_sock *inet = inet_sk(sk);
49219+
49220+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49221+ return;
49222+
49223+ set = current->signal;
49224+
49225+ spin_lock_bh(&gr_conn_table_lock);
49226+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49227+ inet->inet_dport, inet->inet_sport);
49228+ if (unlikely(p != NULL)) {
49229+ set->curr_ip = p->curr_ip;
49230+ set->used_accept = 1;
49231+ gr_del_task_from_ip_table_nolock(p);
49232+ spin_unlock_bh(&gr_conn_table_lock);
49233+ return;
49234+ }
49235+ spin_unlock_bh(&gr_conn_table_lock);
49236+
49237+ set->curr_ip = inet->inet_daddr;
49238+ set->used_accept = 1;
49239+#endif
49240+ return;
49241+}
49242+
49243+int
49244+gr_handle_sock_all(const int family, const int type, const int protocol)
49245+{
49246+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49247+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49248+ (family != AF_UNIX)) {
49249+ if (family == AF_INET)
49250+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49251+ else
49252+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49253+ return -EACCES;
49254+ }
49255+#endif
49256+ return 0;
49257+}
49258+
49259+int
49260+gr_handle_sock_server(const struct sockaddr *sck)
49261+{
49262+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49263+ if (grsec_enable_socket_server &&
49264+ in_group_p(grsec_socket_server_gid) &&
49265+ sck && (sck->sa_family != AF_UNIX) &&
49266+ (sck->sa_family != AF_LOCAL)) {
49267+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49268+ return -EACCES;
49269+ }
49270+#endif
49271+ return 0;
49272+}
49273+
49274+int
49275+gr_handle_sock_server_other(const struct sock *sck)
49276+{
49277+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49278+ if (grsec_enable_socket_server &&
49279+ in_group_p(grsec_socket_server_gid) &&
49280+ sck && (sck->sk_family != AF_UNIX) &&
49281+ (sck->sk_family != AF_LOCAL)) {
49282+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49283+ return -EACCES;
49284+ }
49285+#endif
49286+ return 0;
49287+}
49288+
49289+int
49290+gr_handle_sock_client(const struct sockaddr *sck)
49291+{
49292+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49293+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49294+ sck && (sck->sa_family != AF_UNIX) &&
49295+ (sck->sa_family != AF_LOCAL)) {
49296+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49297+ return -EACCES;
49298+ }
49299+#endif
49300+ return 0;
49301+}
49302diff -urNp linux-3.0.3/grsecurity/grsec_sysctl.c linux-3.0.3/grsecurity/grsec_sysctl.c
49303--- linux-3.0.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49304+++ linux-3.0.3/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
49305@@ -0,0 +1,433 @@
49306+#include <linux/kernel.h>
49307+#include <linux/sched.h>
49308+#include <linux/sysctl.h>
49309+#include <linux/grsecurity.h>
49310+#include <linux/grinternal.h>
49311+
49312+int
49313+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49314+{
49315+#ifdef CONFIG_GRKERNSEC_SYSCTL
49316+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49317+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49318+ return -EACCES;
49319+ }
49320+#endif
49321+ return 0;
49322+}
49323+
49324+#ifdef CONFIG_GRKERNSEC_ROFS
49325+static int __maybe_unused one = 1;
49326+#endif
49327+
49328+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49329+struct ctl_table grsecurity_table[] = {
49330+#ifdef CONFIG_GRKERNSEC_SYSCTL
49331+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49332+#ifdef CONFIG_GRKERNSEC_IO
49333+ {
49334+ .procname = "disable_priv_io",
49335+ .data = &grsec_disable_privio,
49336+ .maxlen = sizeof(int),
49337+ .mode = 0600,
49338+ .proc_handler = &proc_dointvec,
49339+ },
49340+#endif
49341+#endif
49342+#ifdef CONFIG_GRKERNSEC_LINK
49343+ {
49344+ .procname = "linking_restrictions",
49345+ .data = &grsec_enable_link,
49346+ .maxlen = sizeof(int),
49347+ .mode = 0600,
49348+ .proc_handler = &proc_dointvec,
49349+ },
49350+#endif
49351+#ifdef CONFIG_GRKERNSEC_BRUTE
49352+ {
49353+ .procname = "deter_bruteforce",
49354+ .data = &grsec_enable_brute,
49355+ .maxlen = sizeof(int),
49356+ .mode = 0600,
49357+ .proc_handler = &proc_dointvec,
49358+ },
49359+#endif
49360+#ifdef CONFIG_GRKERNSEC_FIFO
49361+ {
49362+ .procname = "fifo_restrictions",
49363+ .data = &grsec_enable_fifo,
49364+ .maxlen = sizeof(int),
49365+ .mode = 0600,
49366+ .proc_handler = &proc_dointvec,
49367+ },
49368+#endif
49369+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49370+ {
49371+ .procname = "ip_blackhole",
49372+ .data = &grsec_enable_blackhole,
49373+ .maxlen = sizeof(int),
49374+ .mode = 0600,
49375+ .proc_handler = &proc_dointvec,
49376+ },
49377+ {
49378+ .procname = "lastack_retries",
49379+ .data = &grsec_lastack_retries,
49380+ .maxlen = sizeof(int),
49381+ .mode = 0600,
49382+ .proc_handler = &proc_dointvec,
49383+ },
49384+#endif
49385+#ifdef CONFIG_GRKERNSEC_EXECLOG
49386+ {
49387+ .procname = "exec_logging",
49388+ .data = &grsec_enable_execlog,
49389+ .maxlen = sizeof(int),
49390+ .mode = 0600,
49391+ .proc_handler = &proc_dointvec,
49392+ },
49393+#endif
49394+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49395+ {
49396+ .procname = "rwxmap_logging",
49397+ .data = &grsec_enable_log_rwxmaps,
49398+ .maxlen = sizeof(int),
49399+ .mode = 0600,
49400+ .proc_handler = &proc_dointvec,
49401+ },
49402+#endif
49403+#ifdef CONFIG_GRKERNSEC_SIGNAL
49404+ {
49405+ .procname = "signal_logging",
49406+ .data = &grsec_enable_signal,
49407+ .maxlen = sizeof(int),
49408+ .mode = 0600,
49409+ .proc_handler = &proc_dointvec,
49410+ },
49411+#endif
49412+#ifdef CONFIG_GRKERNSEC_FORKFAIL
49413+ {
49414+ .procname = "forkfail_logging",
49415+ .data = &grsec_enable_forkfail,
49416+ .maxlen = sizeof(int),
49417+ .mode = 0600,
49418+ .proc_handler = &proc_dointvec,
49419+ },
49420+#endif
49421+#ifdef CONFIG_GRKERNSEC_TIME
49422+ {
49423+ .procname = "timechange_logging",
49424+ .data = &grsec_enable_time,
49425+ .maxlen = sizeof(int),
49426+ .mode = 0600,
49427+ .proc_handler = &proc_dointvec,
49428+ },
49429+#endif
49430+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49431+ {
49432+ .procname = "chroot_deny_shmat",
49433+ .data = &grsec_enable_chroot_shmat,
49434+ .maxlen = sizeof(int),
49435+ .mode = 0600,
49436+ .proc_handler = &proc_dointvec,
49437+ },
49438+#endif
49439+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49440+ {
49441+ .procname = "chroot_deny_unix",
49442+ .data = &grsec_enable_chroot_unix,
49443+ .maxlen = sizeof(int),
49444+ .mode = 0600,
49445+ .proc_handler = &proc_dointvec,
49446+ },
49447+#endif
49448+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49449+ {
49450+ .procname = "chroot_deny_mount",
49451+ .data = &grsec_enable_chroot_mount,
49452+ .maxlen = sizeof(int),
49453+ .mode = 0600,
49454+ .proc_handler = &proc_dointvec,
49455+ },
49456+#endif
49457+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49458+ {
49459+ .procname = "chroot_deny_fchdir",
49460+ .data = &grsec_enable_chroot_fchdir,
49461+ .maxlen = sizeof(int),
49462+ .mode = 0600,
49463+ .proc_handler = &proc_dointvec,
49464+ },
49465+#endif
49466+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49467+ {
49468+ .procname = "chroot_deny_chroot",
49469+ .data = &grsec_enable_chroot_double,
49470+ .maxlen = sizeof(int),
49471+ .mode = 0600,
49472+ .proc_handler = &proc_dointvec,
49473+ },
49474+#endif
49475+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49476+ {
49477+ .procname = "chroot_deny_pivot",
49478+ .data = &grsec_enable_chroot_pivot,
49479+ .maxlen = sizeof(int),
49480+ .mode = 0600,
49481+ .proc_handler = &proc_dointvec,
49482+ },
49483+#endif
49484+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49485+ {
49486+ .procname = "chroot_enforce_chdir",
49487+ .data = &grsec_enable_chroot_chdir,
49488+ .maxlen = sizeof(int),
49489+ .mode = 0600,
49490+ .proc_handler = &proc_dointvec,
49491+ },
49492+#endif
49493+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49494+ {
49495+ .procname = "chroot_deny_chmod",
49496+ .data = &grsec_enable_chroot_chmod,
49497+ .maxlen = sizeof(int),
49498+ .mode = 0600,
49499+ .proc_handler = &proc_dointvec,
49500+ },
49501+#endif
49502+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49503+ {
49504+ .procname = "chroot_deny_mknod",
49505+ .data = &grsec_enable_chroot_mknod,
49506+ .maxlen = sizeof(int),
49507+ .mode = 0600,
49508+ .proc_handler = &proc_dointvec,
49509+ },
49510+#endif
49511+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49512+ {
49513+ .procname = "chroot_restrict_nice",
49514+ .data = &grsec_enable_chroot_nice,
49515+ .maxlen = sizeof(int),
49516+ .mode = 0600,
49517+ .proc_handler = &proc_dointvec,
49518+ },
49519+#endif
49520+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49521+ {
49522+ .procname = "chroot_execlog",
49523+ .data = &grsec_enable_chroot_execlog,
49524+ .maxlen = sizeof(int),
49525+ .mode = 0600,
49526+ .proc_handler = &proc_dointvec,
49527+ },
49528+#endif
49529+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49530+ {
49531+ .procname = "chroot_caps",
49532+ .data = &grsec_enable_chroot_caps,
49533+ .maxlen = sizeof(int),
49534+ .mode = 0600,
49535+ .proc_handler = &proc_dointvec,
49536+ },
49537+#endif
49538+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49539+ {
49540+ .procname = "chroot_deny_sysctl",
49541+ .data = &grsec_enable_chroot_sysctl,
49542+ .maxlen = sizeof(int),
49543+ .mode = 0600,
49544+ .proc_handler = &proc_dointvec,
49545+ },
49546+#endif
49547+#ifdef CONFIG_GRKERNSEC_TPE
49548+ {
49549+ .procname = "tpe",
49550+ .data = &grsec_enable_tpe,
49551+ .maxlen = sizeof(int),
49552+ .mode = 0600,
49553+ .proc_handler = &proc_dointvec,
49554+ },
49555+ {
49556+ .procname = "tpe_gid",
49557+ .data = &grsec_tpe_gid,
49558+ .maxlen = sizeof(int),
49559+ .mode = 0600,
49560+ .proc_handler = &proc_dointvec,
49561+ },
49562+#endif
49563+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49564+ {
49565+ .procname = "tpe_invert",
49566+ .data = &grsec_enable_tpe_invert,
49567+ .maxlen = sizeof(int),
49568+ .mode = 0600,
49569+ .proc_handler = &proc_dointvec,
49570+ },
49571+#endif
49572+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49573+ {
49574+ .procname = "tpe_restrict_all",
49575+ .data = &grsec_enable_tpe_all,
49576+ .maxlen = sizeof(int),
49577+ .mode = 0600,
49578+ .proc_handler = &proc_dointvec,
49579+ },
49580+#endif
49581+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49582+ {
49583+ .procname = "socket_all",
49584+ .data = &grsec_enable_socket_all,
49585+ .maxlen = sizeof(int),
49586+ .mode = 0600,
49587+ .proc_handler = &proc_dointvec,
49588+ },
49589+ {
49590+ .procname = "socket_all_gid",
49591+ .data = &grsec_socket_all_gid,
49592+ .maxlen = sizeof(int),
49593+ .mode = 0600,
49594+ .proc_handler = &proc_dointvec,
49595+ },
49596+#endif
49597+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49598+ {
49599+ .procname = "socket_client",
49600+ .data = &grsec_enable_socket_client,
49601+ .maxlen = sizeof(int),
49602+ .mode = 0600,
49603+ .proc_handler = &proc_dointvec,
49604+ },
49605+ {
49606+ .procname = "socket_client_gid",
49607+ .data = &grsec_socket_client_gid,
49608+ .maxlen = sizeof(int),
49609+ .mode = 0600,
49610+ .proc_handler = &proc_dointvec,
49611+ },
49612+#endif
49613+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49614+ {
49615+ .procname = "socket_server",
49616+ .data = &grsec_enable_socket_server,
49617+ .maxlen = sizeof(int),
49618+ .mode = 0600,
49619+ .proc_handler = &proc_dointvec,
49620+ },
49621+ {
49622+ .procname = "socket_server_gid",
49623+ .data = &grsec_socket_server_gid,
49624+ .maxlen = sizeof(int),
49625+ .mode = 0600,
49626+ .proc_handler = &proc_dointvec,
49627+ },
49628+#endif
49629+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49630+ {
49631+ .procname = "audit_group",
49632+ .data = &grsec_enable_group,
49633+ .maxlen = sizeof(int),
49634+ .mode = 0600,
49635+ .proc_handler = &proc_dointvec,
49636+ },
49637+ {
49638+ .procname = "audit_gid",
49639+ .data = &grsec_audit_gid,
49640+ .maxlen = sizeof(int),
49641+ .mode = 0600,
49642+ .proc_handler = &proc_dointvec,
49643+ },
49644+#endif
49645+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49646+ {
49647+ .procname = "audit_chdir",
49648+ .data = &grsec_enable_chdir,
49649+ .maxlen = sizeof(int),
49650+ .mode = 0600,
49651+ .proc_handler = &proc_dointvec,
49652+ },
49653+#endif
49654+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49655+ {
49656+ .procname = "audit_mount",
49657+ .data = &grsec_enable_mount,
49658+ .maxlen = sizeof(int),
49659+ .mode = 0600,
49660+ .proc_handler = &proc_dointvec,
49661+ },
49662+#endif
49663+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49664+ {
49665+ .procname = "audit_textrel",
49666+ .data = &grsec_enable_audit_textrel,
49667+ .maxlen = sizeof(int),
49668+ .mode = 0600,
49669+ .proc_handler = &proc_dointvec,
49670+ },
49671+#endif
49672+#ifdef CONFIG_GRKERNSEC_DMESG
49673+ {
49674+ .procname = "dmesg",
49675+ .data = &grsec_enable_dmesg,
49676+ .maxlen = sizeof(int),
49677+ .mode = 0600,
49678+ .proc_handler = &proc_dointvec,
49679+ },
49680+#endif
49681+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49682+ {
49683+ .procname = "chroot_findtask",
49684+ .data = &grsec_enable_chroot_findtask,
49685+ .maxlen = sizeof(int),
49686+ .mode = 0600,
49687+ .proc_handler = &proc_dointvec,
49688+ },
49689+#endif
49690+#ifdef CONFIG_GRKERNSEC_RESLOG
49691+ {
49692+ .procname = "resource_logging",
49693+ .data = &grsec_resource_logging,
49694+ .maxlen = sizeof(int),
49695+ .mode = 0600,
49696+ .proc_handler = &proc_dointvec,
49697+ },
49698+#endif
49699+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49700+ {
49701+ .procname = "audit_ptrace",
49702+ .data = &grsec_enable_audit_ptrace,
49703+ .maxlen = sizeof(int),
49704+ .mode = 0600,
49705+ .proc_handler = &proc_dointvec,
49706+ },
49707+#endif
49708+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49709+ {
49710+ .procname = "harden_ptrace",
49711+ .data = &grsec_enable_harden_ptrace,
49712+ .maxlen = sizeof(int),
49713+ .mode = 0600,
49714+ .proc_handler = &proc_dointvec,
49715+ },
49716+#endif
49717+ {
49718+ .procname = "grsec_lock",
49719+ .data = &grsec_lock,
49720+ .maxlen = sizeof(int),
49721+ .mode = 0600,
49722+ .proc_handler = &proc_dointvec,
49723+ },
49724+#endif
49725+#ifdef CONFIG_GRKERNSEC_ROFS
49726+ {
49727+ .procname = "romount_protect",
49728+ .data = &grsec_enable_rofs,
49729+ .maxlen = sizeof(int),
49730+ .mode = 0600,
49731+ .proc_handler = &proc_dointvec_minmax,
49732+ .extra1 = &one,
49733+ .extra2 = &one,
49734+ },
49735+#endif
49736+ { }
49737+};
49738+#endif
49739diff -urNp linux-3.0.3/grsecurity/grsec_time.c linux-3.0.3/grsecurity/grsec_time.c
49740--- linux-3.0.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49741+++ linux-3.0.3/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49742@@ -0,0 +1,16 @@
49743+#include <linux/kernel.h>
49744+#include <linux/sched.h>
49745+#include <linux/grinternal.h>
49746+#include <linux/module.h>
49747+
49748+void
49749+gr_log_timechange(void)
49750+{
49751+#ifdef CONFIG_GRKERNSEC_TIME
49752+ if (grsec_enable_time)
49753+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49754+#endif
49755+ return;
49756+}
49757+
49758+EXPORT_SYMBOL(gr_log_timechange);
49759diff -urNp linux-3.0.3/grsecurity/grsec_tpe.c linux-3.0.3/grsecurity/grsec_tpe.c
49760--- linux-3.0.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49761+++ linux-3.0.3/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49762@@ -0,0 +1,39 @@
49763+#include <linux/kernel.h>
49764+#include <linux/sched.h>
49765+#include <linux/file.h>
49766+#include <linux/fs.h>
49767+#include <linux/grinternal.h>
49768+
49769+extern int gr_acl_tpe_check(void);
49770+
49771+int
49772+gr_tpe_allow(const struct file *file)
49773+{
49774+#ifdef CONFIG_GRKERNSEC
49775+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49776+ const struct cred *cred = current_cred();
49777+
49778+ if (cred->uid && ((grsec_enable_tpe &&
49779+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49780+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49781+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49782+#else
49783+ in_group_p(grsec_tpe_gid)
49784+#endif
49785+ ) || gr_acl_tpe_check()) &&
49786+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49787+ (inode->i_mode & S_IWOTH))))) {
49788+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49789+ return 0;
49790+ }
49791+#ifdef CONFIG_GRKERNSEC_TPE_ALL
49792+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49793+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49794+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49795+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49796+ return 0;
49797+ }
49798+#endif
49799+#endif
49800+ return 1;
49801+}
49802diff -urNp linux-3.0.3/grsecurity/grsum.c linux-3.0.3/grsecurity/grsum.c
49803--- linux-3.0.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49804+++ linux-3.0.3/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49805@@ -0,0 +1,61 @@
49806+#include <linux/err.h>
49807+#include <linux/kernel.h>
49808+#include <linux/sched.h>
49809+#include <linux/mm.h>
49810+#include <linux/scatterlist.h>
49811+#include <linux/crypto.h>
49812+#include <linux/gracl.h>
49813+
49814+
49815+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49816+#error "crypto and sha256 must be built into the kernel"
49817+#endif
49818+
49819+int
49820+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49821+{
49822+ char *p;
49823+ struct crypto_hash *tfm;
49824+ struct hash_desc desc;
49825+ struct scatterlist sg;
49826+ unsigned char temp_sum[GR_SHA_LEN];
49827+ volatile int retval = 0;
49828+ volatile int dummy = 0;
49829+ unsigned int i;
49830+
49831+ sg_init_table(&sg, 1);
49832+
49833+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49834+ if (IS_ERR(tfm)) {
49835+ /* should never happen, since sha256 should be built in */
49836+ return 1;
49837+ }
49838+
49839+ desc.tfm = tfm;
49840+ desc.flags = 0;
49841+
49842+ crypto_hash_init(&desc);
49843+
49844+ p = salt;
49845+ sg_set_buf(&sg, p, GR_SALT_LEN);
49846+ crypto_hash_update(&desc, &sg, sg.length);
49847+
49848+ p = entry->pw;
49849+ sg_set_buf(&sg, p, strlen(p));
49850+
49851+ crypto_hash_update(&desc, &sg, sg.length);
49852+
49853+ crypto_hash_final(&desc, temp_sum);
49854+
49855+ memset(entry->pw, 0, GR_PW_LEN);
49856+
49857+ for (i = 0; i < GR_SHA_LEN; i++)
49858+ if (sum[i] != temp_sum[i])
49859+ retval = 1;
49860+ else
49861+ dummy = 1; // waste a cycle
49862+
49863+ crypto_free_hash(tfm);
49864+
49865+ return retval;
49866+}
49867diff -urNp linux-3.0.3/grsecurity/Kconfig linux-3.0.3/grsecurity/Kconfig
49868--- linux-3.0.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49869+++ linux-3.0.3/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
49870@@ -0,0 +1,1038 @@
49871+#
49872+# grecurity configuration
49873+#
49874+
49875+menu "Grsecurity"
49876+
49877+config GRKERNSEC
49878+ bool "Grsecurity"
49879+ select CRYPTO
49880+ select CRYPTO_SHA256
49881+ help
49882+ If you say Y here, you will be able to configure many features
49883+ that will enhance the security of your system. It is highly
49884+ recommended that you say Y here and read through the help
49885+ for each option so that you fully understand the features and
49886+ can evaluate their usefulness for your machine.
49887+
49888+choice
49889+ prompt "Security Level"
49890+ depends on GRKERNSEC
49891+ default GRKERNSEC_CUSTOM
49892+
49893+config GRKERNSEC_LOW
49894+ bool "Low"
49895+ select GRKERNSEC_LINK
49896+ select GRKERNSEC_FIFO
49897+ select GRKERNSEC_RANDNET
49898+ select GRKERNSEC_DMESG
49899+ select GRKERNSEC_CHROOT
49900+ select GRKERNSEC_CHROOT_CHDIR
49901+
49902+ help
49903+ If you choose this option, several of the grsecurity options will
49904+ be enabled that will give you greater protection against a number
49905+ of attacks, while assuring that none of your software will have any
49906+ conflicts with the additional security measures. If you run a lot
49907+ of unusual software, or you are having problems with the higher
49908+ security levels, you should say Y here. With this option, the
49909+ following features are enabled:
49910+
49911+ - Linking restrictions
49912+ - FIFO restrictions
49913+ - Restricted dmesg
49914+ - Enforced chdir("/") on chroot
49915+ - Runtime module disabling
49916+
49917+config GRKERNSEC_MEDIUM
49918+ bool "Medium"
49919+ select PAX
49920+ select PAX_EI_PAX
49921+ select PAX_PT_PAX_FLAGS
49922+ select PAX_HAVE_ACL_FLAGS
49923+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49924+ select GRKERNSEC_CHROOT
49925+ select GRKERNSEC_CHROOT_SYSCTL
49926+ select GRKERNSEC_LINK
49927+ select GRKERNSEC_FIFO
49928+ select GRKERNSEC_DMESG
49929+ select GRKERNSEC_RANDNET
49930+ select GRKERNSEC_FORKFAIL
49931+ select GRKERNSEC_TIME
49932+ select GRKERNSEC_SIGNAL
49933+ select GRKERNSEC_CHROOT
49934+ select GRKERNSEC_CHROOT_UNIX
49935+ select GRKERNSEC_CHROOT_MOUNT
49936+ select GRKERNSEC_CHROOT_PIVOT
49937+ select GRKERNSEC_CHROOT_DOUBLE
49938+ select GRKERNSEC_CHROOT_CHDIR
49939+ select GRKERNSEC_CHROOT_MKNOD
49940+ select GRKERNSEC_PROC
49941+ select GRKERNSEC_PROC_USERGROUP
49942+ select PAX_RANDUSTACK
49943+ select PAX_ASLR
49944+ select PAX_RANDMMAP
49945+ select PAX_REFCOUNT if (X86 || SPARC64)
49946+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49947+
49948+ help
49949+ If you say Y here, several features in addition to those included
49950+ in the low additional security level will be enabled. These
49951+ features provide even more security to your system, though in rare
49952+ cases they may be incompatible with very old or poorly written
49953+ software. If you enable this option, make sure that your auth
49954+ service (identd) is running as gid 1001. With this option,
49955+ the following features (in addition to those provided in the
49956+ low additional security level) will be enabled:
49957+
49958+ - Failed fork logging
49959+ - Time change logging
49960+ - Signal logging
49961+ - Deny mounts in chroot
49962+ - Deny double chrooting
49963+ - Deny sysctl writes in chroot
49964+ - Deny mknod in chroot
49965+ - Deny access to abstract AF_UNIX sockets out of chroot
49966+ - Deny pivot_root in chroot
49967+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49968+ - /proc restrictions with special GID set to 10 (usually wheel)
49969+ - Address Space Layout Randomization (ASLR)
49970+ - Prevent exploitation of most refcount overflows
49971+ - Bounds checking of copying between the kernel and userland
49972+
49973+config GRKERNSEC_HIGH
49974+ bool "High"
49975+ select GRKERNSEC_LINK
49976+ select GRKERNSEC_FIFO
49977+ select GRKERNSEC_DMESG
49978+ select GRKERNSEC_FORKFAIL
49979+ select GRKERNSEC_TIME
49980+ select GRKERNSEC_SIGNAL
49981+ select GRKERNSEC_CHROOT
49982+ select GRKERNSEC_CHROOT_SHMAT
49983+ select GRKERNSEC_CHROOT_UNIX
49984+ select GRKERNSEC_CHROOT_MOUNT
49985+ select GRKERNSEC_CHROOT_FCHDIR
49986+ select GRKERNSEC_CHROOT_PIVOT
49987+ select GRKERNSEC_CHROOT_DOUBLE
49988+ select GRKERNSEC_CHROOT_CHDIR
49989+ select GRKERNSEC_CHROOT_MKNOD
49990+ select GRKERNSEC_CHROOT_CAPS
49991+ select GRKERNSEC_CHROOT_SYSCTL
49992+ select GRKERNSEC_CHROOT_FINDTASK
49993+ select GRKERNSEC_SYSFS_RESTRICT
49994+ select GRKERNSEC_PROC
49995+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49996+ select GRKERNSEC_HIDESYM
49997+ select GRKERNSEC_BRUTE
49998+ select GRKERNSEC_PROC_USERGROUP
49999+ select GRKERNSEC_KMEM
50000+ select GRKERNSEC_RESLOG
50001+ select GRKERNSEC_RANDNET
50002+ select GRKERNSEC_PROC_ADD
50003+ select GRKERNSEC_CHROOT_CHMOD
50004+ select GRKERNSEC_CHROOT_NICE
50005+ select GRKERNSEC_AUDIT_MOUNT
50006+ select GRKERNSEC_MODHARDEN if (MODULES)
50007+ select GRKERNSEC_HARDEN_PTRACE
50008+ select GRKERNSEC_VM86 if (X86_32)
50009+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50010+ select PAX
50011+ select PAX_RANDUSTACK
50012+ select PAX_ASLR
50013+ select PAX_RANDMMAP
50014+ select PAX_NOEXEC
50015+ select PAX_MPROTECT
50016+ select PAX_EI_PAX
50017+ select PAX_PT_PAX_FLAGS
50018+ select PAX_HAVE_ACL_FLAGS
50019+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50020+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
50021+ select PAX_RANDKSTACK if (X86_TSC && X86)
50022+ select PAX_SEGMEXEC if (X86_32)
50023+ select PAX_PAGEEXEC
50024+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50025+ select PAX_EMUTRAMP if (PARISC)
50026+ select PAX_EMUSIGRT if (PARISC)
50027+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50028+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50029+ select PAX_REFCOUNT if (X86 || SPARC64)
50030+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50031+ help
50032+ If you say Y here, many of the features of grsecurity will be
50033+ enabled, which will protect you against many kinds of attacks
50034+ against your system. The heightened security comes at a cost
50035+ of an increased chance of incompatibilities with rare software
50036+ on your machine. Since this security level enables PaX, you should
50037+ view <http://pax.grsecurity.net> and read about the PaX
50038+ project. While you are there, download chpax and run it on
50039+ binaries that cause problems with PaX. Also remember that
50040+ since the /proc restrictions are enabled, you must run your
50041+ identd as gid 1001. This security level enables the following
50042+ features in addition to those listed in the low and medium
50043+ security levels:
50044+
50045+ - Additional /proc restrictions
50046+ - Chmod restrictions in chroot
50047+ - No signals, ptrace, or viewing of processes outside of chroot
50048+ - Capability restrictions in chroot
50049+ - Deny fchdir out of chroot
50050+ - Priority restrictions in chroot
50051+ - Segmentation-based implementation of PaX
50052+ - Mprotect restrictions
50053+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50054+ - Kernel stack randomization
50055+ - Mount/unmount/remount logging
50056+ - Kernel symbol hiding
50057+ - Prevention of memory exhaustion-based exploits
50058+ - Hardening of module auto-loading
50059+ - Ptrace restrictions
50060+ - Restricted vm86 mode
50061+ - Restricted sysfs/debugfs
50062+ - Active kernel exploit response
50063+
50064+config GRKERNSEC_CUSTOM
50065+ bool "Custom"
50066+ help
50067+ If you say Y here, you will be able to configure every grsecurity
50068+ option, which allows you to enable many more features that aren't
50069+ covered in the basic security levels. These additional features
50070+ include TPE, socket restrictions, and the sysctl system for
50071+ grsecurity. It is advised that you read through the help for
50072+ each option to determine its usefulness in your situation.
50073+
50074+endchoice
50075+
50076+menu "Address Space Protection"
50077+depends on GRKERNSEC
50078+
50079+config GRKERNSEC_KMEM
50080+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50081+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50082+ help
50083+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50084+ be written to via mmap or otherwise to modify the running kernel.
50085+ /dev/port will also not be allowed to be opened. If you have module
50086+ support disabled, enabling this will close up four ways that are
50087+ currently used to insert malicious code into the running kernel.
50088+ Even with all these features enabled, we still highly recommend that
50089+ you use the RBAC system, as it is still possible for an attacker to
50090+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50091+ If you are not using XFree86, you may be able to stop this additional
50092+ case by enabling the 'Disable privileged I/O' option. Though nothing
50093+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50094+ but only to video memory, which is the only writing we allow in this
50095+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50096+ not be allowed to mprotect it with PROT_WRITE later.
50097+ It is highly recommended that you say Y here if you meet all the
50098+ conditions above.
50099+
50100+config GRKERNSEC_VM86
50101+ bool "Restrict VM86 mode"
50102+ depends on X86_32
50103+
50104+ help
50105+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50106+ make use of a special execution mode on 32bit x86 processors called
50107+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50108+ video cards and will still work with this option enabled. The purpose
50109+ of the option is to prevent exploitation of emulation errors in
50110+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50111+ Nearly all users should be able to enable this option.
50112+
50113+config GRKERNSEC_IO
50114+ bool "Disable privileged I/O"
50115+ depends on X86
50116+ select RTC_CLASS
50117+ select RTC_INTF_DEV
50118+ select RTC_DRV_CMOS
50119+
50120+ help
50121+ If you say Y here, all ioperm and iopl calls will return an error.
50122+ Ioperm and iopl can be used to modify the running kernel.
50123+ Unfortunately, some programs need this access to operate properly,
50124+ the most notable of which are XFree86 and hwclock. hwclock can be
50125+ remedied by having RTC support in the kernel, so real-time
50126+ clock support is enabled if this option is enabled, to ensure
50127+ that hwclock operates correctly. XFree86 still will not
50128+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50129+ IF YOU USE XFree86. If you use XFree86 and you still want to
50130+ protect your kernel against modification, use the RBAC system.
50131+
50132+config GRKERNSEC_PROC_MEMMAP
50133+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50134+ default y if (PAX_NOEXEC || PAX_ASLR)
50135+ depends on PAX_NOEXEC || PAX_ASLR
50136+ help
50137+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50138+ give no information about the addresses of its mappings if
50139+ PaX features that rely on random addresses are enabled on the task.
50140+ If you use PaX it is greatly recommended that you say Y here as it
50141+ closes up a hole that makes the full ASLR useless for suid
50142+ binaries.
50143+
50144+config GRKERNSEC_BRUTE
50145+ bool "Deter exploit bruteforcing"
50146+ help
50147+ If you say Y here, attempts to bruteforce exploits against forking
50148+ daemons such as apache or sshd, as well as against suid/sgid binaries
50149+ will be deterred. When a child of a forking daemon is killed by PaX
50150+ or crashes due to an illegal instruction or other suspicious signal,
50151+ the parent process will be delayed 30 seconds upon every subsequent
50152+ fork until the administrator is able to assess the situation and
50153+ restart the daemon.
50154+ In the suid/sgid case, the attempt is logged, the user has all their
50155+ processes terminated, and they are prevented from executing any further
50156+ processes for 15 minutes.
50157+ It is recommended that you also enable signal logging in the auditing
50158+ section so that logs are generated when a process triggers a suspicious
50159+ signal.
50160+ If the sysctl option is enabled, a sysctl option with name
50161+ "deter_bruteforce" is created.
50162+
50163+
50164+config GRKERNSEC_MODHARDEN
50165+ bool "Harden module auto-loading"
50166+ depends on MODULES
50167+ help
50168+ If you say Y here, module auto-loading in response to use of some
50169+ feature implemented by an unloaded module will be restricted to
50170+ root users. Enabling this option helps defend against attacks
50171+ by unprivileged users who abuse the auto-loading behavior to
50172+ cause a vulnerable module to load that is then exploited.
50173+
50174+ If this option prevents a legitimate use of auto-loading for a
50175+ non-root user, the administrator can execute modprobe manually
50176+ with the exact name of the module mentioned in the alert log.
50177+ Alternatively, the administrator can add the module to the list
50178+ of modules loaded at boot by modifying init scripts.
50179+
50180+ Modification of init scripts will most likely be needed on
50181+ Ubuntu servers with encrypted home directory support enabled,
50182+ as the first non-root user logging in will cause the ecb(aes),
50183+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50184+
50185+config GRKERNSEC_HIDESYM
50186+ bool "Hide kernel symbols"
50187+ help
50188+ If you say Y here, getting information on loaded modules, and
50189+ displaying all kernel symbols through a syscall will be restricted
50190+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50191+ /proc/kallsyms will be restricted to the root user. The RBAC
50192+ system can hide that entry even from root.
50193+
50194+ This option also prevents leaking of kernel addresses through
50195+ several /proc entries.
50196+
50197+ Note that this option is only effective provided the following
50198+ conditions are met:
50199+ 1) The kernel using grsecurity is not precompiled by some distribution
50200+ 2) You have also enabled GRKERNSEC_DMESG
50201+ 3) You are using the RBAC system and hiding other files such as your
50202+ kernel image and System.map. Alternatively, enabling this option
50203+ causes the permissions on /boot, /lib/modules, and the kernel
50204+ source directory to change at compile time to prevent
50205+ reading by non-root users.
50206+ If the above conditions are met, this option will aid in providing a
50207+ useful protection against local kernel exploitation of overflows
50208+ and arbitrary read/write vulnerabilities.
50209+
50210+config GRKERNSEC_KERN_LOCKOUT
50211+ bool "Active kernel exploit response"
50212+ depends on X86 || ARM || PPC || SPARC
50213+ help
50214+ If you say Y here, when a PaX alert is triggered due to suspicious
50215+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50216+ or an OOPs occurs due to bad memory accesses, instead of just
50217+ terminating the offending process (and potentially allowing
50218+ a subsequent exploit from the same user), we will take one of two
50219+ actions:
50220+ If the user was root, we will panic the system
50221+ If the user was non-root, we will log the attempt, terminate
50222+ all processes owned by the user, then prevent them from creating
50223+ any new processes until the system is restarted
50224+ This deters repeated kernel exploitation/bruteforcing attempts
50225+ and is useful for later forensics.
50226+
50227+endmenu
50228+menu "Role Based Access Control Options"
50229+depends on GRKERNSEC
50230+
50231+config GRKERNSEC_RBAC_DEBUG
50232+ bool
50233+
50234+config GRKERNSEC_NO_RBAC
50235+ bool "Disable RBAC system"
50236+ help
50237+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50238+ preventing the RBAC system from being enabled. You should only say Y
50239+ here if you have no intention of using the RBAC system, so as to prevent
50240+ an attacker with root access from misusing the RBAC system to hide files
50241+ and processes when loadable module support and /dev/[k]mem have been
50242+ locked down.
50243+
50244+config GRKERNSEC_ACL_HIDEKERN
50245+ bool "Hide kernel processes"
50246+ help
50247+ If you say Y here, all kernel threads will be hidden to all
50248+ processes but those whose subject has the "view hidden processes"
50249+ flag.
50250+
50251+config GRKERNSEC_ACL_MAXTRIES
50252+ int "Maximum tries before password lockout"
50253+ default 3
50254+ help
50255+ This option enforces the maximum number of times a user can attempt
50256+ to authorize themselves with the grsecurity RBAC system before being
50257+ denied the ability to attempt authorization again for a specified time.
50258+ The lower the number, the harder it will be to brute-force a password.
50259+
50260+config GRKERNSEC_ACL_TIMEOUT
50261+ int "Time to wait after max password tries, in seconds"
50262+ default 30
50263+ help
50264+ This option specifies the time the user must wait after attempting to
50265+ authorize to the RBAC system with the maximum number of invalid
50266+ passwords. The higher the number, the harder it will be to brute-force
50267+ a password.
50268+
50269+endmenu
50270+menu "Filesystem Protections"
50271+depends on GRKERNSEC
50272+
50273+config GRKERNSEC_PROC
50274+ bool "Proc restrictions"
50275+ help
50276+ If you say Y here, the permissions of the /proc filesystem
50277+ will be altered to enhance system security and privacy. You MUST
50278+ choose either a user only restriction or a user and group restriction.
50279+ Depending upon the option you choose, you can either restrict users to
50280+ see only the processes they themselves run, or choose a group that can
50281+ view all processes and files normally restricted to root if you choose
50282+ the "restrict to user only" option. NOTE: If you're running identd as
50283+ a non-root user, you will have to run it as the group you specify here.
50284+
50285+config GRKERNSEC_PROC_USER
50286+ bool "Restrict /proc to user only"
50287+ depends on GRKERNSEC_PROC
50288+ help
50289+ If you say Y here, non-root users will only be able to view their own
50290+ processes, and restricts them from viewing network-related information,
50291+ and viewing kernel symbol and module information.
50292+
50293+config GRKERNSEC_PROC_USERGROUP
50294+ bool "Allow special group"
50295+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50296+ help
50297+ If you say Y here, you will be able to select a group that will be
50298+ able to view all processes and network-related information. If you've
50299+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50300+ remain hidden. This option is useful if you want to run identd as
50301+ a non-root user.
50302+
50303+config GRKERNSEC_PROC_GID
50304+ int "GID for special group"
50305+ depends on GRKERNSEC_PROC_USERGROUP
50306+ default 1001
50307+
50308+config GRKERNSEC_PROC_ADD
50309+ bool "Additional restrictions"
50310+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50311+ help
50312+ If you say Y here, additional restrictions will be placed on
50313+ /proc that keep normal users from viewing device information and
50314+ slabinfo information that could be useful for exploits.
50315+
50316+config GRKERNSEC_LINK
50317+ bool "Linking restrictions"
50318+ help
50319+ If you say Y here, /tmp race exploits will be prevented, since users
50320+ will no longer be able to follow symlinks owned by other users in
50321+ world-writable +t directories (e.g. /tmp), unless the owner of the
50322+ symlink is the owner of the directory. Users will also not be
50323+ able to hardlink to files they do not own. If the sysctl option is
50324+ enabled, a sysctl option with name "linking_restrictions" is created.
50325+
50326+config GRKERNSEC_FIFO
50327+ bool "FIFO restrictions"
50328+ help
50329+ If you say Y here, users will not be able to write to FIFOs they don't
50330+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50331+ the FIFO is the same owner of the directory it's held in. If the sysctl
50332+ option is enabled, a sysctl option with name "fifo_restrictions" is
50333+ created.
50334+
50335+config GRKERNSEC_SYSFS_RESTRICT
50336+ bool "Sysfs/debugfs restriction"
50337+ depends on SYSFS
50338+ help
50339+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50340+ any filesystem normally mounted under it (e.g. debugfs) will only
50341+ be accessible by root. These filesystems generally provide access
50342+ to hardware and debug information that isn't appropriate for unprivileged
50343+ users of the system. Sysfs and debugfs have also become a large source
50344+ of new vulnerabilities, ranging from infoleaks to local compromise.
50345+ There has been very little oversight with an eye toward security involved
50346+ in adding new exporters of information to these filesystems, so their
50347+ use is discouraged.
50348+ This option is equivalent to a chmod 0700 of the mount paths.
50349+
50350+config GRKERNSEC_ROFS
50351+ bool "Runtime read-only mount protection"
50352+ help
50353+ If you say Y here, a sysctl option with name "romount_protect" will
50354+ be created. By setting this option to 1 at runtime, filesystems
50355+ will be protected in the following ways:
50356+ * No new writable mounts will be allowed
50357+ * Existing read-only mounts won't be able to be remounted read/write
50358+ * Write operations will be denied on all block devices
50359+ This option acts independently of grsec_lock: once it is set to 1,
50360+ it cannot be turned off. Therefore, please be mindful of the resulting
50361+ behavior if this option is enabled in an init script on a read-only
50362+ filesystem. This feature is mainly intended for secure embedded systems.
50363+
50364+config GRKERNSEC_CHROOT
50365+ bool "Chroot jail restrictions"
50366+ help
50367+ If you say Y here, you will be able to choose several options that will
50368+ make breaking out of a chrooted jail much more difficult. If you
50369+ encounter no software incompatibilities with the following options, it
50370+ is recommended that you enable each one.
50371+
50372+config GRKERNSEC_CHROOT_MOUNT
50373+ bool "Deny mounts"
50374+ depends on GRKERNSEC_CHROOT
50375+ help
50376+ If you say Y here, processes inside a chroot will not be able to
50377+ mount or remount filesystems. If the sysctl option is enabled, a
50378+ sysctl option with name "chroot_deny_mount" is created.
50379+
50380+config GRKERNSEC_CHROOT_DOUBLE
50381+ bool "Deny double-chroots"
50382+ depends on GRKERNSEC_CHROOT
50383+ help
50384+ If you say Y here, processes inside a chroot will not be able to chroot
50385+ again outside the chroot. This is a widely used method of breaking
50386+ out of a chroot jail and should not be allowed. If the sysctl
50387+ option is enabled, a sysctl option with name
50388+ "chroot_deny_chroot" is created.
50389+
50390+config GRKERNSEC_CHROOT_PIVOT
50391+ bool "Deny pivot_root in chroot"
50392+ depends on GRKERNSEC_CHROOT
50393+ help
50394+ If you say Y here, processes inside a chroot will not be able to use
50395+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50396+ works similar to chroot in that it changes the root filesystem. This
50397+ function could be misused in a chrooted process to attempt to break out
50398+ of the chroot, and therefore should not be allowed. If the sysctl
50399+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50400+ created.
50401+
50402+config GRKERNSEC_CHROOT_CHDIR
50403+ bool "Enforce chdir(\"/\") on all chroots"
50404+ depends on GRKERNSEC_CHROOT
50405+ help
50406+ If you say Y here, the current working directory of all newly-chrooted
50407+ applications will be set to the root directory of the chroot.
50408+ The man page on chroot(2) states:
50409+ Note that this call does not change the current working
50410+ directory, so that `.' can be outside the tree rooted at
50411+ `/'. In particular, the super-user can escape from a
50412+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50413+
50414+ It is recommended that you say Y here, since it's not known to break
50415+ any software. If the sysctl option is enabled, a sysctl option with
50416+ name "chroot_enforce_chdir" is created.
50417+
50418+config GRKERNSEC_CHROOT_CHMOD
50419+ bool "Deny (f)chmod +s"
50420+ depends on GRKERNSEC_CHROOT
50421+ help
50422+ If you say Y here, processes inside a chroot will not be able to chmod
50423+ or fchmod files to make them have suid or sgid bits. This protects
50424+ against another published method of breaking a chroot. If the sysctl
50425+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50426+ created.
50427+
50428+config GRKERNSEC_CHROOT_FCHDIR
50429+ bool "Deny fchdir out of chroot"
50430+ depends on GRKERNSEC_CHROOT
50431+ help
50432+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50433+ to a file descriptor of the chrooting process that points to a directory
50434+ outside the filesystem will be stopped. If the sysctl option
50435+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50436+
50437+config GRKERNSEC_CHROOT_MKNOD
50438+ bool "Deny mknod"
50439+ depends on GRKERNSEC_CHROOT
50440+ help
50441+ If you say Y here, processes inside a chroot will not be allowed to
50442+ mknod. The problem with using mknod inside a chroot is that it
50443+ would allow an attacker to create a device entry that is the same
50444+ as one on the physical root of your system, which could range from
50445+ anything from the console device to a device for your harddrive (which
50446+ they could then use to wipe the drive or steal data). It is recommended
50447+ that you say Y here, unless you run into software incompatibilities.
50448+ If the sysctl option is enabled, a sysctl option with name
50449+ "chroot_deny_mknod" is created.
50450+
50451+config GRKERNSEC_CHROOT_SHMAT
50452+ bool "Deny shmat() out of chroot"
50453+ depends on GRKERNSEC_CHROOT
50454+ help
50455+ If you say Y here, processes inside a chroot will not be able to attach
50456+ to shared memory segments that were created outside of the chroot jail.
50457+ It is recommended that you say Y here. If the sysctl option is enabled,
50458+ a sysctl option with name "chroot_deny_shmat" is created.
50459+
50460+config GRKERNSEC_CHROOT_UNIX
50461+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50462+ depends on GRKERNSEC_CHROOT
50463+ help
50464+ If you say Y here, processes inside a chroot will not be able to
50465+ connect to abstract (meaning not belonging to a filesystem) Unix
50466+ domain sockets that were bound outside of a chroot. It is recommended
50467+ that you say Y here. If the sysctl option is enabled, a sysctl option
50468+ with name "chroot_deny_unix" is created.
50469+
50470+config GRKERNSEC_CHROOT_FINDTASK
50471+ bool "Protect outside processes"
50472+ depends on GRKERNSEC_CHROOT
50473+ help
50474+ If you say Y here, processes inside a chroot will not be able to
50475+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50476+ getsid, or view any process outside of the chroot. If the sysctl
50477+ option is enabled, a sysctl option with name "chroot_findtask" is
50478+ created.
50479+
50480+config GRKERNSEC_CHROOT_NICE
50481+ bool "Restrict priority changes"
50482+ depends on GRKERNSEC_CHROOT
50483+ help
50484+ If you say Y here, processes inside a chroot will not be able to raise
50485+ the priority of processes in the chroot, or alter the priority of
50486+ processes outside the chroot. This provides more security than simply
50487+ removing CAP_SYS_NICE from the process' capability set. If the
50488+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50489+ is created.
50490+
50491+config GRKERNSEC_CHROOT_SYSCTL
50492+ bool "Deny sysctl writes"
50493+ depends on GRKERNSEC_CHROOT
50494+ help
50495+ If you say Y here, an attacker in a chroot will not be able to
50496+ write to sysctl entries, either by sysctl(2) or through a /proc
50497+ interface. It is strongly recommended that you say Y here. If the
50498+ sysctl option is enabled, a sysctl option with name
50499+ "chroot_deny_sysctl" is created.
50500+
50501+config GRKERNSEC_CHROOT_CAPS
50502+ bool "Capability restrictions"
50503+ depends on GRKERNSEC_CHROOT
50504+ help
50505+ If you say Y here, the capabilities on all root processes within a
50506+ chroot jail will be lowered to stop module insertion, raw i/o,
50507+ system and net admin tasks, rebooting the system, modifying immutable
50508+ files, modifying IPC owned by another, and changing the system time.
50509+ This is left an option because it can break some apps. Disable this
50510+ if your chrooted apps are having problems performing those kinds of
50511+ tasks. If the sysctl option is enabled, a sysctl option with
50512+ name "chroot_caps" is created.
50513+
50514+endmenu
50515+menu "Kernel Auditing"
50516+depends on GRKERNSEC
50517+
50518+config GRKERNSEC_AUDIT_GROUP
50519+ bool "Single group for auditing"
50520+ help
50521+ If you say Y here, the exec, chdir, and (un)mount logging features
50522+ will only operate on a group you specify. This option is recommended
50523+ if you only want to watch certain users instead of having a large
50524+ amount of logs from the entire system. If the sysctl option is enabled,
50525+ a sysctl option with name "audit_group" is created.
50526+
50527+config GRKERNSEC_AUDIT_GID
50528+ int "GID for auditing"
50529+ depends on GRKERNSEC_AUDIT_GROUP
50530+ default 1007
50531+
50532+config GRKERNSEC_EXECLOG
50533+ bool "Exec logging"
50534+ help
50535+ If you say Y here, all execve() calls will be logged (since the
50536+ other exec*() calls are frontends to execve(), all execution
50537+ will be logged). Useful for shell-servers that like to keep track
50538+ of their users. If the sysctl option is enabled, a sysctl option with
50539+ name "exec_logging" is created.
50540+ WARNING: This option when enabled will produce a LOT of logs, especially
50541+ on an active system.
50542+
50543+config GRKERNSEC_RESLOG
50544+ bool "Resource logging"
50545+ help
50546+ If you say Y here, all attempts to overstep resource limits will
50547+ be logged with the resource name, the requested size, and the current
50548+ limit. It is highly recommended that you say Y here. If the sysctl
50549+ option is enabled, a sysctl option with name "resource_logging" is
50550+ created. If the RBAC system is enabled, the sysctl value is ignored.
50551+
50552+config GRKERNSEC_CHROOT_EXECLOG
50553+ bool "Log execs within chroot"
50554+ help
50555+ If you say Y here, all executions inside a chroot jail will be logged
50556+ to syslog. This can cause a large amount of logs if certain
50557+ applications (eg. djb's daemontools) are installed on the system, and
50558+ is therefore left as an option. If the sysctl option is enabled, a
50559+ sysctl option with name "chroot_execlog" is created.
50560+
50561+config GRKERNSEC_AUDIT_PTRACE
50562+ bool "Ptrace logging"
50563+ help
50564+ If you say Y here, all attempts to attach to a process via ptrace
50565+ will be logged. If the sysctl option is enabled, a sysctl option
50566+ with name "audit_ptrace" is created.
50567+
50568+config GRKERNSEC_AUDIT_CHDIR
50569+ bool "Chdir logging"
50570+ help
50571+ If you say Y here, all chdir() calls will be logged. If the sysctl
50572+ option is enabled, a sysctl option with name "audit_chdir" is created.
50573+
50574+config GRKERNSEC_AUDIT_MOUNT
50575+ bool "(Un)Mount logging"
50576+ help
50577+ If you say Y here, all mounts and unmounts will be logged. If the
50578+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50579+ created.
50580+
50581+config GRKERNSEC_SIGNAL
50582+ bool "Signal logging"
50583+ help
50584+ If you say Y here, certain important signals will be logged, such as
50585+ SIGSEGV, which will as a result inform you of when an error in a program
50586+ occurred, which in some cases could mean a possible exploit attempt.
50587+ If the sysctl option is enabled, a sysctl option with name
50588+ "signal_logging" is created.
50589+
50590+config GRKERNSEC_FORKFAIL
50591+ bool "Fork failure logging"
50592+ help
50593+ If you say Y here, all failed fork() attempts will be logged.
50594+ This could suggest a fork bomb, or someone attempting to overstep
50595+ their process limit. If the sysctl option is enabled, a sysctl option
50596+ with name "forkfail_logging" is created.
50597+
50598+config GRKERNSEC_TIME
50599+ bool "Time change logging"
50600+ help
50601+ If you say Y here, any changes of the system clock will be logged.
50602+ If the sysctl option is enabled, a sysctl option with name
50603+ "timechange_logging" is created.
50604+
50605+config GRKERNSEC_PROC_IPADDR
50606+ bool "/proc/<pid>/ipaddr support"
50607+ help
50608+ If you say Y here, a new entry will be added to each /proc/<pid>
50609+ directory that contains the IP address of the person using the task.
50610+ The IP is carried across local TCP and AF_UNIX stream sockets.
50611+ This information can be useful for IDS/IPSes to perform remote response
50612+ to a local attack. The entry is readable by only the owner of the
50613+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50614+ the RBAC system), and thus does not create privacy concerns.
50615+
50616+config GRKERNSEC_RWXMAP_LOG
50617+ bool 'Denied RWX mmap/mprotect logging'
50618+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50619+ help
50620+ If you say Y here, calls to mmap() and mprotect() with explicit
50621+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50622+ denied by the PAX_MPROTECT feature. If the sysctl option is
50623+ enabled, a sysctl option with name "rwxmap_logging" is created.
50624+
50625+config GRKERNSEC_AUDIT_TEXTREL
50626+ bool 'ELF text relocations logging (READ HELP)'
50627+ depends on PAX_MPROTECT
50628+ help
50629+ If you say Y here, text relocations will be logged with the filename
50630+ of the offending library or binary. The purpose of the feature is
50631+ to help Linux distribution developers get rid of libraries and
50632+ binaries that need text relocations which hinder the future progress
50633+ of PaX. Only Linux distribution developers should say Y here, and
50634+ never on a production machine, as this option creates an information
50635+ leak that could aid an attacker in defeating the randomization of
50636+ a single memory region. If the sysctl option is enabled, a sysctl
50637+ option with name "audit_textrel" is created.
50638+
50639+endmenu
50640+
50641+menu "Executable Protections"
50642+depends on GRKERNSEC
50643+
50644+config GRKERNSEC_DMESG
50645+ bool "Dmesg(8) restriction"
50646+ help
50647+ If you say Y here, non-root users will not be able to use dmesg(8)
50648+ to view up to the last 4kb of messages in the kernel's log buffer.
50649+ The kernel's log buffer often contains kernel addresses and other
50650+ identifying information useful to an attacker in fingerprinting a
50651+ system for a targeted exploit.
50652+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50653+ created.
50654+
50655+config GRKERNSEC_HARDEN_PTRACE
50656+ bool "Deter ptrace-based process snooping"
50657+ help
50658+ If you say Y here, TTY sniffers and other malicious monitoring
50659+ programs implemented through ptrace will be defeated. If you
50660+ have been using the RBAC system, this option has already been
50661+ enabled for several years for all users, with the ability to make
50662+ fine-grained exceptions.
50663+
50664+ This option only affects the ability of non-root users to ptrace
50665+ processes that are not a descendent of the ptracing process.
50666+ This means that strace ./binary and gdb ./binary will still work,
50667+ but attaching to arbitrary processes will not. If the sysctl
50668+ option is enabled, a sysctl option with name "harden_ptrace" is
50669+ created.
50670+
50671+config GRKERNSEC_TPE
50672+ bool "Trusted Path Execution (TPE)"
50673+ help
50674+ If you say Y here, you will be able to choose a gid to add to the
50675+ supplementary groups of users you want to mark as "untrusted."
50676+ These users will not be able to execute any files that are not in
50677+ root-owned directories writable only by root. If the sysctl option
50678+ is enabled, a sysctl option with name "tpe" is created.
50679+
50680+config GRKERNSEC_TPE_ALL
50681+ bool "Partially restrict all non-root users"
50682+ depends on GRKERNSEC_TPE
50683+ help
50684+ If you say Y here, all non-root users will be covered under
50685+ a weaker TPE restriction. This is separate from, and in addition to,
50686+ the main TPE options that you have selected elsewhere. Thus, if a
50687+ "trusted" GID is chosen, this restriction applies to even that GID.
50688+ Under this restriction, all non-root users will only be allowed to
50689+ execute files in directories they own that are not group or
50690+ world-writable, or in directories owned by root and writable only by
50691+ root. If the sysctl option is enabled, a sysctl option with name
50692+ "tpe_restrict_all" is created.
50693+
50694+config GRKERNSEC_TPE_INVERT
50695+ bool "Invert GID option"
50696+ depends on GRKERNSEC_TPE
50697+ help
50698+ If you say Y here, the group you specify in the TPE configuration will
50699+ decide what group TPE restrictions will be *disabled* for. This
50700+ option is useful if you want TPE restrictions to be applied to most
50701+ users on the system. If the sysctl option is enabled, a sysctl option
50702+ with name "tpe_invert" is created. Unlike other sysctl options, this
50703+ entry will default to on for backward-compatibility.
50704+
50705+config GRKERNSEC_TPE_GID
50706+ int "GID for untrusted users"
50707+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50708+ default 1005
50709+ help
50710+ Setting this GID determines what group TPE restrictions will be
50711+ *enabled* for. If the sysctl option is enabled, a sysctl option
50712+ with name "tpe_gid" is created.
50713+
50714+config GRKERNSEC_TPE_GID
50715+ int "GID for trusted users"
50716+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50717+ default 1005
50718+ help
50719+ Setting this GID determines what group TPE restrictions will be
50720+ *disabled* for. If the sysctl option is enabled, a sysctl option
50721+ with name "tpe_gid" is created.
50722+
50723+endmenu
50724+menu "Network Protections"
50725+depends on GRKERNSEC
50726+
50727+config GRKERNSEC_RANDNET
50728+ bool "Larger entropy pools"
50729+ help
50730+ If you say Y here, the entropy pools used for many features of Linux
50731+ and grsecurity will be doubled in size. Since several grsecurity
50732+ features use additional randomness, it is recommended that you say Y
50733+ here. Saying Y here has a similar effect as modifying
50734+ /proc/sys/kernel/random/poolsize.
50735+
50736+config GRKERNSEC_BLACKHOLE
50737+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50738+ depends on NET
50739+ help
50740+ If you say Y here, neither TCP resets nor ICMP
50741+ destination-unreachable packets will be sent in response to packets
50742+ sent to ports for which no associated listening process exists.
50743+ This feature supports both IPV4 and IPV6 and exempts the
50744+ loopback interface from blackholing. Enabling this feature
50745+ makes a host more resilient to DoS attacks and reduces network
50746+ visibility against scanners.
50747+
50748+ The blackhole feature as-implemented is equivalent to the FreeBSD
50749+ blackhole feature, as it prevents RST responses to all packets, not
50750+ just SYNs. Under most application behavior this causes no
50751+ problems, but applications (like haproxy) may not close certain
50752+ connections in a way that cleanly terminates them on the remote
50753+ end, leaving the remote host in LAST_ACK state. Because of this
50754+ side-effect and to prevent intentional LAST_ACK DoSes, this
50755+ feature also adds automatic mitigation against such attacks.
50756+ The mitigation drastically reduces the amount of time a socket
50757+ can spend in LAST_ACK state. If you're using haproxy and not
50758+ all servers it connects to have this option enabled, consider
50759+ disabling this feature on the haproxy host.
50760+
50761+ If the sysctl option is enabled, two sysctl options with names
50762+ "ip_blackhole" and "lastack_retries" will be created.
50763+ While "ip_blackhole" takes the standard zero/non-zero on/off
50764+ toggle, "lastack_retries" uses the same kinds of values as
50765+ "tcp_retries1" and "tcp_retries2". The default value of 4
50766+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50767+ state.
50768+
50769+config GRKERNSEC_SOCKET
50770+ bool "Socket restrictions"
50771+ depends on NET
50772+ help
50773+ If you say Y here, you will be able to choose from several options.
50774+ If you assign a GID on your system and add it to the supplementary
50775+ groups of users you want to restrict socket access to, this patch
50776+ will perform up to three things, based on the option(s) you choose.
50777+
50778+config GRKERNSEC_SOCKET_ALL
50779+ bool "Deny any sockets to group"
50780+ depends on GRKERNSEC_SOCKET
50781+ help
50782+ If you say Y here, you will be able to choose a GID of whose users will
50783+ be unable to connect to other hosts from your machine or run server
50784+ applications from your machine. If the sysctl option is enabled, a
50785+ sysctl option with name "socket_all" is created.
50786+
50787+config GRKERNSEC_SOCKET_ALL_GID
50788+ int "GID to deny all sockets for"
50789+ depends on GRKERNSEC_SOCKET_ALL
50790+ default 1004
50791+ help
50792+ Here you can choose the GID to disable socket access for. Remember to
50793+ add the users you want socket access disabled for to the GID
50794+ specified here. If the sysctl option is enabled, a sysctl option
50795+ with name "socket_all_gid" is created.
50796+
50797+config GRKERNSEC_SOCKET_CLIENT
50798+ bool "Deny client sockets to group"
50799+ depends on GRKERNSEC_SOCKET
50800+ help
50801+ If you say Y here, you will be able to choose a GID of whose users will
50802+ be unable to connect to other hosts from your machine, but will be
50803+ able to run servers. If this option is enabled, all users in the group
50804+ you specify will have to use passive mode when initiating ftp transfers
50805+ from the shell on your machine. If the sysctl option is enabled, a
50806+ sysctl option with name "socket_client" is created.
50807+
50808+config GRKERNSEC_SOCKET_CLIENT_GID
50809+ int "GID to deny client sockets for"
50810+ depends on GRKERNSEC_SOCKET_CLIENT
50811+ default 1003
50812+ help
50813+ Here you can choose the GID to disable client socket access for.
50814+ Remember to add the users you want client socket access disabled for to
50815+ the GID specified here. If the sysctl option is enabled, a sysctl
50816+ option with name "socket_client_gid" is created.
50817+
50818+config GRKERNSEC_SOCKET_SERVER
50819+ bool "Deny server sockets to group"
50820+ depends on GRKERNSEC_SOCKET
50821+ help
50822+ If you say Y here, you will be able to choose a GID of whose users will
50823+ be unable to run server applications from your machine. If the sysctl
50824+ option is enabled, a sysctl option with name "socket_server" is created.
50825+
50826+config GRKERNSEC_SOCKET_SERVER_GID
50827+ int "GID to deny server sockets for"
50828+ depends on GRKERNSEC_SOCKET_SERVER
50829+ default 1002
50830+ help
50831+ Here you can choose the GID to disable server socket access for.
50832+ Remember to add the users you want server socket access disabled for to
50833+ the GID specified here. If the sysctl option is enabled, a sysctl
50834+ option with name "socket_server_gid" is created.
50835+
50836+endmenu
50837+menu "Sysctl support"
50838+depends on GRKERNSEC && SYSCTL
50839+
50840+config GRKERNSEC_SYSCTL
50841+ bool "Sysctl support"
50842+ help
50843+ If you say Y here, you will be able to change the options that
50844+ grsecurity runs with at bootup, without having to recompile your
50845+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50846+ to enable (1) or disable (0) various features. All the sysctl entries
50847+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50848+ All features enabled in the kernel configuration are disabled at boot
50849+ if you do not say Y to the "Turn on features by default" option.
50850+ All options should be set at startup, and the grsec_lock entry should
50851+ be set to a non-zero value after all the options are set.
50852+ *THIS IS EXTREMELY IMPORTANT*
50853+
50854+config GRKERNSEC_SYSCTL_DISTRO
50855+ bool "Extra sysctl support for distro makers (READ HELP)"
50856+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50857+ help
50858+ If you say Y here, additional sysctl options will be created
50859+ for features that affect processes running as root. Therefore,
50860+ it is critical when using this option that the grsec_lock entry be
50861+ enabled after boot. Only distros with prebuilt kernel packages
50862+ with this option enabled that can ensure grsec_lock is enabled
50863+ after boot should use this option.
50864+ *Failure to set grsec_lock after boot makes all grsec features
50865+ this option covers useless*
50866+
50867+ Currently this option creates the following sysctl entries:
50868+ "Disable Privileged I/O": "disable_priv_io"
50869+
50870+config GRKERNSEC_SYSCTL_ON
50871+ bool "Turn on features by default"
50872+ depends on GRKERNSEC_SYSCTL
50873+ help
50874+ If you say Y here, instead of having all features enabled in the
50875+ kernel configuration disabled at boot time, the features will be
50876+ enabled at boot time. It is recommended you say Y here unless
50877+ there is some reason you would want all sysctl-tunable features to
50878+ be disabled by default. As mentioned elsewhere, it is important
50879+ to enable the grsec_lock entry once you have finished modifying
50880+ the sysctl entries.
50881+
50882+endmenu
50883+menu "Logging Options"
50884+depends on GRKERNSEC
50885+
50886+config GRKERNSEC_FLOODTIME
50887+ int "Seconds in between log messages (minimum)"
50888+ default 10
50889+ help
50890+ This option allows you to enforce the number of seconds between
50891+ grsecurity log messages. The default should be suitable for most
50892+ people, however, if you choose to change it, choose a value small enough
50893+ to allow informative logs to be produced, but large enough to
50894+ prevent flooding.
50895+
50896+config GRKERNSEC_FLOODBURST
50897+ int "Number of messages in a burst (maximum)"
50898+ default 4
50899+ help
50900+ This option allows you to choose the maximum number of messages allowed
50901+ within the flood time interval you chose in a separate option. The
50902+ default should be suitable for most people, however if you find that
50903+ many of your logs are being interpreted as flooding, you may want to
50904+ raise this value.
50905+
50906+endmenu
50907+
50908+endmenu
50909diff -urNp linux-3.0.3/grsecurity/Makefile linux-3.0.3/grsecurity/Makefile
50910--- linux-3.0.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50911+++ linux-3.0.3/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50912@@ -0,0 +1,34 @@
50913+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50914+# during 2001-2009 it has been completely redesigned by Brad Spengler
50915+# into an RBAC system
50916+#
50917+# All code in this directory and various hooks inserted throughout the kernel
50918+# are copyright Brad Spengler - Open Source Security, Inc., and released
50919+# under the GPL v2 or higher
50920+
50921+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50922+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50923+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50924+
50925+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50926+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50927+ gracl_learn.o grsec_log.o
50928+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50929+
50930+ifdef CONFIG_NET
50931+obj-y += grsec_sock.o
50932+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50933+endif
50934+
50935+ifndef CONFIG_GRKERNSEC
50936+obj-y += grsec_disabled.o
50937+endif
50938+
50939+ifdef CONFIG_GRKERNSEC_HIDESYM
50940+extra-y := grsec_hidesym.o
50941+$(obj)/grsec_hidesym.o:
50942+ @-chmod -f 500 /boot
50943+ @-chmod -f 500 /lib/modules
50944+ @-chmod -f 700 .
50945+ @echo ' grsec: protected kernel image paths'
50946+endif
50947diff -urNp linux-3.0.3/include/acpi/acpi_bus.h linux-3.0.3/include/acpi/acpi_bus.h
50948--- linux-3.0.3/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50949+++ linux-3.0.3/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50950@@ -107,7 +107,7 @@ struct acpi_device_ops {
50951 acpi_op_bind bind;
50952 acpi_op_unbind unbind;
50953 acpi_op_notify notify;
50954-};
50955+} __no_const;
50956
50957 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50958
50959diff -urNp linux-3.0.3/include/asm-generic/atomic-long.h linux-3.0.3/include/asm-generic/atomic-long.h
50960--- linux-3.0.3/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50961+++ linux-3.0.3/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50962@@ -22,6 +22,12 @@
50963
50964 typedef atomic64_t atomic_long_t;
50965
50966+#ifdef CONFIG_PAX_REFCOUNT
50967+typedef atomic64_unchecked_t atomic_long_unchecked_t;
50968+#else
50969+typedef atomic64_t atomic_long_unchecked_t;
50970+#endif
50971+
50972 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50973
50974 static inline long atomic_long_read(atomic_long_t *l)
50975@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50976 return (long)atomic64_read(v);
50977 }
50978
50979+#ifdef CONFIG_PAX_REFCOUNT
50980+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50981+{
50982+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50983+
50984+ return (long)atomic64_read_unchecked(v);
50985+}
50986+#endif
50987+
50988 static inline void atomic_long_set(atomic_long_t *l, long i)
50989 {
50990 atomic64_t *v = (atomic64_t *)l;
50991@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
50992 atomic64_set(v, i);
50993 }
50994
50995+#ifdef CONFIG_PAX_REFCOUNT
50996+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50997+{
50998+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50999+
51000+ atomic64_set_unchecked(v, i);
51001+}
51002+#endif
51003+
51004 static inline void atomic_long_inc(atomic_long_t *l)
51005 {
51006 atomic64_t *v = (atomic64_t *)l;
51007@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51008 atomic64_inc(v);
51009 }
51010
51011+#ifdef CONFIG_PAX_REFCOUNT
51012+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51013+{
51014+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51015+
51016+ atomic64_inc_unchecked(v);
51017+}
51018+#endif
51019+
51020 static inline void atomic_long_dec(atomic_long_t *l)
51021 {
51022 atomic64_t *v = (atomic64_t *)l;
51023@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51024 atomic64_dec(v);
51025 }
51026
51027+#ifdef CONFIG_PAX_REFCOUNT
51028+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51029+{
51030+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51031+
51032+ atomic64_dec_unchecked(v);
51033+}
51034+#endif
51035+
51036 static inline void atomic_long_add(long i, atomic_long_t *l)
51037 {
51038 atomic64_t *v = (atomic64_t *)l;
51039@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51040 atomic64_add(i, v);
51041 }
51042
51043+#ifdef CONFIG_PAX_REFCOUNT
51044+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51045+{
51046+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51047+
51048+ atomic64_add_unchecked(i, v);
51049+}
51050+#endif
51051+
51052 static inline void atomic_long_sub(long i, atomic_long_t *l)
51053 {
51054 atomic64_t *v = (atomic64_t *)l;
51055@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51056 atomic64_sub(i, v);
51057 }
51058
51059+#ifdef CONFIG_PAX_REFCOUNT
51060+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51061+{
51062+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51063+
51064+ atomic64_sub_unchecked(i, v);
51065+}
51066+#endif
51067+
51068 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51069 {
51070 atomic64_t *v = (atomic64_t *)l;
51071@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51072 return (long)atomic64_inc_return(v);
51073 }
51074
51075+#ifdef CONFIG_PAX_REFCOUNT
51076+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51077+{
51078+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51079+
51080+ return (long)atomic64_inc_return_unchecked(v);
51081+}
51082+#endif
51083+
51084 static inline long atomic_long_dec_return(atomic_long_t *l)
51085 {
51086 atomic64_t *v = (atomic64_t *)l;
51087@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51088
51089 typedef atomic_t atomic_long_t;
51090
51091+#ifdef CONFIG_PAX_REFCOUNT
51092+typedef atomic_unchecked_t atomic_long_unchecked_t;
51093+#else
51094+typedef atomic_t atomic_long_unchecked_t;
51095+#endif
51096+
51097 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51098 static inline long atomic_long_read(atomic_long_t *l)
51099 {
51100@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51101 return (long)atomic_read(v);
51102 }
51103
51104+#ifdef CONFIG_PAX_REFCOUNT
51105+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51106+{
51107+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51108+
51109+ return (long)atomic_read_unchecked(v);
51110+}
51111+#endif
51112+
51113 static inline void atomic_long_set(atomic_long_t *l, long i)
51114 {
51115 atomic_t *v = (atomic_t *)l;
51116@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51117 atomic_set(v, i);
51118 }
51119
51120+#ifdef CONFIG_PAX_REFCOUNT
51121+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51122+{
51123+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51124+
51125+ atomic_set_unchecked(v, i);
51126+}
51127+#endif
51128+
51129 static inline void atomic_long_inc(atomic_long_t *l)
51130 {
51131 atomic_t *v = (atomic_t *)l;
51132@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51133 atomic_inc(v);
51134 }
51135
51136+#ifdef CONFIG_PAX_REFCOUNT
51137+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51138+{
51139+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51140+
51141+ atomic_inc_unchecked(v);
51142+}
51143+#endif
51144+
51145 static inline void atomic_long_dec(atomic_long_t *l)
51146 {
51147 atomic_t *v = (atomic_t *)l;
51148@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51149 atomic_dec(v);
51150 }
51151
51152+#ifdef CONFIG_PAX_REFCOUNT
51153+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51154+{
51155+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51156+
51157+ atomic_dec_unchecked(v);
51158+}
51159+#endif
51160+
51161 static inline void atomic_long_add(long i, atomic_long_t *l)
51162 {
51163 atomic_t *v = (atomic_t *)l;
51164@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51165 atomic_add(i, v);
51166 }
51167
51168+#ifdef CONFIG_PAX_REFCOUNT
51169+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51170+{
51171+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51172+
51173+ atomic_add_unchecked(i, v);
51174+}
51175+#endif
51176+
51177 static inline void atomic_long_sub(long i, atomic_long_t *l)
51178 {
51179 atomic_t *v = (atomic_t *)l;
51180@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51181 atomic_sub(i, v);
51182 }
51183
51184+#ifdef CONFIG_PAX_REFCOUNT
51185+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51186+{
51187+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51188+
51189+ atomic_sub_unchecked(i, v);
51190+}
51191+#endif
51192+
51193 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51194 {
51195 atomic_t *v = (atomic_t *)l;
51196@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51197 return (long)atomic_inc_return(v);
51198 }
51199
51200+#ifdef CONFIG_PAX_REFCOUNT
51201+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51202+{
51203+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51204+
51205+ return (long)atomic_inc_return_unchecked(v);
51206+}
51207+#endif
51208+
51209 static inline long atomic_long_dec_return(atomic_long_t *l)
51210 {
51211 atomic_t *v = (atomic_t *)l;
51212@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51213
51214 #endif /* BITS_PER_LONG == 64 */
51215
51216+#ifdef CONFIG_PAX_REFCOUNT
51217+static inline void pax_refcount_needs_these_functions(void)
51218+{
51219+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
51220+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51221+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51222+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51223+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51224+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51225+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51226+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51227+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51228+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51229+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51230+
51231+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51232+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51233+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51234+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51235+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51236+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51237+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51238+}
51239+#else
51240+#define atomic_read_unchecked(v) atomic_read(v)
51241+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51242+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51243+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51244+#define atomic_inc_unchecked(v) atomic_inc(v)
51245+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51246+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51247+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51248+#define atomic_dec_unchecked(v) atomic_dec(v)
51249+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51250+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51251+
51252+#define atomic_long_read_unchecked(v) atomic_long_read(v)
51253+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51254+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51255+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51256+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51257+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51258+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51259+#endif
51260+
51261 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51262diff -urNp linux-3.0.3/include/asm-generic/cache.h linux-3.0.3/include/asm-generic/cache.h
51263--- linux-3.0.3/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
51264+++ linux-3.0.3/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
51265@@ -6,7 +6,7 @@
51266 * cache lines need to provide their own cache.h.
51267 */
51268
51269-#define L1_CACHE_SHIFT 5
51270-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51271+#define L1_CACHE_SHIFT 5UL
51272+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51273
51274 #endif /* __ASM_GENERIC_CACHE_H */
51275diff -urNp linux-3.0.3/include/asm-generic/int-l64.h linux-3.0.3/include/asm-generic/int-l64.h
51276--- linux-3.0.3/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51277+++ linux-3.0.3/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51278@@ -46,6 +46,8 @@ typedef unsigned int u32;
51279 typedef signed long s64;
51280 typedef unsigned long u64;
51281
51282+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51283+
51284 #define S8_C(x) x
51285 #define U8_C(x) x ## U
51286 #define S16_C(x) x
51287diff -urNp linux-3.0.3/include/asm-generic/int-ll64.h linux-3.0.3/include/asm-generic/int-ll64.h
51288--- linux-3.0.3/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51289+++ linux-3.0.3/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51290@@ -51,6 +51,8 @@ typedef unsigned int u32;
51291 typedef signed long long s64;
51292 typedef unsigned long long u64;
51293
51294+typedef unsigned long long intoverflow_t;
51295+
51296 #define S8_C(x) x
51297 #define U8_C(x) x ## U
51298 #define S16_C(x) x
51299diff -urNp linux-3.0.3/include/asm-generic/kmap_types.h linux-3.0.3/include/asm-generic/kmap_types.h
51300--- linux-3.0.3/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51301+++ linux-3.0.3/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51302@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51303 KMAP_D(17) KM_NMI,
51304 KMAP_D(18) KM_NMI_PTE,
51305 KMAP_D(19) KM_KDB,
51306+KMAP_D(20) KM_CLEARPAGE,
51307 /*
51308 * Remember to update debug_kmap_atomic() when adding new kmap types!
51309 */
51310-KMAP_D(20) KM_TYPE_NR
51311+KMAP_D(21) KM_TYPE_NR
51312 };
51313
51314 #undef KMAP_D
51315diff -urNp linux-3.0.3/include/asm-generic/pgtable.h linux-3.0.3/include/asm-generic/pgtable.h
51316--- linux-3.0.3/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51317+++ linux-3.0.3/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51318@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51319 #endif /* __HAVE_ARCH_PMD_WRITE */
51320 #endif
51321
51322+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51323+static inline unsigned long pax_open_kernel(void) { return 0; }
51324+#endif
51325+
51326+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51327+static inline unsigned long pax_close_kernel(void) { return 0; }
51328+#endif
51329+
51330 #endif /* !__ASSEMBLY__ */
51331
51332 #endif /* _ASM_GENERIC_PGTABLE_H */
51333diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopmd.h linux-3.0.3/include/asm-generic/pgtable-nopmd.h
51334--- linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51335+++ linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51336@@ -1,14 +1,19 @@
51337 #ifndef _PGTABLE_NOPMD_H
51338 #define _PGTABLE_NOPMD_H
51339
51340-#ifndef __ASSEMBLY__
51341-
51342 #include <asm-generic/pgtable-nopud.h>
51343
51344-struct mm_struct;
51345-
51346 #define __PAGETABLE_PMD_FOLDED
51347
51348+#define PMD_SHIFT PUD_SHIFT
51349+#define PTRS_PER_PMD 1
51350+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51351+#define PMD_MASK (~(PMD_SIZE-1))
51352+
51353+#ifndef __ASSEMBLY__
51354+
51355+struct mm_struct;
51356+
51357 /*
51358 * Having the pmd type consist of a pud gets the size right, and allows
51359 * us to conceptually access the pud entry that this pmd is folded into
51360@@ -16,11 +21,6 @@ struct mm_struct;
51361 */
51362 typedef struct { pud_t pud; } pmd_t;
51363
51364-#define PMD_SHIFT PUD_SHIFT
51365-#define PTRS_PER_PMD 1
51366-#define PMD_SIZE (1UL << PMD_SHIFT)
51367-#define PMD_MASK (~(PMD_SIZE-1))
51368-
51369 /*
51370 * The "pud_xxx()" functions here are trivial for a folded two-level
51371 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51372diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopud.h linux-3.0.3/include/asm-generic/pgtable-nopud.h
51373--- linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51374+++ linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51375@@ -1,10 +1,15 @@
51376 #ifndef _PGTABLE_NOPUD_H
51377 #define _PGTABLE_NOPUD_H
51378
51379-#ifndef __ASSEMBLY__
51380-
51381 #define __PAGETABLE_PUD_FOLDED
51382
51383+#define PUD_SHIFT PGDIR_SHIFT
51384+#define PTRS_PER_PUD 1
51385+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51386+#define PUD_MASK (~(PUD_SIZE-1))
51387+
51388+#ifndef __ASSEMBLY__
51389+
51390 /*
51391 * Having the pud type consist of a pgd gets the size right, and allows
51392 * us to conceptually access the pgd entry that this pud is folded into
51393@@ -12,11 +17,6 @@
51394 */
51395 typedef struct { pgd_t pgd; } pud_t;
51396
51397-#define PUD_SHIFT PGDIR_SHIFT
51398-#define PTRS_PER_PUD 1
51399-#define PUD_SIZE (1UL << PUD_SHIFT)
51400-#define PUD_MASK (~(PUD_SIZE-1))
51401-
51402 /*
51403 * The "pgd_xxx()" functions here are trivial for a folded two-level
51404 * setup: the pud is never bad, and a pud always exists (as it's folded
51405diff -urNp linux-3.0.3/include/asm-generic/vmlinux.lds.h linux-3.0.3/include/asm-generic/vmlinux.lds.h
51406--- linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51407+++ linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51408@@ -217,6 +217,7 @@
51409 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51410 VMLINUX_SYMBOL(__start_rodata) = .; \
51411 *(.rodata) *(.rodata.*) \
51412+ *(.data..read_only) \
51413 *(__vermagic) /* Kernel version magic */ \
51414 . = ALIGN(8); \
51415 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51416@@ -723,17 +724,18 @@
51417 * section in the linker script will go there too. @phdr should have
51418 * a leading colon.
51419 *
51420- * Note that this macros defines __per_cpu_load as an absolute symbol.
51421+ * Note that this macros defines per_cpu_load as an absolute symbol.
51422 * If there is no need to put the percpu section at a predetermined
51423 * address, use PERCPU_SECTION.
51424 */
51425 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51426- VMLINUX_SYMBOL(__per_cpu_load) = .; \
51427- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51428+ per_cpu_load = .; \
51429+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51430 - LOAD_OFFSET) { \
51431+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51432 PERCPU_INPUT(cacheline) \
51433 } phdr \
51434- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51435+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51436
51437 /**
51438 * PERCPU_SECTION - define output section for percpu area, simple version
51439diff -urNp linux-3.0.3/include/drm/drm_crtc_helper.h linux-3.0.3/include/drm/drm_crtc_helper.h
51440--- linux-3.0.3/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51441+++ linux-3.0.3/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51442@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51443
51444 /* disable crtc when not in use - more explicit than dpms off */
51445 void (*disable)(struct drm_crtc *crtc);
51446-};
51447+} __no_const;
51448
51449 struct drm_encoder_helper_funcs {
51450 void (*dpms)(struct drm_encoder *encoder, int mode);
51451@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51452 struct drm_connector *connector);
51453 /* disable encoder when not in use - more explicit than dpms off */
51454 void (*disable)(struct drm_encoder *encoder);
51455-};
51456+} __no_const;
51457
51458 struct drm_connector_helper_funcs {
51459 int (*get_modes)(struct drm_connector *connector);
51460diff -urNp linux-3.0.3/include/drm/drmP.h linux-3.0.3/include/drm/drmP.h
51461--- linux-3.0.3/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51462+++ linux-3.0.3/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51463@@ -73,6 +73,7 @@
51464 #include <linux/workqueue.h>
51465 #include <linux/poll.h>
51466 #include <asm/pgalloc.h>
51467+#include <asm/local.h>
51468 #include "drm.h"
51469
51470 #include <linux/idr.h>
51471@@ -1033,7 +1034,7 @@ struct drm_device {
51472
51473 /** \name Usage Counters */
51474 /*@{ */
51475- int open_count; /**< Outstanding files open */
51476+ local_t open_count; /**< Outstanding files open */
51477 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51478 atomic_t vma_count; /**< Outstanding vma areas open */
51479 int buf_use; /**< Buffers in use -- cannot alloc */
51480@@ -1044,7 +1045,7 @@ struct drm_device {
51481 /*@{ */
51482 unsigned long counters;
51483 enum drm_stat_type types[15];
51484- atomic_t counts[15];
51485+ atomic_unchecked_t counts[15];
51486 /*@} */
51487
51488 struct list_head filelist;
51489diff -urNp linux-3.0.3/include/drm/ttm/ttm_memory.h linux-3.0.3/include/drm/ttm/ttm_memory.h
51490--- linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51491+++ linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51492@@ -47,7 +47,7 @@
51493
51494 struct ttm_mem_shrink {
51495 int (*do_shrink) (struct ttm_mem_shrink *);
51496-};
51497+} __no_const;
51498
51499 /**
51500 * struct ttm_mem_global - Global memory accounting structure.
51501diff -urNp linux-3.0.3/include/linux/a.out.h linux-3.0.3/include/linux/a.out.h
51502--- linux-3.0.3/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51503+++ linux-3.0.3/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51504@@ -39,6 +39,14 @@ enum machine_type {
51505 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51506 };
51507
51508+/* Constants for the N_FLAGS field */
51509+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51510+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51511+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51512+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51513+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51514+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51515+
51516 #if !defined (N_MAGIC)
51517 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51518 #endif
51519diff -urNp linux-3.0.3/include/linux/atmdev.h linux-3.0.3/include/linux/atmdev.h
51520--- linux-3.0.3/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51521+++ linux-3.0.3/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51522@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51523 #endif
51524
51525 struct k_atm_aal_stats {
51526-#define __HANDLE_ITEM(i) atomic_t i
51527+#define __HANDLE_ITEM(i) atomic_unchecked_t i
51528 __AAL_STAT_ITEMS
51529 #undef __HANDLE_ITEM
51530 };
51531diff -urNp linux-3.0.3/include/linux/binfmts.h linux-3.0.3/include/linux/binfmts.h
51532--- linux-3.0.3/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51533+++ linux-3.0.3/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51534@@ -88,6 +88,7 @@ struct linux_binfmt {
51535 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51536 int (*load_shlib)(struct file *);
51537 int (*core_dump)(struct coredump_params *cprm);
51538+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51539 unsigned long min_coredump; /* minimal dump size */
51540 };
51541
51542diff -urNp linux-3.0.3/include/linux/blkdev.h linux-3.0.3/include/linux/blkdev.h
51543--- linux-3.0.3/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51544+++ linux-3.0.3/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
51545@@ -1308,7 +1308,7 @@ struct block_device_operations {
51546 /* this callback is with swap_lock and sometimes page table lock held */
51547 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51548 struct module *owner;
51549-};
51550+} __do_const;
51551
51552 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51553 unsigned long);
51554diff -urNp linux-3.0.3/include/linux/blktrace_api.h linux-3.0.3/include/linux/blktrace_api.h
51555--- linux-3.0.3/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51556+++ linux-3.0.3/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51557@@ -161,7 +161,7 @@ struct blk_trace {
51558 struct dentry *dir;
51559 struct dentry *dropped_file;
51560 struct dentry *msg_file;
51561- atomic_t dropped;
51562+ atomic_unchecked_t dropped;
51563 };
51564
51565 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51566diff -urNp linux-3.0.3/include/linux/byteorder/little_endian.h linux-3.0.3/include/linux/byteorder/little_endian.h
51567--- linux-3.0.3/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51568+++ linux-3.0.3/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51569@@ -42,51 +42,51 @@
51570
51571 static inline __le64 __cpu_to_le64p(const __u64 *p)
51572 {
51573- return (__force __le64)*p;
51574+ return (__force const __le64)*p;
51575 }
51576 static inline __u64 __le64_to_cpup(const __le64 *p)
51577 {
51578- return (__force __u64)*p;
51579+ return (__force const __u64)*p;
51580 }
51581 static inline __le32 __cpu_to_le32p(const __u32 *p)
51582 {
51583- return (__force __le32)*p;
51584+ return (__force const __le32)*p;
51585 }
51586 static inline __u32 __le32_to_cpup(const __le32 *p)
51587 {
51588- return (__force __u32)*p;
51589+ return (__force const __u32)*p;
51590 }
51591 static inline __le16 __cpu_to_le16p(const __u16 *p)
51592 {
51593- return (__force __le16)*p;
51594+ return (__force const __le16)*p;
51595 }
51596 static inline __u16 __le16_to_cpup(const __le16 *p)
51597 {
51598- return (__force __u16)*p;
51599+ return (__force const __u16)*p;
51600 }
51601 static inline __be64 __cpu_to_be64p(const __u64 *p)
51602 {
51603- return (__force __be64)__swab64p(p);
51604+ return (__force const __be64)__swab64p(p);
51605 }
51606 static inline __u64 __be64_to_cpup(const __be64 *p)
51607 {
51608- return __swab64p((__u64 *)p);
51609+ return __swab64p((const __u64 *)p);
51610 }
51611 static inline __be32 __cpu_to_be32p(const __u32 *p)
51612 {
51613- return (__force __be32)__swab32p(p);
51614+ return (__force const __be32)__swab32p(p);
51615 }
51616 static inline __u32 __be32_to_cpup(const __be32 *p)
51617 {
51618- return __swab32p((__u32 *)p);
51619+ return __swab32p((const __u32 *)p);
51620 }
51621 static inline __be16 __cpu_to_be16p(const __u16 *p)
51622 {
51623- return (__force __be16)__swab16p(p);
51624+ return (__force const __be16)__swab16p(p);
51625 }
51626 static inline __u16 __be16_to_cpup(const __be16 *p)
51627 {
51628- return __swab16p((__u16 *)p);
51629+ return __swab16p((const __u16 *)p);
51630 }
51631 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51632 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51633diff -urNp linux-3.0.3/include/linux/cache.h linux-3.0.3/include/linux/cache.h
51634--- linux-3.0.3/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51635+++ linux-3.0.3/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51636@@ -16,6 +16,10 @@
51637 #define __read_mostly
51638 #endif
51639
51640+#ifndef __read_only
51641+#define __read_only __read_mostly
51642+#endif
51643+
51644 #ifndef ____cacheline_aligned
51645 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51646 #endif
51647diff -urNp linux-3.0.3/include/linux/capability.h linux-3.0.3/include/linux/capability.h
51648--- linux-3.0.3/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51649+++ linux-3.0.3/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51650@@ -547,6 +547,9 @@ extern bool capable(int cap);
51651 extern bool ns_capable(struct user_namespace *ns, int cap);
51652 extern bool task_ns_capable(struct task_struct *t, int cap);
51653 extern bool nsown_capable(int cap);
51654+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51655+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51656+extern bool capable_nolog(int cap);
51657
51658 /* audit system wants to get cap info from files as well */
51659 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51660diff -urNp linux-3.0.3/include/linux/cleancache.h linux-3.0.3/include/linux/cleancache.h
51661--- linux-3.0.3/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51662+++ linux-3.0.3/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51663@@ -31,7 +31,7 @@ struct cleancache_ops {
51664 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51665 void (*flush_inode)(int, struct cleancache_filekey);
51666 void (*flush_fs)(int);
51667-};
51668+} __no_const;
51669
51670 extern struct cleancache_ops
51671 cleancache_register_ops(struct cleancache_ops *ops);
51672diff -urNp linux-3.0.3/include/linux/compiler-gcc4.h linux-3.0.3/include/linux/compiler-gcc4.h
51673--- linux-3.0.3/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51674+++ linux-3.0.3/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
51675@@ -31,6 +31,12 @@
51676
51677
51678 #if __GNUC_MINOR__ >= 5
51679+
51680+#ifdef CONSTIFY_PLUGIN
51681+#define __no_const __attribute__((no_const))
51682+#define __do_const __attribute__((do_const))
51683+#endif
51684+
51685 /*
51686 * Mark a position in code as unreachable. This can be used to
51687 * suppress control flow warnings after asm blocks that transfer
51688@@ -46,6 +52,11 @@
51689 #define __noclone __attribute__((__noclone__))
51690
51691 #endif
51692+
51693+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51694+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51695+#define __bos0(ptr) __bos((ptr), 0)
51696+#define __bos1(ptr) __bos((ptr), 1)
51697 #endif
51698
51699 #if __GNUC_MINOR__ > 0
51700diff -urNp linux-3.0.3/include/linux/compiler.h linux-3.0.3/include/linux/compiler.h
51701--- linux-3.0.3/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51702+++ linux-3.0.3/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
51703@@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
51704 # define __attribute_const__ /* unimplemented */
51705 #endif
51706
51707+#ifndef __no_const
51708+# define __no_const
51709+#endif
51710+
51711+#ifndef __do_const
51712+# define __do_const
51713+#endif
51714+
51715 /*
51716 * Tell gcc if a function is cold. The compiler will assume any path
51717 * directly leading to the call is unlikely.
51718@@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
51719 #define __cold
51720 #endif
51721
51722+#ifndef __alloc_size
51723+#define __alloc_size(...)
51724+#endif
51725+
51726+#ifndef __bos
51727+#define __bos(ptr, arg)
51728+#endif
51729+
51730+#ifndef __bos0
51731+#define __bos0(ptr)
51732+#endif
51733+
51734+#ifndef __bos1
51735+#define __bos1(ptr)
51736+#endif
51737+
51738 /* Simple shorthand for a section definition */
51739 #ifndef __section
51740 # define __section(S) __attribute__ ((__section__(#S)))
51741@@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
51742 * use is to mediate communication between process-level code and irq/NMI
51743 * handlers, all running on the same CPU.
51744 */
51745-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51746+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51747+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51748
51749 #endif /* __LINUX_COMPILER_H */
51750diff -urNp linux-3.0.3/include/linux/cpuset.h linux-3.0.3/include/linux/cpuset.h
51751--- linux-3.0.3/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51752+++ linux-3.0.3/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51753@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51754 * nodemask.
51755 */
51756 smp_mb();
51757- --ACCESS_ONCE(current->mems_allowed_change_disable);
51758+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51759 }
51760
51761 static inline void set_mems_allowed(nodemask_t nodemask)
51762diff -urNp linux-3.0.3/include/linux/crypto.h linux-3.0.3/include/linux/crypto.h
51763--- linux-3.0.3/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51764+++ linux-3.0.3/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51765@@ -361,7 +361,7 @@ struct cipher_tfm {
51766 const u8 *key, unsigned int keylen);
51767 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51768 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51769-};
51770+} __no_const;
51771
51772 struct hash_tfm {
51773 int (*init)(struct hash_desc *desc);
51774@@ -382,13 +382,13 @@ struct compress_tfm {
51775 int (*cot_decompress)(struct crypto_tfm *tfm,
51776 const u8 *src, unsigned int slen,
51777 u8 *dst, unsigned int *dlen);
51778-};
51779+} __no_const;
51780
51781 struct rng_tfm {
51782 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51783 unsigned int dlen);
51784 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51785-};
51786+} __no_const;
51787
51788 #define crt_ablkcipher crt_u.ablkcipher
51789 #define crt_aead crt_u.aead
51790diff -urNp linux-3.0.3/include/linux/decompress/mm.h linux-3.0.3/include/linux/decompress/mm.h
51791--- linux-3.0.3/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51792+++ linux-3.0.3/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51793@@ -77,7 +77,7 @@ static void free(void *where)
51794 * warnings when not needed (indeed large_malloc / large_free are not
51795 * needed by inflate */
51796
51797-#define malloc(a) kmalloc(a, GFP_KERNEL)
51798+#define malloc(a) kmalloc((a), GFP_KERNEL)
51799 #define free(a) kfree(a)
51800
51801 #define large_malloc(a) vmalloc(a)
51802diff -urNp linux-3.0.3/include/linux/dma-mapping.h linux-3.0.3/include/linux/dma-mapping.h
51803--- linux-3.0.3/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51804+++ linux-3.0.3/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
51805@@ -50,7 +50,7 @@ struct dma_map_ops {
51806 int (*dma_supported)(struct device *dev, u64 mask);
51807 int (*set_dma_mask)(struct device *dev, u64 mask);
51808 int is_phys;
51809-};
51810+} __do_const;
51811
51812 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51813
51814diff -urNp linux-3.0.3/include/linux/efi.h linux-3.0.3/include/linux/efi.h
51815--- linux-3.0.3/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51816+++ linux-3.0.3/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51817@@ -410,7 +410,7 @@ struct efivar_operations {
51818 efi_get_variable_t *get_variable;
51819 efi_get_next_variable_t *get_next_variable;
51820 efi_set_variable_t *set_variable;
51821-};
51822+} __no_const;
51823
51824 struct efivars {
51825 /*
51826diff -urNp linux-3.0.3/include/linux/elf.h linux-3.0.3/include/linux/elf.h
51827--- linux-3.0.3/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51828+++ linux-3.0.3/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51829@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51830 #define PT_GNU_EH_FRAME 0x6474e550
51831
51832 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51833+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51834+
51835+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51836+
51837+/* Constants for the e_flags field */
51838+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51839+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51840+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51841+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51842+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51843+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51844
51845 /*
51846 * Extended Numbering
51847@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51848 #define DT_DEBUG 21
51849 #define DT_TEXTREL 22
51850 #define DT_JMPREL 23
51851+#define DT_FLAGS 30
51852+ #define DF_TEXTREL 0x00000004
51853 #define DT_ENCODING 32
51854 #define OLD_DT_LOOS 0x60000000
51855 #define DT_LOOS 0x6000000d
51856@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51857 #define PF_W 0x2
51858 #define PF_X 0x1
51859
51860+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51861+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51862+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51863+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51864+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51865+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51866+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51867+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51868+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51869+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51870+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51871+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51872+
51873 typedef struct elf32_phdr{
51874 Elf32_Word p_type;
51875 Elf32_Off p_offset;
51876@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51877 #define EI_OSABI 7
51878 #define EI_PAD 8
51879
51880+#define EI_PAX 14
51881+
51882 #define ELFMAG0 0x7f /* EI_MAG */
51883 #define ELFMAG1 'E'
51884 #define ELFMAG2 'L'
51885@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51886 #define elf_note elf32_note
51887 #define elf_addr_t Elf32_Off
51888 #define Elf_Half Elf32_Half
51889+#define elf_dyn Elf32_Dyn
51890
51891 #else
51892
51893@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51894 #define elf_note elf64_note
51895 #define elf_addr_t Elf64_Off
51896 #define Elf_Half Elf64_Half
51897+#define elf_dyn Elf64_Dyn
51898
51899 #endif
51900
51901diff -urNp linux-3.0.3/include/linux/firewire.h linux-3.0.3/include/linux/firewire.h
51902--- linux-3.0.3/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51903+++ linux-3.0.3/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51904@@ -428,7 +428,7 @@ struct fw_iso_context {
51905 union {
51906 fw_iso_callback_t sc;
51907 fw_iso_mc_callback_t mc;
51908- } callback;
51909+ } __no_const callback;
51910 void *callback_data;
51911 };
51912
51913diff -urNp linux-3.0.3/include/linux/fscache-cache.h linux-3.0.3/include/linux/fscache-cache.h
51914--- linux-3.0.3/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51915+++ linux-3.0.3/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51916@@ -102,7 +102,7 @@ struct fscache_operation {
51917 fscache_operation_release_t release;
51918 };
51919
51920-extern atomic_t fscache_op_debug_id;
51921+extern atomic_unchecked_t fscache_op_debug_id;
51922 extern void fscache_op_work_func(struct work_struct *work);
51923
51924 extern void fscache_enqueue_operation(struct fscache_operation *);
51925@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51926 {
51927 INIT_WORK(&op->work, fscache_op_work_func);
51928 atomic_set(&op->usage, 1);
51929- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51930+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51931 op->processor = processor;
51932 op->release = release;
51933 INIT_LIST_HEAD(&op->pend_link);
51934diff -urNp linux-3.0.3/include/linux/fs.h linux-3.0.3/include/linux/fs.h
51935--- linux-3.0.3/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51936+++ linux-3.0.3/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
51937@@ -109,6 +109,11 @@ struct inodes_stat_t {
51938 /* File was opened by fanotify and shouldn't generate fanotify events */
51939 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51940
51941+/* Hack for grsec so as not to require read permission simply to execute
51942+ * a binary
51943+ */
51944+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51945+
51946 /*
51947 * The below are the various read and write types that we support. Some of
51948 * them include behavioral modifiers that send information down to the
51949@@ -1571,7 +1576,8 @@ struct file_operations {
51950 int (*setlease)(struct file *, long, struct file_lock **);
51951 long (*fallocate)(struct file *file, int mode, loff_t offset,
51952 loff_t len);
51953-};
51954+} __do_const;
51955+typedef struct file_operations __no_const file_operations_no_const;
51956
51957 #define IPERM_FLAG_RCU 0x0001
51958
51959diff -urNp linux-3.0.3/include/linux/fsnotify.h linux-3.0.3/include/linux/fsnotify.h
51960--- linux-3.0.3/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
51961+++ linux-3.0.3/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
51962@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
51963 */
51964 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
51965 {
51966- return kstrdup(name, GFP_KERNEL);
51967+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
51968 }
51969
51970 /*
51971diff -urNp linux-3.0.3/include/linux/fs_struct.h linux-3.0.3/include/linux/fs_struct.h
51972--- linux-3.0.3/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51973+++ linux-3.0.3/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51974@@ -6,7 +6,7 @@
51975 #include <linux/seqlock.h>
51976
51977 struct fs_struct {
51978- int users;
51979+ atomic_t users;
51980 spinlock_t lock;
51981 seqcount_t seq;
51982 int umask;
51983diff -urNp linux-3.0.3/include/linux/ftrace_event.h linux-3.0.3/include/linux/ftrace_event.h
51984--- linux-3.0.3/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51985+++ linux-3.0.3/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51986@@ -96,7 +96,7 @@ struct trace_event_functions {
51987 trace_print_func raw;
51988 trace_print_func hex;
51989 trace_print_func binary;
51990-};
51991+} __no_const;
51992
51993 struct trace_event {
51994 struct hlist_node node;
51995@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
51996 extern int trace_add_event_call(struct ftrace_event_call *call);
51997 extern void trace_remove_event_call(struct ftrace_event_call *call);
51998
51999-#define is_signed_type(type) (((type)(-1)) < 0)
52000+#define is_signed_type(type) (((type)(-1)) < (type)1)
52001
52002 int trace_set_clr_event(const char *system, const char *event, int set);
52003
52004diff -urNp linux-3.0.3/include/linux/genhd.h linux-3.0.3/include/linux/genhd.h
52005--- linux-3.0.3/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
52006+++ linux-3.0.3/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
52007@@ -184,7 +184,7 @@ struct gendisk {
52008 struct kobject *slave_dir;
52009
52010 struct timer_rand_state *random;
52011- atomic_t sync_io; /* RAID */
52012+ atomic_unchecked_t sync_io; /* RAID */
52013 struct disk_events *ev;
52014 #ifdef CONFIG_BLK_DEV_INTEGRITY
52015 struct blk_integrity *integrity;
52016diff -urNp linux-3.0.3/include/linux/gracl.h linux-3.0.3/include/linux/gracl.h
52017--- linux-3.0.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52018+++ linux-3.0.3/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
52019@@ -0,0 +1,317 @@
52020+#ifndef GR_ACL_H
52021+#define GR_ACL_H
52022+
52023+#include <linux/grdefs.h>
52024+#include <linux/resource.h>
52025+#include <linux/capability.h>
52026+#include <linux/dcache.h>
52027+#include <asm/resource.h>
52028+
52029+/* Major status information */
52030+
52031+#define GR_VERSION "grsecurity 2.2.2"
52032+#define GRSECURITY_VERSION 0x2202
52033+
52034+enum {
52035+ GR_SHUTDOWN = 0,
52036+ GR_ENABLE = 1,
52037+ GR_SPROLE = 2,
52038+ GR_RELOAD = 3,
52039+ GR_SEGVMOD = 4,
52040+ GR_STATUS = 5,
52041+ GR_UNSPROLE = 6,
52042+ GR_PASSSET = 7,
52043+ GR_SPROLEPAM = 8,
52044+};
52045+
52046+/* Password setup definitions
52047+ * kernel/grhash.c */
52048+enum {
52049+ GR_PW_LEN = 128,
52050+ GR_SALT_LEN = 16,
52051+ GR_SHA_LEN = 32,
52052+};
52053+
52054+enum {
52055+ GR_SPROLE_LEN = 64,
52056+};
52057+
52058+enum {
52059+ GR_NO_GLOB = 0,
52060+ GR_REG_GLOB,
52061+ GR_CREATE_GLOB
52062+};
52063+
52064+#define GR_NLIMITS 32
52065+
52066+/* Begin Data Structures */
52067+
52068+struct sprole_pw {
52069+ unsigned char *rolename;
52070+ unsigned char salt[GR_SALT_LEN];
52071+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52072+};
52073+
52074+struct name_entry {
52075+ __u32 key;
52076+ ino_t inode;
52077+ dev_t device;
52078+ char *name;
52079+ __u16 len;
52080+ __u8 deleted;
52081+ struct name_entry *prev;
52082+ struct name_entry *next;
52083+};
52084+
52085+struct inodev_entry {
52086+ struct name_entry *nentry;
52087+ struct inodev_entry *prev;
52088+ struct inodev_entry *next;
52089+};
52090+
52091+struct acl_role_db {
52092+ struct acl_role_label **r_hash;
52093+ __u32 r_size;
52094+};
52095+
52096+struct inodev_db {
52097+ struct inodev_entry **i_hash;
52098+ __u32 i_size;
52099+};
52100+
52101+struct name_db {
52102+ struct name_entry **n_hash;
52103+ __u32 n_size;
52104+};
52105+
52106+struct crash_uid {
52107+ uid_t uid;
52108+ unsigned long expires;
52109+};
52110+
52111+struct gr_hash_struct {
52112+ void **table;
52113+ void **nametable;
52114+ void *first;
52115+ __u32 table_size;
52116+ __u32 used_size;
52117+ int type;
52118+};
52119+
52120+/* Userspace Grsecurity ACL data structures */
52121+
52122+struct acl_subject_label {
52123+ char *filename;
52124+ ino_t inode;
52125+ dev_t device;
52126+ __u32 mode;
52127+ kernel_cap_t cap_mask;
52128+ kernel_cap_t cap_lower;
52129+ kernel_cap_t cap_invert_audit;
52130+
52131+ struct rlimit res[GR_NLIMITS];
52132+ __u32 resmask;
52133+
52134+ __u8 user_trans_type;
52135+ __u8 group_trans_type;
52136+ uid_t *user_transitions;
52137+ gid_t *group_transitions;
52138+ __u16 user_trans_num;
52139+ __u16 group_trans_num;
52140+
52141+ __u32 sock_families[2];
52142+ __u32 ip_proto[8];
52143+ __u32 ip_type;
52144+ struct acl_ip_label **ips;
52145+ __u32 ip_num;
52146+ __u32 inaddr_any_override;
52147+
52148+ __u32 crashes;
52149+ unsigned long expires;
52150+
52151+ struct acl_subject_label *parent_subject;
52152+ struct gr_hash_struct *hash;
52153+ struct acl_subject_label *prev;
52154+ struct acl_subject_label *next;
52155+
52156+ struct acl_object_label **obj_hash;
52157+ __u32 obj_hash_size;
52158+ __u16 pax_flags;
52159+};
52160+
52161+struct role_allowed_ip {
52162+ __u32 addr;
52163+ __u32 netmask;
52164+
52165+ struct role_allowed_ip *prev;
52166+ struct role_allowed_ip *next;
52167+};
52168+
52169+struct role_transition {
52170+ char *rolename;
52171+
52172+ struct role_transition *prev;
52173+ struct role_transition *next;
52174+};
52175+
52176+struct acl_role_label {
52177+ char *rolename;
52178+ uid_t uidgid;
52179+ __u16 roletype;
52180+
52181+ __u16 auth_attempts;
52182+ unsigned long expires;
52183+
52184+ struct acl_subject_label *root_label;
52185+ struct gr_hash_struct *hash;
52186+
52187+ struct acl_role_label *prev;
52188+ struct acl_role_label *next;
52189+
52190+ struct role_transition *transitions;
52191+ struct role_allowed_ip *allowed_ips;
52192+ uid_t *domain_children;
52193+ __u16 domain_child_num;
52194+
52195+ struct acl_subject_label **subj_hash;
52196+ __u32 subj_hash_size;
52197+};
52198+
52199+struct user_acl_role_db {
52200+ struct acl_role_label **r_table;
52201+ __u32 num_pointers; /* Number of allocations to track */
52202+ __u32 num_roles; /* Number of roles */
52203+ __u32 num_domain_children; /* Number of domain children */
52204+ __u32 num_subjects; /* Number of subjects */
52205+ __u32 num_objects; /* Number of objects */
52206+};
52207+
52208+struct acl_object_label {
52209+ char *filename;
52210+ ino_t inode;
52211+ dev_t device;
52212+ __u32 mode;
52213+
52214+ struct acl_subject_label *nested;
52215+ struct acl_object_label *globbed;
52216+
52217+ /* next two structures not used */
52218+
52219+ struct acl_object_label *prev;
52220+ struct acl_object_label *next;
52221+};
52222+
52223+struct acl_ip_label {
52224+ char *iface;
52225+ __u32 addr;
52226+ __u32 netmask;
52227+ __u16 low, high;
52228+ __u8 mode;
52229+ __u32 type;
52230+ __u32 proto[8];
52231+
52232+ /* next two structures not used */
52233+
52234+ struct acl_ip_label *prev;
52235+ struct acl_ip_label *next;
52236+};
52237+
52238+struct gr_arg {
52239+ struct user_acl_role_db role_db;
52240+ unsigned char pw[GR_PW_LEN];
52241+ unsigned char salt[GR_SALT_LEN];
52242+ unsigned char sum[GR_SHA_LEN];
52243+ unsigned char sp_role[GR_SPROLE_LEN];
52244+ struct sprole_pw *sprole_pws;
52245+ dev_t segv_device;
52246+ ino_t segv_inode;
52247+ uid_t segv_uid;
52248+ __u16 num_sprole_pws;
52249+ __u16 mode;
52250+};
52251+
52252+struct gr_arg_wrapper {
52253+ struct gr_arg *arg;
52254+ __u32 version;
52255+ __u32 size;
52256+};
52257+
52258+struct subject_map {
52259+ struct acl_subject_label *user;
52260+ struct acl_subject_label *kernel;
52261+ struct subject_map *prev;
52262+ struct subject_map *next;
52263+};
52264+
52265+struct acl_subj_map_db {
52266+ struct subject_map **s_hash;
52267+ __u32 s_size;
52268+};
52269+
52270+/* End Data Structures Section */
52271+
52272+/* Hash functions generated by empirical testing by Brad Spengler
52273+ Makes good use of the low bits of the inode. Generally 0-1 times
52274+ in loop for successful match. 0-3 for unsuccessful match.
52275+ Shift/add algorithm with modulus of table size and an XOR*/
52276+
52277+static __inline__ unsigned int
52278+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52279+{
52280+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
52281+}
52282+
52283+ static __inline__ unsigned int
52284+shash(const struct acl_subject_label *userp, const unsigned int sz)
52285+{
52286+ return ((const unsigned long)userp % sz);
52287+}
52288+
52289+static __inline__ unsigned int
52290+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52291+{
52292+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52293+}
52294+
52295+static __inline__ unsigned int
52296+nhash(const char *name, const __u16 len, const unsigned int sz)
52297+{
52298+ return full_name_hash((const unsigned char *)name, len) % sz;
52299+}
52300+
52301+#define FOR_EACH_ROLE_START(role) \
52302+ role = role_list; \
52303+ while (role) {
52304+
52305+#define FOR_EACH_ROLE_END(role) \
52306+ role = role->prev; \
52307+ }
52308+
52309+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52310+ subj = NULL; \
52311+ iter = 0; \
52312+ while (iter < role->subj_hash_size) { \
52313+ if (subj == NULL) \
52314+ subj = role->subj_hash[iter]; \
52315+ if (subj == NULL) { \
52316+ iter++; \
52317+ continue; \
52318+ }
52319+
52320+#define FOR_EACH_SUBJECT_END(subj,iter) \
52321+ subj = subj->next; \
52322+ if (subj == NULL) \
52323+ iter++; \
52324+ }
52325+
52326+
52327+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52328+ subj = role->hash->first; \
52329+ while (subj != NULL) {
52330+
52331+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52332+ subj = subj->next; \
52333+ }
52334+
52335+#endif
52336+
52337diff -urNp linux-3.0.3/include/linux/gralloc.h linux-3.0.3/include/linux/gralloc.h
52338--- linux-3.0.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52339+++ linux-3.0.3/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52340@@ -0,0 +1,9 @@
52341+#ifndef __GRALLOC_H
52342+#define __GRALLOC_H
52343+
52344+void acl_free_all(void);
52345+int acl_alloc_stack_init(unsigned long size);
52346+void *acl_alloc(unsigned long len);
52347+void *acl_alloc_num(unsigned long num, unsigned long len);
52348+
52349+#endif
52350diff -urNp linux-3.0.3/include/linux/grdefs.h linux-3.0.3/include/linux/grdefs.h
52351--- linux-3.0.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52352+++ linux-3.0.3/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52353@@ -0,0 +1,140 @@
52354+#ifndef GRDEFS_H
52355+#define GRDEFS_H
52356+
52357+/* Begin grsecurity status declarations */
52358+
52359+enum {
52360+ GR_READY = 0x01,
52361+ GR_STATUS_INIT = 0x00 // disabled state
52362+};
52363+
52364+/* Begin ACL declarations */
52365+
52366+/* Role flags */
52367+
52368+enum {
52369+ GR_ROLE_USER = 0x0001,
52370+ GR_ROLE_GROUP = 0x0002,
52371+ GR_ROLE_DEFAULT = 0x0004,
52372+ GR_ROLE_SPECIAL = 0x0008,
52373+ GR_ROLE_AUTH = 0x0010,
52374+ GR_ROLE_NOPW = 0x0020,
52375+ GR_ROLE_GOD = 0x0040,
52376+ GR_ROLE_LEARN = 0x0080,
52377+ GR_ROLE_TPE = 0x0100,
52378+ GR_ROLE_DOMAIN = 0x0200,
52379+ GR_ROLE_PAM = 0x0400,
52380+ GR_ROLE_PERSIST = 0x0800
52381+};
52382+
52383+/* ACL Subject and Object mode flags */
52384+enum {
52385+ GR_DELETED = 0x80000000
52386+};
52387+
52388+/* ACL Object-only mode flags */
52389+enum {
52390+ GR_READ = 0x00000001,
52391+ GR_APPEND = 0x00000002,
52392+ GR_WRITE = 0x00000004,
52393+ GR_EXEC = 0x00000008,
52394+ GR_FIND = 0x00000010,
52395+ GR_INHERIT = 0x00000020,
52396+ GR_SETID = 0x00000040,
52397+ GR_CREATE = 0x00000080,
52398+ GR_DELETE = 0x00000100,
52399+ GR_LINK = 0x00000200,
52400+ GR_AUDIT_READ = 0x00000400,
52401+ GR_AUDIT_APPEND = 0x00000800,
52402+ GR_AUDIT_WRITE = 0x00001000,
52403+ GR_AUDIT_EXEC = 0x00002000,
52404+ GR_AUDIT_FIND = 0x00004000,
52405+ GR_AUDIT_INHERIT= 0x00008000,
52406+ GR_AUDIT_SETID = 0x00010000,
52407+ GR_AUDIT_CREATE = 0x00020000,
52408+ GR_AUDIT_DELETE = 0x00040000,
52409+ GR_AUDIT_LINK = 0x00080000,
52410+ GR_PTRACERD = 0x00100000,
52411+ GR_NOPTRACE = 0x00200000,
52412+ GR_SUPPRESS = 0x00400000,
52413+ GR_NOLEARN = 0x00800000,
52414+ GR_INIT_TRANSFER= 0x01000000
52415+};
52416+
52417+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52418+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52419+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52420+
52421+/* ACL subject-only mode flags */
52422+enum {
52423+ GR_KILL = 0x00000001,
52424+ GR_VIEW = 0x00000002,
52425+ GR_PROTECTED = 0x00000004,
52426+ GR_LEARN = 0x00000008,
52427+ GR_OVERRIDE = 0x00000010,
52428+ /* just a placeholder, this mode is only used in userspace */
52429+ GR_DUMMY = 0x00000020,
52430+ GR_PROTSHM = 0x00000040,
52431+ GR_KILLPROC = 0x00000080,
52432+ GR_KILLIPPROC = 0x00000100,
52433+ /* just a placeholder, this mode is only used in userspace */
52434+ GR_NOTROJAN = 0x00000200,
52435+ GR_PROTPROCFD = 0x00000400,
52436+ GR_PROCACCT = 0x00000800,
52437+ GR_RELAXPTRACE = 0x00001000,
52438+ GR_NESTED = 0x00002000,
52439+ GR_INHERITLEARN = 0x00004000,
52440+ GR_PROCFIND = 0x00008000,
52441+ GR_POVERRIDE = 0x00010000,
52442+ GR_KERNELAUTH = 0x00020000,
52443+ GR_ATSECURE = 0x00040000,
52444+ GR_SHMEXEC = 0x00080000
52445+};
52446+
52447+enum {
52448+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52449+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52450+ GR_PAX_ENABLE_MPROTECT = 0x0004,
52451+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
52452+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52453+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52454+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52455+ GR_PAX_DISABLE_MPROTECT = 0x0400,
52456+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
52457+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52458+};
52459+
52460+enum {
52461+ GR_ID_USER = 0x01,
52462+ GR_ID_GROUP = 0x02,
52463+};
52464+
52465+enum {
52466+ GR_ID_ALLOW = 0x01,
52467+ GR_ID_DENY = 0x02,
52468+};
52469+
52470+#define GR_CRASH_RES 31
52471+#define GR_UIDTABLE_MAX 500
52472+
52473+/* begin resource learning section */
52474+enum {
52475+ GR_RLIM_CPU_BUMP = 60,
52476+ GR_RLIM_FSIZE_BUMP = 50000,
52477+ GR_RLIM_DATA_BUMP = 10000,
52478+ GR_RLIM_STACK_BUMP = 1000,
52479+ GR_RLIM_CORE_BUMP = 10000,
52480+ GR_RLIM_RSS_BUMP = 500000,
52481+ GR_RLIM_NPROC_BUMP = 1,
52482+ GR_RLIM_NOFILE_BUMP = 5,
52483+ GR_RLIM_MEMLOCK_BUMP = 50000,
52484+ GR_RLIM_AS_BUMP = 500000,
52485+ GR_RLIM_LOCKS_BUMP = 2,
52486+ GR_RLIM_SIGPENDING_BUMP = 5,
52487+ GR_RLIM_MSGQUEUE_BUMP = 10000,
52488+ GR_RLIM_NICE_BUMP = 1,
52489+ GR_RLIM_RTPRIO_BUMP = 1,
52490+ GR_RLIM_RTTIME_BUMP = 1000000
52491+};
52492+
52493+#endif
52494diff -urNp linux-3.0.3/include/linux/grinternal.h linux-3.0.3/include/linux/grinternal.h
52495--- linux-3.0.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52496+++ linux-3.0.3/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52497@@ -0,0 +1,219 @@
52498+#ifndef __GRINTERNAL_H
52499+#define __GRINTERNAL_H
52500+
52501+#ifdef CONFIG_GRKERNSEC
52502+
52503+#include <linux/fs.h>
52504+#include <linux/mnt_namespace.h>
52505+#include <linux/nsproxy.h>
52506+#include <linux/gracl.h>
52507+#include <linux/grdefs.h>
52508+#include <linux/grmsg.h>
52509+
52510+void gr_add_learn_entry(const char *fmt, ...)
52511+ __attribute__ ((format (printf, 1, 2)));
52512+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52513+ const struct vfsmount *mnt);
52514+__u32 gr_check_create(const struct dentry *new_dentry,
52515+ const struct dentry *parent,
52516+ const struct vfsmount *mnt, const __u32 mode);
52517+int gr_check_protected_task(const struct task_struct *task);
52518+__u32 to_gr_audit(const __u32 reqmode);
52519+int gr_set_acls(const int type);
52520+int gr_apply_subject_to_task(struct task_struct *task);
52521+int gr_acl_is_enabled(void);
52522+char gr_roletype_to_char(void);
52523+
52524+void gr_handle_alertkill(struct task_struct *task);
52525+char *gr_to_filename(const struct dentry *dentry,
52526+ const struct vfsmount *mnt);
52527+char *gr_to_filename1(const struct dentry *dentry,
52528+ const struct vfsmount *mnt);
52529+char *gr_to_filename2(const struct dentry *dentry,
52530+ const struct vfsmount *mnt);
52531+char *gr_to_filename3(const struct dentry *dentry,
52532+ const struct vfsmount *mnt);
52533+
52534+extern int grsec_enable_harden_ptrace;
52535+extern int grsec_enable_link;
52536+extern int grsec_enable_fifo;
52537+extern int grsec_enable_execve;
52538+extern int grsec_enable_shm;
52539+extern int grsec_enable_execlog;
52540+extern int grsec_enable_signal;
52541+extern int grsec_enable_audit_ptrace;
52542+extern int grsec_enable_forkfail;
52543+extern int grsec_enable_time;
52544+extern int grsec_enable_rofs;
52545+extern int grsec_enable_chroot_shmat;
52546+extern int grsec_enable_chroot_mount;
52547+extern int grsec_enable_chroot_double;
52548+extern int grsec_enable_chroot_pivot;
52549+extern int grsec_enable_chroot_chdir;
52550+extern int grsec_enable_chroot_chmod;
52551+extern int grsec_enable_chroot_mknod;
52552+extern int grsec_enable_chroot_fchdir;
52553+extern int grsec_enable_chroot_nice;
52554+extern int grsec_enable_chroot_execlog;
52555+extern int grsec_enable_chroot_caps;
52556+extern int grsec_enable_chroot_sysctl;
52557+extern int grsec_enable_chroot_unix;
52558+extern int grsec_enable_tpe;
52559+extern int grsec_tpe_gid;
52560+extern int grsec_enable_tpe_all;
52561+extern int grsec_enable_tpe_invert;
52562+extern int grsec_enable_socket_all;
52563+extern int grsec_socket_all_gid;
52564+extern int grsec_enable_socket_client;
52565+extern int grsec_socket_client_gid;
52566+extern int grsec_enable_socket_server;
52567+extern int grsec_socket_server_gid;
52568+extern int grsec_audit_gid;
52569+extern int grsec_enable_group;
52570+extern int grsec_enable_audit_textrel;
52571+extern int grsec_enable_log_rwxmaps;
52572+extern int grsec_enable_mount;
52573+extern int grsec_enable_chdir;
52574+extern int grsec_resource_logging;
52575+extern int grsec_enable_blackhole;
52576+extern int grsec_lastack_retries;
52577+extern int grsec_enable_brute;
52578+extern int grsec_lock;
52579+
52580+extern spinlock_t grsec_alert_lock;
52581+extern unsigned long grsec_alert_wtime;
52582+extern unsigned long grsec_alert_fyet;
52583+
52584+extern spinlock_t grsec_audit_lock;
52585+
52586+extern rwlock_t grsec_exec_file_lock;
52587+
52588+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52589+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52590+ (tsk)->exec_file->f_vfsmnt) : "/")
52591+
52592+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52593+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52594+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52595+
52596+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52597+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
52598+ (tsk)->exec_file->f_vfsmnt) : "/")
52599+
52600+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52601+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52602+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52603+
52604+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52605+
52606+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52607+
52608+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52609+ (task)->pid, (cred)->uid, \
52610+ (cred)->euid, (cred)->gid, (cred)->egid, \
52611+ gr_parent_task_fullpath(task), \
52612+ (task)->real_parent->comm, (task)->real_parent->pid, \
52613+ (pcred)->uid, (pcred)->euid, \
52614+ (pcred)->gid, (pcred)->egid
52615+
52616+#define GR_CHROOT_CAPS {{ \
52617+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52618+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52619+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52620+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52621+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52622+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52623+
52624+#define security_learn(normal_msg,args...) \
52625+({ \
52626+ read_lock(&grsec_exec_file_lock); \
52627+ gr_add_learn_entry(normal_msg "\n", ## args); \
52628+ read_unlock(&grsec_exec_file_lock); \
52629+})
52630+
52631+enum {
52632+ GR_DO_AUDIT,
52633+ GR_DONT_AUDIT,
52634+ /* used for non-audit messages that we shouldn't kill the task on */
52635+ GR_DONT_AUDIT_GOOD
52636+};
52637+
52638+enum {
52639+ GR_TTYSNIFF,
52640+ GR_RBAC,
52641+ GR_RBAC_STR,
52642+ GR_STR_RBAC,
52643+ GR_RBAC_MODE2,
52644+ GR_RBAC_MODE3,
52645+ GR_FILENAME,
52646+ GR_SYSCTL_HIDDEN,
52647+ GR_NOARGS,
52648+ GR_ONE_INT,
52649+ GR_ONE_INT_TWO_STR,
52650+ GR_ONE_STR,
52651+ GR_STR_INT,
52652+ GR_TWO_STR_INT,
52653+ GR_TWO_INT,
52654+ GR_TWO_U64,
52655+ GR_THREE_INT,
52656+ GR_FIVE_INT_TWO_STR,
52657+ GR_TWO_STR,
52658+ GR_THREE_STR,
52659+ GR_FOUR_STR,
52660+ GR_STR_FILENAME,
52661+ GR_FILENAME_STR,
52662+ GR_FILENAME_TWO_INT,
52663+ GR_FILENAME_TWO_INT_STR,
52664+ GR_TEXTREL,
52665+ GR_PTRACE,
52666+ GR_RESOURCE,
52667+ GR_CAP,
52668+ GR_SIG,
52669+ GR_SIG2,
52670+ GR_CRASH1,
52671+ GR_CRASH2,
52672+ GR_PSACCT,
52673+ GR_RWXMAP
52674+};
52675+
52676+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52677+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52678+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52679+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52680+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52681+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52682+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52683+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52684+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52685+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52686+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52687+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52688+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52689+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52690+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52691+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52692+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52693+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52694+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52695+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52696+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52697+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52698+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52699+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52700+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52701+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52702+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52703+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52704+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52705+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52706+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52707+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52708+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52709+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52710+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52711+
52712+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52713+
52714+#endif
52715+
52716+#endif
52717diff -urNp linux-3.0.3/include/linux/grmsg.h linux-3.0.3/include/linux/grmsg.h
52718--- linux-3.0.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52719+++ linux-3.0.3/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
52720@@ -0,0 +1,107 @@
52721+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52722+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52723+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52724+#define GR_STOPMOD_MSG "denied modification of module state by "
52725+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52726+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52727+#define GR_IOPERM_MSG "denied use of ioperm() by "
52728+#define GR_IOPL_MSG "denied use of iopl() by "
52729+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52730+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52731+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52732+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52733+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52734+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52735+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52736+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52737+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52738+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52739+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52740+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52741+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52742+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52743+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52744+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52745+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52746+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52747+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52748+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52749+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52750+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52751+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52752+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52753+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52754+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52755+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52756+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52757+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52758+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52759+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52760+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52761+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52762+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52763+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52764+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52765+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52766+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52767+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52768+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52769+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52770+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52771+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52772+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52773+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52774+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52775+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52776+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52777+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52778+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52779+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52780+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52781+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52782+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52783+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52784+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52785+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52786+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52787+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52788+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52789+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52790+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52791+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52792+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52793+#define GR_FAILFORK_MSG "failed fork with errno %s by "
52794+#define GR_NICE_CHROOT_MSG "denied priority change by "
52795+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52796+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52797+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52798+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52799+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52800+#define GR_TIME_MSG "time set by "
52801+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52802+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52803+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52804+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52805+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52806+#define GR_BIND_MSG "denied bind() by "
52807+#define GR_CONNECT_MSG "denied connect() by "
52808+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52809+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52810+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52811+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52812+#define GR_CAP_ACL_MSG "use of %s denied for "
52813+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52814+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52815+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52816+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52817+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52818+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52819+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52820+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52821+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52822+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52823+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52824+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52825+#define GR_VM86_MSG "denied use of vm86 by "
52826+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52827+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52828diff -urNp linux-3.0.3/include/linux/grsecurity.h linux-3.0.3/include/linux/grsecurity.h
52829--- linux-3.0.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52830+++ linux-3.0.3/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
52831@@ -0,0 +1,227 @@
52832+#ifndef GR_SECURITY_H
52833+#define GR_SECURITY_H
52834+#include <linux/fs.h>
52835+#include <linux/fs_struct.h>
52836+#include <linux/binfmts.h>
52837+#include <linux/gracl.h>
52838+
52839+/* notify of brain-dead configs */
52840+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52841+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52842+#endif
52843+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52844+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52845+#endif
52846+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52847+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52848+#endif
52849+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52850+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52851+#endif
52852+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52853+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52854+#endif
52855+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52856+#error "CONFIG_PAX enabled, but no PaX options are enabled."
52857+#endif
52858+
52859+#include <linux/compat.h>
52860+
52861+struct user_arg_ptr {
52862+#ifdef CONFIG_COMPAT
52863+ bool is_compat;
52864+#endif
52865+ union {
52866+ const char __user *const __user *native;
52867+#ifdef CONFIG_COMPAT
52868+ compat_uptr_t __user *compat;
52869+#endif
52870+ } ptr;
52871+};
52872+
52873+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52874+void gr_handle_brute_check(void);
52875+void gr_handle_kernel_exploit(void);
52876+int gr_process_user_ban(void);
52877+
52878+char gr_roletype_to_char(void);
52879+
52880+int gr_acl_enable_at_secure(void);
52881+
52882+int gr_check_user_change(int real, int effective, int fs);
52883+int gr_check_group_change(int real, int effective, int fs);
52884+
52885+void gr_del_task_from_ip_table(struct task_struct *p);
52886+
52887+int gr_pid_is_chrooted(struct task_struct *p);
52888+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52889+int gr_handle_chroot_nice(void);
52890+int gr_handle_chroot_sysctl(const int op);
52891+int gr_handle_chroot_setpriority(struct task_struct *p,
52892+ const int niceval);
52893+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52894+int gr_handle_chroot_chroot(const struct dentry *dentry,
52895+ const struct vfsmount *mnt);
52896+int gr_handle_chroot_caps(struct path *path);
52897+void gr_handle_chroot_chdir(struct path *path);
52898+int gr_handle_chroot_chmod(const struct dentry *dentry,
52899+ const struct vfsmount *mnt, const int mode);
52900+int gr_handle_chroot_mknod(const struct dentry *dentry,
52901+ const struct vfsmount *mnt, const int mode);
52902+int gr_handle_chroot_mount(const struct dentry *dentry,
52903+ const struct vfsmount *mnt,
52904+ const char *dev_name);
52905+int gr_handle_chroot_pivot(void);
52906+int gr_handle_chroot_unix(const pid_t pid);
52907+
52908+int gr_handle_rawio(const struct inode *inode);
52909+
52910+void gr_handle_ioperm(void);
52911+void gr_handle_iopl(void);
52912+
52913+int gr_tpe_allow(const struct file *file);
52914+
52915+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52916+void gr_clear_chroot_entries(struct task_struct *task);
52917+
52918+void gr_log_forkfail(const int retval);
52919+void gr_log_timechange(void);
52920+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52921+void gr_log_chdir(const struct dentry *dentry,
52922+ const struct vfsmount *mnt);
52923+void gr_log_chroot_exec(const struct dentry *dentry,
52924+ const struct vfsmount *mnt);
52925+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52926+void gr_log_remount(const char *devname, const int retval);
52927+void gr_log_unmount(const char *devname, const int retval);
52928+void gr_log_mount(const char *from, const char *to, const int retval);
52929+void gr_log_textrel(struct vm_area_struct *vma);
52930+void gr_log_rwxmmap(struct file *file);
52931+void gr_log_rwxmprotect(struct file *file);
52932+
52933+int gr_handle_follow_link(const struct inode *parent,
52934+ const struct inode *inode,
52935+ const struct dentry *dentry,
52936+ const struct vfsmount *mnt);
52937+int gr_handle_fifo(const struct dentry *dentry,
52938+ const struct vfsmount *mnt,
52939+ const struct dentry *dir, const int flag,
52940+ const int acc_mode);
52941+int gr_handle_hardlink(const struct dentry *dentry,
52942+ const struct vfsmount *mnt,
52943+ struct inode *inode,
52944+ const int mode, const char *to);
52945+
52946+int gr_is_capable(const int cap);
52947+int gr_is_capable_nolog(const int cap);
52948+void gr_learn_resource(const struct task_struct *task, const int limit,
52949+ const unsigned long wanted, const int gt);
52950+void gr_copy_label(struct task_struct *tsk);
52951+void gr_handle_crash(struct task_struct *task, const int sig);
52952+int gr_handle_signal(const struct task_struct *p, const int sig);
52953+int gr_check_crash_uid(const uid_t uid);
52954+int gr_check_protected_task(const struct task_struct *task);
52955+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52956+int gr_acl_handle_mmap(const struct file *file,
52957+ const unsigned long prot);
52958+int gr_acl_handle_mprotect(const struct file *file,
52959+ const unsigned long prot);
52960+int gr_check_hidden_task(const struct task_struct *tsk);
52961+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52962+ const struct vfsmount *mnt);
52963+__u32 gr_acl_handle_utime(const struct dentry *dentry,
52964+ const struct vfsmount *mnt);
52965+__u32 gr_acl_handle_access(const struct dentry *dentry,
52966+ const struct vfsmount *mnt, const int fmode);
52967+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52968+ const struct vfsmount *mnt, mode_t mode);
52969+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52970+ const struct vfsmount *mnt, mode_t mode);
52971+__u32 gr_acl_handle_chown(const struct dentry *dentry,
52972+ const struct vfsmount *mnt);
52973+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52974+ const struct vfsmount *mnt);
52975+int gr_handle_ptrace(struct task_struct *task, const long request);
52976+int gr_handle_proc_ptrace(struct task_struct *task);
52977+__u32 gr_acl_handle_execve(const struct dentry *dentry,
52978+ const struct vfsmount *mnt);
52979+int gr_check_crash_exec(const struct file *filp);
52980+int gr_acl_is_enabled(void);
52981+void gr_set_kernel_label(struct task_struct *task);
52982+void gr_set_role_label(struct task_struct *task, const uid_t uid,
52983+ const gid_t gid);
52984+int gr_set_proc_label(const struct dentry *dentry,
52985+ const struct vfsmount *mnt,
52986+ const int unsafe_share);
52987+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
52988+ const struct vfsmount *mnt);
52989+__u32 gr_acl_handle_open(const struct dentry *dentry,
52990+ const struct vfsmount *mnt, const int fmode);
52991+__u32 gr_acl_handle_creat(const struct dentry *dentry,
52992+ const struct dentry *p_dentry,
52993+ const struct vfsmount *p_mnt, const int fmode,
52994+ const int imode);
52995+void gr_handle_create(const struct dentry *dentry,
52996+ const struct vfsmount *mnt);
52997+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
52998+ const struct dentry *parent_dentry,
52999+ const struct vfsmount *parent_mnt,
53000+ const int mode);
53001+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53002+ const struct dentry *parent_dentry,
53003+ const struct vfsmount *parent_mnt);
53004+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53005+ const struct vfsmount *mnt);
53006+void gr_handle_delete(const ino_t ino, const dev_t dev);
53007+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53008+ const struct vfsmount *mnt);
53009+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53010+ const struct dentry *parent_dentry,
53011+ const struct vfsmount *parent_mnt,
53012+ const char *from);
53013+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53014+ const struct dentry *parent_dentry,
53015+ const struct vfsmount *parent_mnt,
53016+ const struct dentry *old_dentry,
53017+ const struct vfsmount *old_mnt, const char *to);
53018+int gr_acl_handle_rename(struct dentry *new_dentry,
53019+ struct dentry *parent_dentry,
53020+ const struct vfsmount *parent_mnt,
53021+ struct dentry *old_dentry,
53022+ struct inode *old_parent_inode,
53023+ struct vfsmount *old_mnt, const char *newname);
53024+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53025+ struct dentry *old_dentry,
53026+ struct dentry *new_dentry,
53027+ struct vfsmount *mnt, const __u8 replace);
53028+__u32 gr_check_link(const struct dentry *new_dentry,
53029+ const struct dentry *parent_dentry,
53030+ const struct vfsmount *parent_mnt,
53031+ const struct dentry *old_dentry,
53032+ const struct vfsmount *old_mnt);
53033+int gr_acl_handle_filldir(const struct file *file, const char *name,
53034+ const unsigned int namelen, const ino_t ino);
53035+
53036+__u32 gr_acl_handle_unix(const struct dentry *dentry,
53037+ const struct vfsmount *mnt);
53038+void gr_acl_handle_exit(void);
53039+void gr_acl_handle_psacct(struct task_struct *task, const long code);
53040+int gr_acl_handle_procpidmem(const struct task_struct *task);
53041+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53042+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53043+void gr_audit_ptrace(struct task_struct *task);
53044+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53045+
53046+#ifdef CONFIG_GRKERNSEC
53047+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53048+void gr_handle_vm86(void);
53049+void gr_handle_mem_readwrite(u64 from, u64 to);
53050+
53051+extern int grsec_enable_dmesg;
53052+extern int grsec_disable_privio;
53053+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53054+extern int grsec_enable_chroot_findtask;
53055+#endif
53056+#endif
53057+
53058+#endif
53059diff -urNp linux-3.0.3/include/linux/grsock.h linux-3.0.3/include/linux/grsock.h
53060--- linux-3.0.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53061+++ linux-3.0.3/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
53062@@ -0,0 +1,19 @@
53063+#ifndef __GRSOCK_H
53064+#define __GRSOCK_H
53065+
53066+extern void gr_attach_curr_ip(const struct sock *sk);
53067+extern int gr_handle_sock_all(const int family, const int type,
53068+ const int protocol);
53069+extern int gr_handle_sock_server(const struct sockaddr *sck);
53070+extern int gr_handle_sock_server_other(const struct sock *sck);
53071+extern int gr_handle_sock_client(const struct sockaddr *sck);
53072+extern int gr_search_connect(struct socket * sock,
53073+ struct sockaddr_in * addr);
53074+extern int gr_search_bind(struct socket * sock,
53075+ struct sockaddr_in * addr);
53076+extern int gr_search_listen(struct socket * sock);
53077+extern int gr_search_accept(struct socket * sock);
53078+extern int gr_search_socket(const int domain, const int type,
53079+ const int protocol);
53080+
53081+#endif
53082diff -urNp linux-3.0.3/include/linux/hid.h linux-3.0.3/include/linux/hid.h
53083--- linux-3.0.3/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
53084+++ linux-3.0.3/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
53085@@ -675,7 +675,7 @@ struct hid_ll_driver {
53086 unsigned int code, int value);
53087
53088 int (*parse)(struct hid_device *hdev);
53089-};
53090+} __no_const;
53091
53092 #define PM_HINT_FULLON 1<<5
53093 #define PM_HINT_NORMAL 1<<1
53094diff -urNp linux-3.0.3/include/linux/highmem.h linux-3.0.3/include/linux/highmem.h
53095--- linux-3.0.3/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
53096+++ linux-3.0.3/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
53097@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53098 kunmap_atomic(kaddr, KM_USER0);
53099 }
53100
53101+static inline void sanitize_highpage(struct page *page)
53102+{
53103+ void *kaddr;
53104+ unsigned long flags;
53105+
53106+ local_irq_save(flags);
53107+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
53108+ clear_page(kaddr);
53109+ kunmap_atomic(kaddr, KM_CLEARPAGE);
53110+ local_irq_restore(flags);
53111+}
53112+
53113 static inline void zero_user_segments(struct page *page,
53114 unsigned start1, unsigned end1,
53115 unsigned start2, unsigned end2)
53116diff -urNp linux-3.0.3/include/linux/i2c.h linux-3.0.3/include/linux/i2c.h
53117--- linux-3.0.3/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
53118+++ linux-3.0.3/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
53119@@ -346,6 +346,7 @@ struct i2c_algorithm {
53120 /* To determine what the adapter supports */
53121 u32 (*functionality) (struct i2c_adapter *);
53122 };
53123+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53124
53125 /*
53126 * i2c_adapter is the structure used to identify a physical i2c bus along
53127diff -urNp linux-3.0.3/include/linux/i2o.h linux-3.0.3/include/linux/i2o.h
53128--- linux-3.0.3/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
53129+++ linux-3.0.3/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
53130@@ -564,7 +564,7 @@ struct i2o_controller {
53131 struct i2o_device *exec; /* Executive */
53132 #if BITS_PER_LONG == 64
53133 spinlock_t context_list_lock; /* lock for context_list */
53134- atomic_t context_list_counter; /* needed for unique contexts */
53135+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53136 struct list_head context_list; /* list of context id's
53137 and pointers */
53138 #endif
53139diff -urNp linux-3.0.3/include/linux/init.h linux-3.0.3/include/linux/init.h
53140--- linux-3.0.3/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
53141+++ linux-3.0.3/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
53142@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53143
53144 /* Each module must use one module_init(). */
53145 #define module_init(initfn) \
53146- static inline initcall_t __inittest(void) \
53147+ static inline __used initcall_t __inittest(void) \
53148 { return initfn; } \
53149 int init_module(void) __attribute__((alias(#initfn)));
53150
53151 /* This is only required if you want to be unloadable. */
53152 #define module_exit(exitfn) \
53153- static inline exitcall_t __exittest(void) \
53154+ static inline __used exitcall_t __exittest(void) \
53155 { return exitfn; } \
53156 void cleanup_module(void) __attribute__((alias(#exitfn)));
53157
53158diff -urNp linux-3.0.3/include/linux/init_task.h linux-3.0.3/include/linux/init_task.h
53159--- linux-3.0.3/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
53160+++ linux-3.0.3/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
53161@@ -126,6 +126,12 @@ extern struct cred init_cred;
53162 # define INIT_PERF_EVENTS(tsk)
53163 #endif
53164
53165+#ifdef CONFIG_X86
53166+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53167+#else
53168+#define INIT_TASK_THREAD_INFO
53169+#endif
53170+
53171 /*
53172 * INIT_TASK is used to set up the first task table, touch at
53173 * your own risk!. Base=0, limit=0x1fffff (=2MB)
53174@@ -164,6 +170,7 @@ extern struct cred init_cred;
53175 RCU_INIT_POINTER(.cred, &init_cred), \
53176 .comm = "swapper", \
53177 .thread = INIT_THREAD, \
53178+ INIT_TASK_THREAD_INFO \
53179 .fs = &init_fs, \
53180 .files = &init_files, \
53181 .signal = &init_signals, \
53182diff -urNp linux-3.0.3/include/linux/intel-iommu.h linux-3.0.3/include/linux/intel-iommu.h
53183--- linux-3.0.3/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
53184+++ linux-3.0.3/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
53185@@ -296,7 +296,7 @@ struct iommu_flush {
53186 u8 fm, u64 type);
53187 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53188 unsigned int size_order, u64 type);
53189-};
53190+} __no_const;
53191
53192 enum {
53193 SR_DMAR_FECTL_REG,
53194diff -urNp linux-3.0.3/include/linux/interrupt.h linux-3.0.3/include/linux/interrupt.h
53195--- linux-3.0.3/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
53196+++ linux-3.0.3/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
53197@@ -422,7 +422,7 @@ enum
53198 /* map softirq index to softirq name. update 'softirq_to_name' in
53199 * kernel/softirq.c when adding a new softirq.
53200 */
53201-extern char *softirq_to_name[NR_SOFTIRQS];
53202+extern const char * const softirq_to_name[NR_SOFTIRQS];
53203
53204 /* softirq mask and active fields moved to irq_cpustat_t in
53205 * asm/hardirq.h to get better cache usage. KAO
53206@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53207
53208 struct softirq_action
53209 {
53210- void (*action)(struct softirq_action *);
53211+ void (*action)(void);
53212 };
53213
53214 asmlinkage void do_softirq(void);
53215 asmlinkage void __do_softirq(void);
53216-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53217+extern void open_softirq(int nr, void (*action)(void));
53218 extern void softirq_init(void);
53219 static inline void __raise_softirq_irqoff(unsigned int nr)
53220 {
53221diff -urNp linux-3.0.3/include/linux/kallsyms.h linux-3.0.3/include/linux/kallsyms.h
53222--- linux-3.0.3/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
53223+++ linux-3.0.3/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
53224@@ -15,7 +15,8 @@
53225
53226 struct module;
53227
53228-#ifdef CONFIG_KALLSYMS
53229+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53230+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53231 /* Lookup the address for a symbol. Returns 0 if not found. */
53232 unsigned long kallsyms_lookup_name(const char *name);
53233
53234@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53235 /* Stupid that this does nothing, but I didn't create this mess. */
53236 #define __print_symbol(fmt, addr)
53237 #endif /*CONFIG_KALLSYMS*/
53238+#else /* when included by kallsyms.c, vsnprintf.c, or
53239+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53240+extern void __print_symbol(const char *fmt, unsigned long address);
53241+extern int sprint_backtrace(char *buffer, unsigned long address);
53242+extern int sprint_symbol(char *buffer, unsigned long address);
53243+const char *kallsyms_lookup(unsigned long addr,
53244+ unsigned long *symbolsize,
53245+ unsigned long *offset,
53246+ char **modname, char *namebuf);
53247+#endif
53248
53249 /* This macro allows us to keep printk typechecking */
53250 static void __check_printsym_format(const char *fmt, ...)
53251diff -urNp linux-3.0.3/include/linux/kgdb.h linux-3.0.3/include/linux/kgdb.h
53252--- linux-3.0.3/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
53253+++ linux-3.0.3/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
53254@@ -53,7 +53,7 @@ extern int kgdb_connected;
53255 extern int kgdb_io_module_registered;
53256
53257 extern atomic_t kgdb_setting_breakpoint;
53258-extern atomic_t kgdb_cpu_doing_single_step;
53259+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53260
53261 extern struct task_struct *kgdb_usethread;
53262 extern struct task_struct *kgdb_contthread;
53263@@ -251,7 +251,7 @@ struct kgdb_arch {
53264 void (*disable_hw_break)(struct pt_regs *regs);
53265 void (*remove_all_hw_break)(void);
53266 void (*correct_hw_break)(void);
53267-};
53268+} __do_const;
53269
53270 /**
53271 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
53272@@ -276,7 +276,7 @@ struct kgdb_io {
53273 void (*pre_exception) (void);
53274 void (*post_exception) (void);
53275 int is_console;
53276-};
53277+} __do_const;
53278
53279 extern struct kgdb_arch arch_kgdb_ops;
53280
53281diff -urNp linux-3.0.3/include/linux/kmod.h linux-3.0.3/include/linux/kmod.h
53282--- linux-3.0.3/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53283+++ linux-3.0.3/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53284@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53285 * usually useless though. */
53286 extern int __request_module(bool wait, const char *name, ...) \
53287 __attribute__((format(printf, 2, 3)));
53288+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53289+ __attribute__((format(printf, 3, 4)));
53290 #define request_module(mod...) __request_module(true, mod)
53291 #define request_module_nowait(mod...) __request_module(false, mod)
53292 #define try_then_request_module(x, mod...) \
53293diff -urNp linux-3.0.3/include/linux/kvm_host.h linux-3.0.3/include/linux/kvm_host.h
53294--- linux-3.0.3/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53295+++ linux-3.0.3/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53296@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53297 void vcpu_load(struct kvm_vcpu *vcpu);
53298 void vcpu_put(struct kvm_vcpu *vcpu);
53299
53300-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53301+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53302 struct module *module);
53303 void kvm_exit(void);
53304
53305@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53306 struct kvm_guest_debug *dbg);
53307 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53308
53309-int kvm_arch_init(void *opaque);
53310+int kvm_arch_init(const void *opaque);
53311 void kvm_arch_exit(void);
53312
53313 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53314diff -urNp linux-3.0.3/include/linux/libata.h linux-3.0.3/include/linux/libata.h
53315--- linux-3.0.3/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53316+++ linux-3.0.3/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
53317@@ -899,7 +899,7 @@ struct ata_port_operations {
53318 * fields must be pointers.
53319 */
53320 const struct ata_port_operations *inherits;
53321-};
53322+} __do_const;
53323
53324 struct ata_port_info {
53325 unsigned long flags;
53326diff -urNp linux-3.0.3/include/linux/mca.h linux-3.0.3/include/linux/mca.h
53327--- linux-3.0.3/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53328+++ linux-3.0.3/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53329@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53330 int region);
53331 void * (*mca_transform_memory)(struct mca_device *,
53332 void *memory);
53333-};
53334+} __no_const;
53335
53336 struct mca_bus {
53337 u64 default_dma_mask;
53338diff -urNp linux-3.0.3/include/linux/memory.h linux-3.0.3/include/linux/memory.h
53339--- linux-3.0.3/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53340+++ linux-3.0.3/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53341@@ -144,7 +144,7 @@ struct memory_accessor {
53342 size_t count);
53343 ssize_t (*write)(struct memory_accessor *, const char *buf,
53344 off_t offset, size_t count);
53345-};
53346+} __no_const;
53347
53348 /*
53349 * Kernel text modification mutex, used for code patching. Users of this lock
53350diff -urNp linux-3.0.3/include/linux/mfd/abx500.h linux-3.0.3/include/linux/mfd/abx500.h
53351--- linux-3.0.3/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53352+++ linux-3.0.3/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53353@@ -234,6 +234,7 @@ struct abx500_ops {
53354 int (*event_registers_startup_state_get) (struct device *, u8 *);
53355 int (*startup_irq_enabled) (struct device *, unsigned int);
53356 };
53357+typedef struct abx500_ops __no_const abx500_ops_no_const;
53358
53359 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53360 void abx500_remove_ops(struct device *dev);
53361diff -urNp linux-3.0.3/include/linux/mm.h linux-3.0.3/include/linux/mm.h
53362--- linux-3.0.3/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53363+++ linux-3.0.3/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53364@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53365
53366 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53367 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53368+
53369+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53370+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53371+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53372+#else
53373 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53374+#endif
53375+
53376 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53377 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53378
53379@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53380 int set_page_dirty_lock(struct page *page);
53381 int clear_page_dirty_for_io(struct page *page);
53382
53383-/* Is the vma a continuation of the stack vma above it? */
53384-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53385-{
53386- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53387-}
53388-
53389-static inline int stack_guard_page_start(struct vm_area_struct *vma,
53390- unsigned long addr)
53391-{
53392- return (vma->vm_flags & VM_GROWSDOWN) &&
53393- (vma->vm_start == addr) &&
53394- !vma_growsdown(vma->vm_prev, addr);
53395-}
53396-
53397-/* Is the vma a continuation of the stack vma below it? */
53398-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53399-{
53400- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53401-}
53402-
53403-static inline int stack_guard_page_end(struct vm_area_struct *vma,
53404- unsigned long addr)
53405-{
53406- return (vma->vm_flags & VM_GROWSUP) &&
53407- (vma->vm_end == addr) &&
53408- !vma_growsup(vma->vm_next, addr);
53409-}
53410-
53411 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53412 unsigned long old_addr, struct vm_area_struct *new_vma,
53413 unsigned long new_addr, unsigned long len);
53414@@ -1169,6 +1148,15 @@ struct shrinker {
53415 extern void register_shrinker(struct shrinker *);
53416 extern void unregister_shrinker(struct shrinker *);
53417
53418+#ifdef CONFIG_MMU
53419+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53420+#else
53421+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53422+{
53423+ return __pgprot(0);
53424+}
53425+#endif
53426+
53427 int vma_wants_writenotify(struct vm_area_struct *vma);
53428
53429 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53430@@ -1452,6 +1440,7 @@ out:
53431 }
53432
53433 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53434+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53435
53436 extern unsigned long do_brk(unsigned long, unsigned long);
53437
53438@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53439 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53440 struct vm_area_struct **pprev);
53441
53442+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53443+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53444+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53445+
53446 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53447 NULL if none. Assume start_addr < end_addr. */
53448 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53449@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53450 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53451 }
53452
53453-#ifdef CONFIG_MMU
53454-pgprot_t vm_get_page_prot(unsigned long vm_flags);
53455-#else
53456-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53457-{
53458- return __pgprot(0);
53459-}
53460-#endif
53461-
53462 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53463 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53464 unsigned long pfn, unsigned long size, pgprot_t);
53465@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53466 extern int sysctl_memory_failure_early_kill;
53467 extern int sysctl_memory_failure_recovery;
53468 extern void shake_page(struct page *p, int access);
53469-extern atomic_long_t mce_bad_pages;
53470+extern atomic_long_unchecked_t mce_bad_pages;
53471 extern int soft_offline_page(struct page *page, int flags);
53472
53473 extern void dump_page(struct page *page);
53474@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53475 unsigned int pages_per_huge_page);
53476 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53477
53478+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53479+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53480+#else
53481+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53482+#endif
53483+
53484 #endif /* __KERNEL__ */
53485 #endif /* _LINUX_MM_H */
53486diff -urNp linux-3.0.3/include/linux/mm_types.h linux-3.0.3/include/linux/mm_types.h
53487--- linux-3.0.3/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53488+++ linux-3.0.3/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53489@@ -184,6 +184,8 @@ struct vm_area_struct {
53490 #ifdef CONFIG_NUMA
53491 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53492 #endif
53493+
53494+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53495 };
53496
53497 struct core_thread {
53498@@ -316,6 +318,24 @@ struct mm_struct {
53499 #ifdef CONFIG_CPUMASK_OFFSTACK
53500 struct cpumask cpumask_allocation;
53501 #endif
53502+
53503+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53504+ unsigned long pax_flags;
53505+#endif
53506+
53507+#ifdef CONFIG_PAX_DLRESOLVE
53508+ unsigned long call_dl_resolve;
53509+#endif
53510+
53511+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53512+ unsigned long call_syscall;
53513+#endif
53514+
53515+#ifdef CONFIG_PAX_ASLR
53516+ unsigned long delta_mmap; /* randomized offset */
53517+ unsigned long delta_stack; /* randomized offset */
53518+#endif
53519+
53520 };
53521
53522 static inline void mm_init_cpumask(struct mm_struct *mm)
53523diff -urNp linux-3.0.3/include/linux/mmu_notifier.h linux-3.0.3/include/linux/mmu_notifier.h
53524--- linux-3.0.3/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53525+++ linux-3.0.3/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53526@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53527 */
53528 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53529 ({ \
53530- pte_t __pte; \
53531+ pte_t ___pte; \
53532 struct vm_area_struct *___vma = __vma; \
53533 unsigned long ___address = __address; \
53534- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53535+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53536 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53537- __pte; \
53538+ ___pte; \
53539 })
53540
53541 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53542diff -urNp linux-3.0.3/include/linux/mmzone.h linux-3.0.3/include/linux/mmzone.h
53543--- linux-3.0.3/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53544+++ linux-3.0.3/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53545@@ -350,7 +350,7 @@ struct zone {
53546 unsigned long flags; /* zone flags, see below */
53547
53548 /* Zone statistics */
53549- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53550+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53551
53552 /*
53553 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53554diff -urNp linux-3.0.3/include/linux/mod_devicetable.h linux-3.0.3/include/linux/mod_devicetable.h
53555--- linux-3.0.3/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53556+++ linux-3.0.3/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53557@@ -12,7 +12,7 @@
53558 typedef unsigned long kernel_ulong_t;
53559 #endif
53560
53561-#define PCI_ANY_ID (~0)
53562+#define PCI_ANY_ID ((__u16)~0)
53563
53564 struct pci_device_id {
53565 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53566@@ -131,7 +131,7 @@ struct usb_device_id {
53567 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53568 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53569
53570-#define HID_ANY_ID (~0)
53571+#define HID_ANY_ID (~0U)
53572
53573 struct hid_device_id {
53574 __u16 bus;
53575diff -urNp linux-3.0.3/include/linux/module.h linux-3.0.3/include/linux/module.h
53576--- linux-3.0.3/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53577+++ linux-3.0.3/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53578@@ -16,6 +16,7 @@
53579 #include <linux/kobject.h>
53580 #include <linux/moduleparam.h>
53581 #include <linux/tracepoint.h>
53582+#include <linux/fs.h>
53583
53584 #include <linux/percpu.h>
53585 #include <asm/module.h>
53586@@ -325,19 +326,16 @@ struct module
53587 int (*init)(void);
53588
53589 /* If this is non-NULL, vfree after init() returns */
53590- void *module_init;
53591+ void *module_init_rx, *module_init_rw;
53592
53593 /* Here is the actual code + data, vfree'd on unload. */
53594- void *module_core;
53595+ void *module_core_rx, *module_core_rw;
53596
53597 /* Here are the sizes of the init and core sections */
53598- unsigned int init_size, core_size;
53599+ unsigned int init_size_rw, core_size_rw;
53600
53601 /* The size of the executable code in each section. */
53602- unsigned int init_text_size, core_text_size;
53603-
53604- /* Size of RO sections of the module (text+rodata) */
53605- unsigned int init_ro_size, core_ro_size;
53606+ unsigned int init_size_rx, core_size_rx;
53607
53608 /* Arch-specific module values */
53609 struct mod_arch_specific arch;
53610@@ -393,6 +391,10 @@ struct module
53611 #ifdef CONFIG_EVENT_TRACING
53612 struct ftrace_event_call **trace_events;
53613 unsigned int num_trace_events;
53614+ struct file_operations trace_id;
53615+ struct file_operations trace_enable;
53616+ struct file_operations trace_format;
53617+ struct file_operations trace_filter;
53618 #endif
53619 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53620 unsigned int num_ftrace_callsites;
53621@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53622 bool is_module_percpu_address(unsigned long addr);
53623 bool is_module_text_address(unsigned long addr);
53624
53625+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53626+{
53627+
53628+#ifdef CONFIG_PAX_KERNEXEC
53629+ if (ktla_ktva(addr) >= (unsigned long)start &&
53630+ ktla_ktva(addr) < (unsigned long)start + size)
53631+ return 1;
53632+#endif
53633+
53634+ return ((void *)addr >= start && (void *)addr < start + size);
53635+}
53636+
53637+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53638+{
53639+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53640+}
53641+
53642+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53643+{
53644+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53645+}
53646+
53647+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53648+{
53649+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53650+}
53651+
53652+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53653+{
53654+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53655+}
53656+
53657 static inline int within_module_core(unsigned long addr, struct module *mod)
53658 {
53659- return (unsigned long)mod->module_core <= addr &&
53660- addr < (unsigned long)mod->module_core + mod->core_size;
53661+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53662 }
53663
53664 static inline int within_module_init(unsigned long addr, struct module *mod)
53665 {
53666- return (unsigned long)mod->module_init <= addr &&
53667- addr < (unsigned long)mod->module_init + mod->init_size;
53668+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53669 }
53670
53671 /* Search for module by name: must hold module_mutex. */
53672diff -urNp linux-3.0.3/include/linux/moduleloader.h linux-3.0.3/include/linux/moduleloader.h
53673--- linux-3.0.3/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53674+++ linux-3.0.3/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53675@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53676 sections. Returns NULL on failure. */
53677 void *module_alloc(unsigned long size);
53678
53679+#ifdef CONFIG_PAX_KERNEXEC
53680+void *module_alloc_exec(unsigned long size);
53681+#else
53682+#define module_alloc_exec(x) module_alloc(x)
53683+#endif
53684+
53685 /* Free memory returned from module_alloc. */
53686 void module_free(struct module *mod, void *module_region);
53687
53688+#ifdef CONFIG_PAX_KERNEXEC
53689+void module_free_exec(struct module *mod, void *module_region);
53690+#else
53691+#define module_free_exec(x, y) module_free((x), (y))
53692+#endif
53693+
53694 /* Apply the given relocation to the (simplified) ELF. Return -error
53695 or 0. */
53696 int apply_relocate(Elf_Shdr *sechdrs,
53697diff -urNp linux-3.0.3/include/linux/moduleparam.h linux-3.0.3/include/linux/moduleparam.h
53698--- linux-3.0.3/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53699+++ linux-3.0.3/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53700@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53701 * @len is usually just sizeof(string).
53702 */
53703 #define module_param_string(name, string, len, perm) \
53704- static const struct kparam_string __param_string_##name \
53705+ static const struct kparam_string __param_string_##name __used \
53706 = { len, string }; \
53707 __module_param_call(MODULE_PARAM_PREFIX, name, \
53708 &param_ops_string, \
53709@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53710 * module_param_named() for why this might be necessary.
53711 */
53712 #define module_param_array_named(name, array, type, nump, perm) \
53713- static const struct kparam_array __param_arr_##name \
53714+ static const struct kparam_array __param_arr_##name __used \
53715 = { .max = ARRAY_SIZE(array), .num = nump, \
53716 .ops = &param_ops_##type, \
53717 .elemsize = sizeof(array[0]), .elem = array }; \
53718diff -urNp linux-3.0.3/include/linux/namei.h linux-3.0.3/include/linux/namei.h
53719--- linux-3.0.3/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53720+++ linux-3.0.3/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53721@@ -24,7 +24,7 @@ struct nameidata {
53722 unsigned seq;
53723 int last_type;
53724 unsigned depth;
53725- char *saved_names[MAX_NESTED_LINKS + 1];
53726+ const char *saved_names[MAX_NESTED_LINKS + 1];
53727
53728 /* Intent data */
53729 union {
53730@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53731 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53732 extern void unlock_rename(struct dentry *, struct dentry *);
53733
53734-static inline void nd_set_link(struct nameidata *nd, char *path)
53735+static inline void nd_set_link(struct nameidata *nd, const char *path)
53736 {
53737 nd->saved_names[nd->depth] = path;
53738 }
53739
53740-static inline char *nd_get_link(struct nameidata *nd)
53741+static inline const char *nd_get_link(const struct nameidata *nd)
53742 {
53743 return nd->saved_names[nd->depth];
53744 }
53745diff -urNp linux-3.0.3/include/linux/netdevice.h linux-3.0.3/include/linux/netdevice.h
53746--- linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53747+++ linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53748@@ -979,6 +979,7 @@ struct net_device_ops {
53749 int (*ndo_set_features)(struct net_device *dev,
53750 u32 features);
53751 };
53752+typedef struct net_device_ops __no_const net_device_ops_no_const;
53753
53754 /*
53755 * The DEVICE structure.
53756diff -urNp linux-3.0.3/include/linux/netfilter/xt_gradm.h linux-3.0.3/include/linux/netfilter/xt_gradm.h
53757--- linux-3.0.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53758+++ linux-3.0.3/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53759@@ -0,0 +1,9 @@
53760+#ifndef _LINUX_NETFILTER_XT_GRADM_H
53761+#define _LINUX_NETFILTER_XT_GRADM_H 1
53762+
53763+struct xt_gradm_mtinfo {
53764+ __u16 flags;
53765+ __u16 invflags;
53766+};
53767+
53768+#endif
53769diff -urNp linux-3.0.3/include/linux/oprofile.h linux-3.0.3/include/linux/oprofile.h
53770--- linux-3.0.3/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53771+++ linux-3.0.3/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53772@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53773 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53774 char const * name, ulong * val);
53775
53776-/** Create a file for read-only access to an atomic_t. */
53777+/** Create a file for read-only access to an atomic_unchecked_t. */
53778 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53779- char const * name, atomic_t * val);
53780+ char const * name, atomic_unchecked_t * val);
53781
53782 /** create a directory */
53783 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53784diff -urNp linux-3.0.3/include/linux/padata.h linux-3.0.3/include/linux/padata.h
53785--- linux-3.0.3/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53786+++ linux-3.0.3/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53787@@ -129,7 +129,7 @@ struct parallel_data {
53788 struct padata_instance *pinst;
53789 struct padata_parallel_queue __percpu *pqueue;
53790 struct padata_serial_queue __percpu *squeue;
53791- atomic_t seq_nr;
53792+ atomic_unchecked_t seq_nr;
53793 atomic_t reorder_objects;
53794 atomic_t refcnt;
53795 unsigned int max_seq_nr;
53796diff -urNp linux-3.0.3/include/linux/perf_event.h linux-3.0.3/include/linux/perf_event.h
53797--- linux-3.0.3/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53798+++ linux-3.0.3/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53799@@ -761,8 +761,8 @@ struct perf_event {
53800
53801 enum perf_event_active_state state;
53802 unsigned int attach_state;
53803- local64_t count;
53804- atomic64_t child_count;
53805+ local64_t count; /* PaX: fix it one day */
53806+ atomic64_unchecked_t child_count;
53807
53808 /*
53809 * These are the total time in nanoseconds that the event
53810@@ -813,8 +813,8 @@ struct perf_event {
53811 * These accumulate total time (in nanoseconds) that children
53812 * events have been enabled and running, respectively.
53813 */
53814- atomic64_t child_total_time_enabled;
53815- atomic64_t child_total_time_running;
53816+ atomic64_unchecked_t child_total_time_enabled;
53817+ atomic64_unchecked_t child_total_time_running;
53818
53819 /*
53820 * Protect attach/detach and child_list:
53821diff -urNp linux-3.0.3/include/linux/pipe_fs_i.h linux-3.0.3/include/linux/pipe_fs_i.h
53822--- linux-3.0.3/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53823+++ linux-3.0.3/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53824@@ -46,9 +46,9 @@ struct pipe_buffer {
53825 struct pipe_inode_info {
53826 wait_queue_head_t wait;
53827 unsigned int nrbufs, curbuf, buffers;
53828- unsigned int readers;
53829- unsigned int writers;
53830- unsigned int waiting_writers;
53831+ atomic_t readers;
53832+ atomic_t writers;
53833+ atomic_t waiting_writers;
53834 unsigned int r_counter;
53835 unsigned int w_counter;
53836 struct page *tmp_page;
53837diff -urNp linux-3.0.3/include/linux/pm_runtime.h linux-3.0.3/include/linux/pm_runtime.h
53838--- linux-3.0.3/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53839+++ linux-3.0.3/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53840@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53841
53842 static inline void pm_runtime_mark_last_busy(struct device *dev)
53843 {
53844- ACCESS_ONCE(dev->power.last_busy) = jiffies;
53845+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53846 }
53847
53848 #else /* !CONFIG_PM_RUNTIME */
53849diff -urNp linux-3.0.3/include/linux/poison.h linux-3.0.3/include/linux/poison.h
53850--- linux-3.0.3/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53851+++ linux-3.0.3/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53852@@ -19,8 +19,8 @@
53853 * under normal circumstances, used to verify that nobody uses
53854 * non-initialized list entries.
53855 */
53856-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53857-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53858+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53859+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53860
53861 /********** include/linux/timer.h **********/
53862 /*
53863diff -urNp linux-3.0.3/include/linux/preempt.h linux-3.0.3/include/linux/preempt.h
53864--- linux-3.0.3/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53865+++ linux-3.0.3/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53866@@ -115,7 +115,7 @@ struct preempt_ops {
53867 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53868 void (*sched_out)(struct preempt_notifier *notifier,
53869 struct task_struct *next);
53870-};
53871+} __no_const;
53872
53873 /**
53874 * preempt_notifier - key for installing preemption notifiers
53875diff -urNp linux-3.0.3/include/linux/proc_fs.h linux-3.0.3/include/linux/proc_fs.h
53876--- linux-3.0.3/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53877+++ linux-3.0.3/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53878@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53879 return proc_create_data(name, mode, parent, proc_fops, NULL);
53880 }
53881
53882+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53883+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53884+{
53885+#ifdef CONFIG_GRKERNSEC_PROC_USER
53886+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53887+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53888+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53889+#else
53890+ return proc_create_data(name, mode, parent, proc_fops, NULL);
53891+#endif
53892+}
53893+
53894+
53895 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53896 mode_t mode, struct proc_dir_entry *base,
53897 read_proc_t *read_proc, void * data)
53898@@ -258,7 +271,7 @@ union proc_op {
53899 int (*proc_show)(struct seq_file *m,
53900 struct pid_namespace *ns, struct pid *pid,
53901 struct task_struct *task);
53902-};
53903+} __no_const;
53904
53905 struct ctl_table_header;
53906 struct ctl_table;
53907diff -urNp linux-3.0.3/include/linux/ptrace.h linux-3.0.3/include/linux/ptrace.h
53908--- linux-3.0.3/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53909+++ linux-3.0.3/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53910@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53911 extern void exit_ptrace(struct task_struct *tracer);
53912 #define PTRACE_MODE_READ 1
53913 #define PTRACE_MODE_ATTACH 2
53914-/* Returns 0 on success, -errno on denial. */
53915-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53916 /* Returns true on success, false on denial. */
53917 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53918+/* Returns true on success, false on denial. */
53919+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53920
53921 static inline int ptrace_reparented(struct task_struct *child)
53922 {
53923diff -urNp linux-3.0.3/include/linux/random.h linux-3.0.3/include/linux/random.h
53924--- linux-3.0.3/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53925+++ linux-3.0.3/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53926@@ -69,12 +69,17 @@ void srandom32(u32 seed);
53927
53928 u32 prandom32(struct rnd_state *);
53929
53930+static inline unsigned long pax_get_random_long(void)
53931+{
53932+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53933+}
53934+
53935 /*
53936 * Handle minimum values for seeds
53937 */
53938 static inline u32 __seed(u32 x, u32 m)
53939 {
53940- return (x < m) ? x + m : x;
53941+ return (x <= m) ? x + m + 1 : x;
53942 }
53943
53944 /**
53945diff -urNp linux-3.0.3/include/linux/reboot.h linux-3.0.3/include/linux/reboot.h
53946--- linux-3.0.3/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53947+++ linux-3.0.3/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53948@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53949 * Architecture-specific implementations of sys_reboot commands.
53950 */
53951
53952-extern void machine_restart(char *cmd);
53953-extern void machine_halt(void);
53954-extern void machine_power_off(void);
53955+extern void machine_restart(char *cmd) __noreturn;
53956+extern void machine_halt(void) __noreturn;
53957+extern void machine_power_off(void) __noreturn;
53958
53959 extern void machine_shutdown(void);
53960 struct pt_regs;
53961@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53962 */
53963
53964 extern void kernel_restart_prepare(char *cmd);
53965-extern void kernel_restart(char *cmd);
53966-extern void kernel_halt(void);
53967-extern void kernel_power_off(void);
53968+extern void kernel_restart(char *cmd) __noreturn;
53969+extern void kernel_halt(void) __noreturn;
53970+extern void kernel_power_off(void) __noreturn;
53971
53972 extern int C_A_D; /* for sysctl */
53973 void ctrl_alt_del(void);
53974@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53975 * Emergency restart, callable from an interrupt handler.
53976 */
53977
53978-extern void emergency_restart(void);
53979+extern void emergency_restart(void) __noreturn;
53980 #include <asm/emergency-restart.h>
53981
53982 #endif
53983diff -urNp linux-3.0.3/include/linux/reiserfs_fs.h linux-3.0.3/include/linux/reiserfs_fs.h
53984--- linux-3.0.3/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53985+++ linux-3.0.3/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53986@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53987 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
53988
53989 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
53990-#define get_generation(s) atomic_read (&fs_generation(s))
53991+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
53992 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
53993 #define __fs_changed(gen,s) (gen != get_generation (s))
53994 #define fs_changed(gen,s) \
53995diff -urNp linux-3.0.3/include/linux/reiserfs_fs_sb.h linux-3.0.3/include/linux/reiserfs_fs_sb.h
53996--- linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
53997+++ linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
53998@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
53999 /* Comment? -Hans */
54000 wait_queue_head_t s_wait;
54001 /* To be obsoleted soon by per buffer seals.. -Hans */
54002- atomic_t s_generation_counter; // increased by one every time the
54003+ atomic_unchecked_t s_generation_counter; // increased by one every time the
54004 // tree gets re-balanced
54005 unsigned long s_properties; /* File system properties. Currently holds
54006 on-disk FS format */
54007diff -urNp linux-3.0.3/include/linux/relay.h linux-3.0.3/include/linux/relay.h
54008--- linux-3.0.3/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
54009+++ linux-3.0.3/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
54010@@ -159,7 +159,7 @@ struct rchan_callbacks
54011 * The callback should return 0 if successful, negative if not.
54012 */
54013 int (*remove_buf_file)(struct dentry *dentry);
54014-};
54015+} __no_const;
54016
54017 /*
54018 * CONFIG_RELAY kernel API, kernel/relay.c
54019diff -urNp linux-3.0.3/include/linux/rfkill.h linux-3.0.3/include/linux/rfkill.h
54020--- linux-3.0.3/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
54021+++ linux-3.0.3/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
54022@@ -147,6 +147,7 @@ struct rfkill_ops {
54023 void (*query)(struct rfkill *rfkill, void *data);
54024 int (*set_block)(void *data, bool blocked);
54025 };
54026+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54027
54028 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54029 /**
54030diff -urNp linux-3.0.3/include/linux/rmap.h linux-3.0.3/include/linux/rmap.h
54031--- linux-3.0.3/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
54032+++ linux-3.0.3/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
54033@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54034 void anon_vma_init(void); /* create anon_vma_cachep */
54035 int anon_vma_prepare(struct vm_area_struct *);
54036 void unlink_anon_vmas(struct vm_area_struct *);
54037-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54038-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54039+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54040+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54041 void __anon_vma_link(struct vm_area_struct *);
54042
54043 static inline void anon_vma_merge(struct vm_area_struct *vma,
54044diff -urNp linux-3.0.3/include/linux/sched.h linux-3.0.3/include/linux/sched.h
54045--- linux-3.0.3/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
54046+++ linux-3.0.3/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
54047@@ -100,6 +100,7 @@ struct bio_list;
54048 struct fs_struct;
54049 struct perf_event_context;
54050 struct blk_plug;
54051+struct linux_binprm;
54052
54053 /*
54054 * List of flags we want to share for kernel threads,
54055@@ -380,10 +381,13 @@ struct user_namespace;
54056 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54057
54058 extern int sysctl_max_map_count;
54059+extern unsigned long sysctl_heap_stack_gap;
54060
54061 #include <linux/aio.h>
54062
54063 #ifdef CONFIG_MMU
54064+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54065+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54066 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54067 extern unsigned long
54068 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54069@@ -629,6 +633,17 @@ struct signal_struct {
54070 #ifdef CONFIG_TASKSTATS
54071 struct taskstats *stats;
54072 #endif
54073+
54074+#ifdef CONFIG_GRKERNSEC
54075+ u32 curr_ip;
54076+ u32 saved_ip;
54077+ u32 gr_saddr;
54078+ u32 gr_daddr;
54079+ u16 gr_sport;
54080+ u16 gr_dport;
54081+ u8 used_accept:1;
54082+#endif
54083+
54084 #ifdef CONFIG_AUDIT
54085 unsigned audit_tty;
54086 struct tty_audit_buf *tty_audit_buf;
54087@@ -710,6 +725,11 @@ struct user_struct {
54088 struct key *session_keyring; /* UID's default session keyring */
54089 #endif
54090
54091+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54092+ unsigned int banned;
54093+ unsigned long ban_expires;
54094+#endif
54095+
54096 /* Hash table maintenance information */
54097 struct hlist_node uidhash_node;
54098 uid_t uid;
54099@@ -1340,8 +1360,8 @@ struct task_struct {
54100 struct list_head thread_group;
54101
54102 struct completion *vfork_done; /* for vfork() */
54103- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54104- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54105+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54106+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54107
54108 cputime_t utime, stime, utimescaled, stimescaled;
54109 cputime_t gtime;
54110@@ -1357,13 +1377,6 @@ struct task_struct {
54111 struct task_cputime cputime_expires;
54112 struct list_head cpu_timers[3];
54113
54114-/* process credentials */
54115- const struct cred __rcu *real_cred; /* objective and real subjective task
54116- * credentials (COW) */
54117- const struct cred __rcu *cred; /* effective (overridable) subjective task
54118- * credentials (COW) */
54119- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54120-
54121 char comm[TASK_COMM_LEN]; /* executable name excluding path
54122 - access with [gs]et_task_comm (which lock
54123 it with task_lock())
54124@@ -1380,8 +1393,16 @@ struct task_struct {
54125 #endif
54126 /* CPU-specific state of this task */
54127 struct thread_struct thread;
54128+/* thread_info moved to task_struct */
54129+#ifdef CONFIG_X86
54130+ struct thread_info tinfo;
54131+#endif
54132 /* filesystem information */
54133 struct fs_struct *fs;
54134+
54135+ const struct cred __rcu *cred; /* effective (overridable) subjective task
54136+ * credentials (COW) */
54137+
54138 /* open file information */
54139 struct files_struct *files;
54140 /* namespaces */
54141@@ -1428,6 +1449,11 @@ struct task_struct {
54142 struct rt_mutex_waiter *pi_blocked_on;
54143 #endif
54144
54145+/* process credentials */
54146+ const struct cred __rcu *real_cred; /* objective and real subjective task
54147+ * credentials (COW) */
54148+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54149+
54150 #ifdef CONFIG_DEBUG_MUTEXES
54151 /* mutex deadlock detection */
54152 struct mutex_waiter *blocked_on;
54153@@ -1538,6 +1564,21 @@ struct task_struct {
54154 unsigned long default_timer_slack_ns;
54155
54156 struct list_head *scm_work_list;
54157+
54158+#ifdef CONFIG_GRKERNSEC
54159+ /* grsecurity */
54160+ struct dentry *gr_chroot_dentry;
54161+ struct acl_subject_label *acl;
54162+ struct acl_role_label *role;
54163+ struct file *exec_file;
54164+ u16 acl_role_id;
54165+ /* is this the task that authenticated to the special role */
54166+ u8 acl_sp_role;
54167+ u8 is_writable;
54168+ u8 brute;
54169+ u8 gr_is_chrooted;
54170+#endif
54171+
54172 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54173 /* Index of current stored address in ret_stack */
54174 int curr_ret_stack;
54175@@ -1572,6 +1613,57 @@ struct task_struct {
54176 #endif
54177 };
54178
54179+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54180+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54181+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54182+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54183+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54184+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54185+
54186+#ifdef CONFIG_PAX_SOFTMODE
54187+extern int pax_softmode;
54188+#endif
54189+
54190+extern int pax_check_flags(unsigned long *);
54191+
54192+/* if tsk != current then task_lock must be held on it */
54193+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54194+static inline unsigned long pax_get_flags(struct task_struct *tsk)
54195+{
54196+ if (likely(tsk->mm))
54197+ return tsk->mm->pax_flags;
54198+ else
54199+ return 0UL;
54200+}
54201+
54202+/* if tsk != current then task_lock must be held on it */
54203+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54204+{
54205+ if (likely(tsk->mm)) {
54206+ tsk->mm->pax_flags = flags;
54207+ return 0;
54208+ }
54209+ return -EINVAL;
54210+}
54211+#endif
54212+
54213+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54214+extern void pax_set_initial_flags(struct linux_binprm *bprm);
54215+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54216+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54217+#endif
54218+
54219+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54220+extern void pax_report_insns(void *pc, void *sp);
54221+extern void pax_report_refcount_overflow(struct pt_regs *regs);
54222+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54223+
54224+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54225+extern void pax_track_stack(void);
54226+#else
54227+static inline void pax_track_stack(void) {}
54228+#endif
54229+
54230 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54231 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54232
54233@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
54234 #define PF_DUMPCORE 0x00000200 /* dumped core */
54235 #define PF_SIGNALED 0x00000400 /* killed by a signal */
54236 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
54237+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
54238 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
54239 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
54240 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
54241@@ -2056,7 +2149,9 @@ void yield(void);
54242 extern struct exec_domain default_exec_domain;
54243
54244 union thread_union {
54245+#ifndef CONFIG_X86
54246 struct thread_info thread_info;
54247+#endif
54248 unsigned long stack[THREAD_SIZE/sizeof(long)];
54249 };
54250
54251@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
54252 */
54253
54254 extern struct task_struct *find_task_by_vpid(pid_t nr);
54255+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54256 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54257 struct pid_namespace *ns);
54258
54259@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
54260 extern void exit_itimers(struct signal_struct *);
54261 extern void flush_itimer_signals(void);
54262
54263-extern NORET_TYPE void do_group_exit(int);
54264+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54265
54266 extern void daemonize(const char *, ...);
54267 extern int allow_signal(int);
54268@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
54269
54270 #endif
54271
54272-static inline int object_is_on_stack(void *obj)
54273+static inline int object_starts_on_stack(void *obj)
54274 {
54275- void *stack = task_stack_page(current);
54276+ const void *stack = task_stack_page(current);
54277
54278 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54279 }
54280
54281+#ifdef CONFIG_PAX_USERCOPY
54282+extern int object_is_on_stack(const void *obj, unsigned long len);
54283+#endif
54284+
54285 extern void thread_info_cache_init(void);
54286
54287 #ifdef CONFIG_DEBUG_STACK_USAGE
54288diff -urNp linux-3.0.3/include/linux/screen_info.h linux-3.0.3/include/linux/screen_info.h
54289--- linux-3.0.3/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54290+++ linux-3.0.3/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54291@@ -43,7 +43,8 @@ struct screen_info {
54292 __u16 pages; /* 0x32 */
54293 __u16 vesa_attributes; /* 0x34 */
54294 __u32 capabilities; /* 0x36 */
54295- __u8 _reserved[6]; /* 0x3a */
54296+ __u16 vesapm_size; /* 0x3a */
54297+ __u8 _reserved[4]; /* 0x3c */
54298 } __attribute__((packed));
54299
54300 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54301diff -urNp linux-3.0.3/include/linux/security.h linux-3.0.3/include/linux/security.h
54302--- linux-3.0.3/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54303+++ linux-3.0.3/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54304@@ -36,6 +36,7 @@
54305 #include <linux/key.h>
54306 #include <linux/xfrm.h>
54307 #include <linux/slab.h>
54308+#include <linux/grsecurity.h>
54309 #include <net/flow.h>
54310
54311 /* Maximum number of letters for an LSM name string */
54312diff -urNp linux-3.0.3/include/linux/seq_file.h linux-3.0.3/include/linux/seq_file.h
54313--- linux-3.0.3/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54314+++ linux-3.0.3/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54315@@ -32,6 +32,7 @@ struct seq_operations {
54316 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54317 int (*show) (struct seq_file *m, void *v);
54318 };
54319+typedef struct seq_operations __no_const seq_operations_no_const;
54320
54321 #define SEQ_SKIP 1
54322
54323diff -urNp linux-3.0.3/include/linux/shmem_fs.h linux-3.0.3/include/linux/shmem_fs.h
54324--- linux-3.0.3/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54325+++ linux-3.0.3/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54326@@ -10,7 +10,7 @@
54327
54328 #define SHMEM_NR_DIRECT 16
54329
54330-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54331+#define SHMEM_SYMLINK_INLINE_LEN 64
54332
54333 struct shmem_inode_info {
54334 spinlock_t lock;
54335diff -urNp linux-3.0.3/include/linux/shm.h linux-3.0.3/include/linux/shm.h
54336--- linux-3.0.3/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54337+++ linux-3.0.3/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54338@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54339 pid_t shm_cprid;
54340 pid_t shm_lprid;
54341 struct user_struct *mlock_user;
54342+#ifdef CONFIG_GRKERNSEC
54343+ time_t shm_createtime;
54344+ pid_t shm_lapid;
54345+#endif
54346 };
54347
54348 /* shm_mode upper byte flags */
54349diff -urNp linux-3.0.3/include/linux/skbuff.h linux-3.0.3/include/linux/skbuff.h
54350--- linux-3.0.3/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54351+++ linux-3.0.3/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54352@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54353 */
54354 static inline int skb_queue_empty(const struct sk_buff_head *list)
54355 {
54356- return list->next == (struct sk_buff *)list;
54357+ return list->next == (const struct sk_buff *)list;
54358 }
54359
54360 /**
54361@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54362 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54363 const struct sk_buff *skb)
54364 {
54365- return skb->next == (struct sk_buff *)list;
54366+ return skb->next == (const struct sk_buff *)list;
54367 }
54368
54369 /**
54370@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54371 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54372 const struct sk_buff *skb)
54373 {
54374- return skb->prev == (struct sk_buff *)list;
54375+ return skb->prev == (const struct sk_buff *)list;
54376 }
54377
54378 /**
54379@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54380 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54381 */
54382 #ifndef NET_SKB_PAD
54383-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54384+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54385 #endif
54386
54387 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54388diff -urNp linux-3.0.3/include/linux/slab_def.h linux-3.0.3/include/linux/slab_def.h
54389--- linux-3.0.3/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54390+++ linux-3.0.3/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54391@@ -96,10 +96,10 @@ struct kmem_cache {
54392 unsigned long node_allocs;
54393 unsigned long node_frees;
54394 unsigned long node_overflow;
54395- atomic_t allochit;
54396- atomic_t allocmiss;
54397- atomic_t freehit;
54398- atomic_t freemiss;
54399+ atomic_unchecked_t allochit;
54400+ atomic_unchecked_t allocmiss;
54401+ atomic_unchecked_t freehit;
54402+ atomic_unchecked_t freemiss;
54403
54404 /*
54405 * If debugging is enabled, then the allocator can add additional
54406diff -urNp linux-3.0.3/include/linux/slab.h linux-3.0.3/include/linux/slab.h
54407--- linux-3.0.3/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54408+++ linux-3.0.3/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54409@@ -11,12 +11,20 @@
54410
54411 #include <linux/gfp.h>
54412 #include <linux/types.h>
54413+#include <linux/err.h>
54414
54415 /*
54416 * Flags to pass to kmem_cache_create().
54417 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54418 */
54419 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54420+
54421+#ifdef CONFIG_PAX_USERCOPY
54422+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54423+#else
54424+#define SLAB_USERCOPY 0x00000000UL
54425+#endif
54426+
54427 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54428 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54429 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54430@@ -87,10 +95,13 @@
54431 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54432 * Both make kfree a no-op.
54433 */
54434-#define ZERO_SIZE_PTR ((void *)16)
54435+#define ZERO_SIZE_PTR \
54436+({ \
54437+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54438+ (void *)(-MAX_ERRNO-1L); \
54439+})
54440
54441-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54442- (unsigned long)ZERO_SIZE_PTR)
54443+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54444
54445 /*
54446 * struct kmem_cache related prototypes
54447@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54448 void kfree(const void *);
54449 void kzfree(const void *);
54450 size_t ksize(const void *);
54451+void check_object_size(const void *ptr, unsigned long n, bool to);
54452
54453 /*
54454 * Allocator specific definitions. These are mainly used to establish optimized
54455@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54456
54457 void __init kmem_cache_init_late(void);
54458
54459+#define kmalloc(x, y) \
54460+({ \
54461+ void *___retval; \
54462+ intoverflow_t ___x = (intoverflow_t)x; \
54463+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54464+ ___retval = NULL; \
54465+ else \
54466+ ___retval = kmalloc((size_t)___x, (y)); \
54467+ ___retval; \
54468+})
54469+
54470+#define kmalloc_node(x, y, z) \
54471+({ \
54472+ void *___retval; \
54473+ intoverflow_t ___x = (intoverflow_t)x; \
54474+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54475+ ___retval = NULL; \
54476+ else \
54477+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
54478+ ___retval; \
54479+})
54480+
54481+#define kzalloc(x, y) \
54482+({ \
54483+ void *___retval; \
54484+ intoverflow_t ___x = (intoverflow_t)x; \
54485+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54486+ ___retval = NULL; \
54487+ else \
54488+ ___retval = kzalloc((size_t)___x, (y)); \
54489+ ___retval; \
54490+})
54491+
54492+#define __krealloc(x, y, z) \
54493+({ \
54494+ void *___retval; \
54495+ intoverflow_t ___y = (intoverflow_t)y; \
54496+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54497+ ___retval = NULL; \
54498+ else \
54499+ ___retval = __krealloc((x), (size_t)___y, (z)); \
54500+ ___retval; \
54501+})
54502+
54503+#define krealloc(x, y, z) \
54504+({ \
54505+ void *___retval; \
54506+ intoverflow_t ___y = (intoverflow_t)y; \
54507+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54508+ ___retval = NULL; \
54509+ else \
54510+ ___retval = krealloc((x), (size_t)___y, (z)); \
54511+ ___retval; \
54512+})
54513+
54514 #endif /* _LINUX_SLAB_H */
54515diff -urNp linux-3.0.3/include/linux/slub_def.h linux-3.0.3/include/linux/slub_def.h
54516--- linux-3.0.3/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54517+++ linux-3.0.3/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54518@@ -82,7 +82,7 @@ struct kmem_cache {
54519 struct kmem_cache_order_objects max;
54520 struct kmem_cache_order_objects min;
54521 gfp_t allocflags; /* gfp flags to use on each alloc */
54522- int refcount; /* Refcount for slab cache destroy */
54523+ atomic_t refcount; /* Refcount for slab cache destroy */
54524 void (*ctor)(void *);
54525 int inuse; /* Offset to metadata */
54526 int align; /* Alignment */
54527@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54528 }
54529
54530 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54531-void *__kmalloc(size_t size, gfp_t flags);
54532+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54533
54534 static __always_inline void *
54535 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54536diff -urNp linux-3.0.3/include/linux/sonet.h linux-3.0.3/include/linux/sonet.h
54537--- linux-3.0.3/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54538+++ linux-3.0.3/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54539@@ -61,7 +61,7 @@ struct sonet_stats {
54540 #include <asm/atomic.h>
54541
54542 struct k_sonet_stats {
54543-#define __HANDLE_ITEM(i) atomic_t i
54544+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54545 __SONET_ITEMS
54546 #undef __HANDLE_ITEM
54547 };
54548diff -urNp linux-3.0.3/include/linux/sunrpc/clnt.h linux-3.0.3/include/linux/sunrpc/clnt.h
54549--- linux-3.0.3/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54550+++ linux-3.0.3/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54551@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54552 {
54553 switch (sap->sa_family) {
54554 case AF_INET:
54555- return ntohs(((struct sockaddr_in *)sap)->sin_port);
54556+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54557 case AF_INET6:
54558- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54559+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54560 }
54561 return 0;
54562 }
54563@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54564 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54565 const struct sockaddr *src)
54566 {
54567- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54568+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54569 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54570
54571 dsin->sin_family = ssin->sin_family;
54572@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54573 if (sa->sa_family != AF_INET6)
54574 return 0;
54575
54576- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54577+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54578 }
54579
54580 #endif /* __KERNEL__ */
54581diff -urNp linux-3.0.3/include/linux/sunrpc/svc_rdma.h linux-3.0.3/include/linux/sunrpc/svc_rdma.h
54582--- linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54583+++ linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54584@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54585 extern unsigned int svcrdma_max_requests;
54586 extern unsigned int svcrdma_max_req_size;
54587
54588-extern atomic_t rdma_stat_recv;
54589-extern atomic_t rdma_stat_read;
54590-extern atomic_t rdma_stat_write;
54591-extern atomic_t rdma_stat_sq_starve;
54592-extern atomic_t rdma_stat_rq_starve;
54593-extern atomic_t rdma_stat_rq_poll;
54594-extern atomic_t rdma_stat_rq_prod;
54595-extern atomic_t rdma_stat_sq_poll;
54596-extern atomic_t rdma_stat_sq_prod;
54597+extern atomic_unchecked_t rdma_stat_recv;
54598+extern atomic_unchecked_t rdma_stat_read;
54599+extern atomic_unchecked_t rdma_stat_write;
54600+extern atomic_unchecked_t rdma_stat_sq_starve;
54601+extern atomic_unchecked_t rdma_stat_rq_starve;
54602+extern atomic_unchecked_t rdma_stat_rq_poll;
54603+extern atomic_unchecked_t rdma_stat_rq_prod;
54604+extern atomic_unchecked_t rdma_stat_sq_poll;
54605+extern atomic_unchecked_t rdma_stat_sq_prod;
54606
54607 #define RPCRDMA_VERSION 1
54608
54609diff -urNp linux-3.0.3/include/linux/sysctl.h linux-3.0.3/include/linux/sysctl.h
54610--- linux-3.0.3/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54611+++ linux-3.0.3/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54612@@ -155,7 +155,11 @@ enum
54613 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54614 };
54615
54616-
54617+#ifdef CONFIG_PAX_SOFTMODE
54618+enum {
54619+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54620+};
54621+#endif
54622
54623 /* CTL_VM names: */
54624 enum
54625@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54626
54627 extern int proc_dostring(struct ctl_table *, int,
54628 void __user *, size_t *, loff_t *);
54629+extern int proc_dostring_modpriv(struct ctl_table *, int,
54630+ void __user *, size_t *, loff_t *);
54631 extern int proc_dointvec(struct ctl_table *, int,
54632 void __user *, size_t *, loff_t *);
54633 extern int proc_dointvec_minmax(struct ctl_table *, int,
54634diff -urNp linux-3.0.3/include/linux/tty_ldisc.h linux-3.0.3/include/linux/tty_ldisc.h
54635--- linux-3.0.3/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54636+++ linux-3.0.3/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54637@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54638
54639 struct module *owner;
54640
54641- int refcount;
54642+ atomic_t refcount;
54643 };
54644
54645 struct tty_ldisc {
54646diff -urNp linux-3.0.3/include/linux/types.h linux-3.0.3/include/linux/types.h
54647--- linux-3.0.3/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54648+++ linux-3.0.3/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54649@@ -213,10 +213,26 @@ typedef struct {
54650 int counter;
54651 } atomic_t;
54652
54653+#ifdef CONFIG_PAX_REFCOUNT
54654+typedef struct {
54655+ int counter;
54656+} atomic_unchecked_t;
54657+#else
54658+typedef atomic_t atomic_unchecked_t;
54659+#endif
54660+
54661 #ifdef CONFIG_64BIT
54662 typedef struct {
54663 long counter;
54664 } atomic64_t;
54665+
54666+#ifdef CONFIG_PAX_REFCOUNT
54667+typedef struct {
54668+ long counter;
54669+} atomic64_unchecked_t;
54670+#else
54671+typedef atomic64_t atomic64_unchecked_t;
54672+#endif
54673 #endif
54674
54675 struct list_head {
54676diff -urNp linux-3.0.3/include/linux/uaccess.h linux-3.0.3/include/linux/uaccess.h
54677--- linux-3.0.3/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54678+++ linux-3.0.3/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54679@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54680 long ret; \
54681 mm_segment_t old_fs = get_fs(); \
54682 \
54683- set_fs(KERNEL_DS); \
54684 pagefault_disable(); \
54685+ set_fs(KERNEL_DS); \
54686 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54687- pagefault_enable(); \
54688 set_fs(old_fs); \
54689+ pagefault_enable(); \
54690 ret; \
54691 })
54692
54693diff -urNp linux-3.0.3/include/linux/unaligned/access_ok.h linux-3.0.3/include/linux/unaligned/access_ok.h
54694--- linux-3.0.3/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54695+++ linux-3.0.3/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54696@@ -6,32 +6,32 @@
54697
54698 static inline u16 get_unaligned_le16(const void *p)
54699 {
54700- return le16_to_cpup((__le16 *)p);
54701+ return le16_to_cpup((const __le16 *)p);
54702 }
54703
54704 static inline u32 get_unaligned_le32(const void *p)
54705 {
54706- return le32_to_cpup((__le32 *)p);
54707+ return le32_to_cpup((const __le32 *)p);
54708 }
54709
54710 static inline u64 get_unaligned_le64(const void *p)
54711 {
54712- return le64_to_cpup((__le64 *)p);
54713+ return le64_to_cpup((const __le64 *)p);
54714 }
54715
54716 static inline u16 get_unaligned_be16(const void *p)
54717 {
54718- return be16_to_cpup((__be16 *)p);
54719+ return be16_to_cpup((const __be16 *)p);
54720 }
54721
54722 static inline u32 get_unaligned_be32(const void *p)
54723 {
54724- return be32_to_cpup((__be32 *)p);
54725+ return be32_to_cpup((const __be32 *)p);
54726 }
54727
54728 static inline u64 get_unaligned_be64(const void *p)
54729 {
54730- return be64_to_cpup((__be64 *)p);
54731+ return be64_to_cpup((const __be64 *)p);
54732 }
54733
54734 static inline void put_unaligned_le16(u16 val, void *p)
54735diff -urNp linux-3.0.3/include/linux/vmalloc.h linux-3.0.3/include/linux/vmalloc.h
54736--- linux-3.0.3/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54737+++ linux-3.0.3/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54738@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54739 #define VM_MAP 0x00000004 /* vmap()ed pages */
54740 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54741 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54742+
54743+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54744+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54745+#endif
54746+
54747 /* bits [20..32] reserved for arch specific ioremap internals */
54748
54749 /*
54750@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54751 # endif
54752 #endif
54753
54754+#define vmalloc(x) \
54755+({ \
54756+ void *___retval; \
54757+ intoverflow_t ___x = (intoverflow_t)x; \
54758+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54759+ ___retval = NULL; \
54760+ else \
54761+ ___retval = vmalloc((unsigned long)___x); \
54762+ ___retval; \
54763+})
54764+
54765+#define vzalloc(x) \
54766+({ \
54767+ void *___retval; \
54768+ intoverflow_t ___x = (intoverflow_t)x; \
54769+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54770+ ___retval = NULL; \
54771+ else \
54772+ ___retval = vzalloc((unsigned long)___x); \
54773+ ___retval; \
54774+})
54775+
54776+#define __vmalloc(x, y, z) \
54777+({ \
54778+ void *___retval; \
54779+ intoverflow_t ___x = (intoverflow_t)x; \
54780+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54781+ ___retval = NULL; \
54782+ else \
54783+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54784+ ___retval; \
54785+})
54786+
54787+#define vmalloc_user(x) \
54788+({ \
54789+ void *___retval; \
54790+ intoverflow_t ___x = (intoverflow_t)x; \
54791+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54792+ ___retval = NULL; \
54793+ else \
54794+ ___retval = vmalloc_user((unsigned long)___x); \
54795+ ___retval; \
54796+})
54797+
54798+#define vmalloc_exec(x) \
54799+({ \
54800+ void *___retval; \
54801+ intoverflow_t ___x = (intoverflow_t)x; \
54802+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54803+ ___retval = NULL; \
54804+ else \
54805+ ___retval = vmalloc_exec((unsigned long)___x); \
54806+ ___retval; \
54807+})
54808+
54809+#define vmalloc_node(x, y) \
54810+({ \
54811+ void *___retval; \
54812+ intoverflow_t ___x = (intoverflow_t)x; \
54813+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54814+ ___retval = NULL; \
54815+ else \
54816+ ___retval = vmalloc_node((unsigned long)___x, (y));\
54817+ ___retval; \
54818+})
54819+
54820+#define vzalloc_node(x, y) \
54821+({ \
54822+ void *___retval; \
54823+ intoverflow_t ___x = (intoverflow_t)x; \
54824+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54825+ ___retval = NULL; \
54826+ else \
54827+ ___retval = vzalloc_node((unsigned long)___x, (y));\
54828+ ___retval; \
54829+})
54830+
54831+#define vmalloc_32(x) \
54832+({ \
54833+ void *___retval; \
54834+ intoverflow_t ___x = (intoverflow_t)x; \
54835+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54836+ ___retval = NULL; \
54837+ else \
54838+ ___retval = vmalloc_32((unsigned long)___x); \
54839+ ___retval; \
54840+})
54841+
54842+#define vmalloc_32_user(x) \
54843+({ \
54844+void *___retval; \
54845+ intoverflow_t ___x = (intoverflow_t)x; \
54846+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54847+ ___retval = NULL; \
54848+ else \
54849+ ___retval = vmalloc_32_user((unsigned long)___x);\
54850+ ___retval; \
54851+})
54852+
54853 #endif /* _LINUX_VMALLOC_H */
54854diff -urNp linux-3.0.3/include/linux/vmstat.h linux-3.0.3/include/linux/vmstat.h
54855--- linux-3.0.3/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54856+++ linux-3.0.3/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54857@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54858 /*
54859 * Zone based page accounting with per cpu differentials.
54860 */
54861-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54862+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54863
54864 static inline void zone_page_state_add(long x, struct zone *zone,
54865 enum zone_stat_item item)
54866 {
54867- atomic_long_add(x, &zone->vm_stat[item]);
54868- atomic_long_add(x, &vm_stat[item]);
54869+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54870+ atomic_long_add_unchecked(x, &vm_stat[item]);
54871 }
54872
54873 static inline unsigned long global_page_state(enum zone_stat_item item)
54874 {
54875- long x = atomic_long_read(&vm_stat[item]);
54876+ long x = atomic_long_read_unchecked(&vm_stat[item]);
54877 #ifdef CONFIG_SMP
54878 if (x < 0)
54879 x = 0;
54880@@ -109,7 +109,7 @@ static inline unsigned long global_page_
54881 static inline unsigned long zone_page_state(struct zone *zone,
54882 enum zone_stat_item item)
54883 {
54884- long x = atomic_long_read(&zone->vm_stat[item]);
54885+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54886 #ifdef CONFIG_SMP
54887 if (x < 0)
54888 x = 0;
54889@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54890 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54891 enum zone_stat_item item)
54892 {
54893- long x = atomic_long_read(&zone->vm_stat[item]);
54894+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54895
54896 #ifdef CONFIG_SMP
54897 int cpu;
54898@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54899
54900 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54901 {
54902- atomic_long_inc(&zone->vm_stat[item]);
54903- atomic_long_inc(&vm_stat[item]);
54904+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
54905+ atomic_long_inc_unchecked(&vm_stat[item]);
54906 }
54907
54908 static inline void __inc_zone_page_state(struct page *page,
54909@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54910
54911 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54912 {
54913- atomic_long_dec(&zone->vm_stat[item]);
54914- atomic_long_dec(&vm_stat[item]);
54915+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
54916+ atomic_long_dec_unchecked(&vm_stat[item]);
54917 }
54918
54919 static inline void __dec_zone_page_state(struct page *page,
54920diff -urNp linux-3.0.3/include/media/saa7146_vv.h linux-3.0.3/include/media/saa7146_vv.h
54921--- linux-3.0.3/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
54922+++ linux-3.0.3/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
54923@@ -163,7 +163,7 @@ struct saa7146_ext_vv
54924 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54925
54926 /* the extension can override this */
54927- struct v4l2_ioctl_ops ops;
54928+ v4l2_ioctl_ops_no_const ops;
54929 /* pointer to the saa7146 core ops */
54930 const struct v4l2_ioctl_ops *core_ops;
54931
54932diff -urNp linux-3.0.3/include/media/v4l2-ioctl.h linux-3.0.3/include/media/v4l2-ioctl.h
54933--- linux-3.0.3/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
54934+++ linux-3.0.3/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
54935@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54936 long (*vidioc_default) (struct file *file, void *fh,
54937 bool valid_prio, int cmd, void *arg);
54938 };
54939+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54940
54941
54942 /* v4l debugging and diagnostics */
54943diff -urNp linux-3.0.3/include/net/caif/cfctrl.h linux-3.0.3/include/net/caif/cfctrl.h
54944--- linux-3.0.3/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54945+++ linux-3.0.3/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54946@@ -52,7 +52,7 @@ struct cfctrl_rsp {
54947 void (*radioset_rsp)(void);
54948 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54949 struct cflayer *client_layer);
54950-};
54951+} __no_const;
54952
54953 /* Link Setup Parameters for CAIF-Links. */
54954 struct cfctrl_link_param {
54955@@ -101,8 +101,8 @@ struct cfctrl_request_info {
54956 struct cfctrl {
54957 struct cfsrvl serv;
54958 struct cfctrl_rsp res;
54959- atomic_t req_seq_no;
54960- atomic_t rsp_seq_no;
54961+ atomic_unchecked_t req_seq_no;
54962+ atomic_unchecked_t rsp_seq_no;
54963 struct list_head list;
54964 /* Protects from simultaneous access to first_req list */
54965 spinlock_t info_list_lock;
54966diff -urNp linux-3.0.3/include/net/flow.h linux-3.0.3/include/net/flow.h
54967--- linux-3.0.3/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54968+++ linux-3.0.3/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54969@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54970 u8 dir, flow_resolve_t resolver, void *ctx);
54971
54972 extern void flow_cache_flush(void);
54973-extern atomic_t flow_cache_genid;
54974+extern atomic_unchecked_t flow_cache_genid;
54975
54976 #endif
54977diff -urNp linux-3.0.3/include/net/inetpeer.h linux-3.0.3/include/net/inetpeer.h
54978--- linux-3.0.3/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54979+++ linux-3.0.3/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54980@@ -43,8 +43,8 @@ struct inet_peer {
54981 */
54982 union {
54983 struct {
54984- atomic_t rid; /* Frag reception counter */
54985- atomic_t ip_id_count; /* IP ID for the next packet */
54986+ atomic_unchecked_t rid; /* Frag reception counter */
54987+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
54988 __u32 tcp_ts;
54989 __u32 tcp_ts_stamp;
54990 u32 metrics[RTAX_MAX];
54991@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
54992 {
54993 more++;
54994 inet_peer_refcheck(p);
54995- return atomic_add_return(more, &p->ip_id_count) - more;
54996+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
54997 }
54998
54999 #endif /* _NET_INETPEER_H */
55000diff -urNp linux-3.0.3/include/net/ip_fib.h linux-3.0.3/include/net/ip_fib.h
55001--- linux-3.0.3/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
55002+++ linux-3.0.3/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
55003@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55004
55005 #define FIB_RES_SADDR(net, res) \
55006 ((FIB_RES_NH(res).nh_saddr_genid == \
55007- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55008+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55009 FIB_RES_NH(res).nh_saddr : \
55010 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55011 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55012diff -urNp linux-3.0.3/include/net/ip_vs.h linux-3.0.3/include/net/ip_vs.h
55013--- linux-3.0.3/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
55014+++ linux-3.0.3/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
55015@@ -509,7 +509,7 @@ struct ip_vs_conn {
55016 struct ip_vs_conn *control; /* Master control connection */
55017 atomic_t n_control; /* Number of controlled ones */
55018 struct ip_vs_dest *dest; /* real server */
55019- atomic_t in_pkts; /* incoming packet counter */
55020+ atomic_unchecked_t in_pkts; /* incoming packet counter */
55021
55022 /* packet transmitter for different forwarding methods. If it
55023 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55024@@ -647,7 +647,7 @@ struct ip_vs_dest {
55025 __be16 port; /* port number of the server */
55026 union nf_inet_addr addr; /* IP address of the server */
55027 volatile unsigned flags; /* dest status flags */
55028- atomic_t conn_flags; /* flags to copy to conn */
55029+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
55030 atomic_t weight; /* server weight */
55031
55032 atomic_t refcnt; /* reference counter */
55033diff -urNp linux-3.0.3/include/net/irda/ircomm_core.h linux-3.0.3/include/net/irda/ircomm_core.h
55034--- linux-3.0.3/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
55035+++ linux-3.0.3/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
55036@@ -51,7 +51,7 @@ typedef struct {
55037 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55038 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55039 struct ircomm_info *);
55040-} call_t;
55041+} __no_const call_t;
55042
55043 struct ircomm_cb {
55044 irda_queue_t queue;
55045diff -urNp linux-3.0.3/include/net/irda/ircomm_tty.h linux-3.0.3/include/net/irda/ircomm_tty.h
55046--- linux-3.0.3/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
55047+++ linux-3.0.3/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
55048@@ -35,6 +35,7 @@
55049 #include <linux/termios.h>
55050 #include <linux/timer.h>
55051 #include <linux/tty.h> /* struct tty_struct */
55052+#include <asm/local.h>
55053
55054 #include <net/irda/irias_object.h>
55055 #include <net/irda/ircomm_core.h>
55056@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55057 unsigned short close_delay;
55058 unsigned short closing_wait; /* time to wait before closing */
55059
55060- int open_count;
55061- int blocked_open; /* # of blocked opens */
55062+ local_t open_count;
55063+ local_t blocked_open; /* # of blocked opens */
55064
55065 /* Protect concurent access to :
55066 * o self->open_count
55067diff -urNp linux-3.0.3/include/net/iucv/af_iucv.h linux-3.0.3/include/net/iucv/af_iucv.h
55068--- linux-3.0.3/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
55069+++ linux-3.0.3/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
55070@@ -87,7 +87,7 @@ struct iucv_sock {
55071 struct iucv_sock_list {
55072 struct hlist_head head;
55073 rwlock_t lock;
55074- atomic_t autobind_name;
55075+ atomic_unchecked_t autobind_name;
55076 };
55077
55078 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55079diff -urNp linux-3.0.3/include/net/lapb.h linux-3.0.3/include/net/lapb.h
55080--- linux-3.0.3/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
55081+++ linux-3.0.3/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
55082@@ -95,7 +95,7 @@ struct lapb_cb {
55083 struct sk_buff_head write_queue;
55084 struct sk_buff_head ack_queue;
55085 unsigned char window;
55086- struct lapb_register_struct callbacks;
55087+ struct lapb_register_struct *callbacks;
55088
55089 /* FRMR control information */
55090 struct lapb_frame frmr_data;
55091diff -urNp linux-3.0.3/include/net/neighbour.h linux-3.0.3/include/net/neighbour.h
55092--- linux-3.0.3/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
55093+++ linux-3.0.3/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
55094@@ -117,14 +117,14 @@ struct neighbour {
55095 };
55096
55097 struct neigh_ops {
55098- int family;
55099+ const int family;
55100 void (*solicit)(struct neighbour *, struct sk_buff*);
55101 void (*error_report)(struct neighbour *, struct sk_buff*);
55102 int (*output)(struct sk_buff*);
55103 int (*connected_output)(struct sk_buff*);
55104 int (*hh_output)(struct sk_buff*);
55105 int (*queue_xmit)(struct sk_buff*);
55106-};
55107+} __do_const;
55108
55109 struct pneigh_entry {
55110 struct pneigh_entry *next;
55111diff -urNp linux-3.0.3/include/net/netlink.h linux-3.0.3/include/net/netlink.h
55112--- linux-3.0.3/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
55113+++ linux-3.0.3/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
55114@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55115 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55116 {
55117 if (mark)
55118- skb_trim(skb, (unsigned char *) mark - skb->data);
55119+ skb_trim(skb, (const unsigned char *) mark - skb->data);
55120 }
55121
55122 /**
55123diff -urNp linux-3.0.3/include/net/netns/ipv4.h linux-3.0.3/include/net/netns/ipv4.h
55124--- linux-3.0.3/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
55125+++ linux-3.0.3/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
55126@@ -56,8 +56,8 @@ struct netns_ipv4 {
55127
55128 unsigned int sysctl_ping_group_range[2];
55129
55130- atomic_t rt_genid;
55131- atomic_t dev_addr_genid;
55132+ atomic_unchecked_t rt_genid;
55133+ atomic_unchecked_t dev_addr_genid;
55134
55135 #ifdef CONFIG_IP_MROUTE
55136 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55137diff -urNp linux-3.0.3/include/net/sctp/sctp.h linux-3.0.3/include/net/sctp/sctp.h
55138--- linux-3.0.3/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
55139+++ linux-3.0.3/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
55140@@ -315,9 +315,9 @@ do { \
55141
55142 #else /* SCTP_DEBUG */
55143
55144-#define SCTP_DEBUG_PRINTK(whatever...)
55145-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55146-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55147+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55148+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55149+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55150 #define SCTP_ENABLE_DEBUG
55151 #define SCTP_DISABLE_DEBUG
55152 #define SCTP_ASSERT(expr, str, func)
55153diff -urNp linux-3.0.3/include/net/sock.h linux-3.0.3/include/net/sock.h
55154--- linux-3.0.3/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
55155+++ linux-3.0.3/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
55156@@ -277,7 +277,7 @@ struct sock {
55157 #ifdef CONFIG_RPS
55158 __u32 sk_rxhash;
55159 #endif
55160- atomic_t sk_drops;
55161+ atomic_unchecked_t sk_drops;
55162 int sk_rcvbuf;
55163
55164 struct sk_filter __rcu *sk_filter;
55165@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
55166 }
55167
55168 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
55169- char __user *from, char *to,
55170+ char __user *from, unsigned char *to,
55171 int copy, int offset)
55172 {
55173 if (skb->ip_summed == CHECKSUM_NONE) {
55174diff -urNp linux-3.0.3/include/net/tcp.h linux-3.0.3/include/net/tcp.h
55175--- linux-3.0.3/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
55176+++ linux-3.0.3/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
55177@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55178 struct tcp_seq_afinfo {
55179 char *name;
55180 sa_family_t family;
55181- struct file_operations seq_fops;
55182- struct seq_operations seq_ops;
55183+ file_operations_no_const seq_fops;
55184+ seq_operations_no_const seq_ops;
55185 };
55186
55187 struct tcp_iter_state {
55188diff -urNp linux-3.0.3/include/net/udp.h linux-3.0.3/include/net/udp.h
55189--- linux-3.0.3/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
55190+++ linux-3.0.3/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
55191@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55192 char *name;
55193 sa_family_t family;
55194 struct udp_table *udp_table;
55195- struct file_operations seq_fops;
55196- struct seq_operations seq_ops;
55197+ file_operations_no_const seq_fops;
55198+ seq_operations_no_const seq_ops;
55199 };
55200
55201 struct udp_iter_state {
55202diff -urNp linux-3.0.3/include/net/xfrm.h linux-3.0.3/include/net/xfrm.h
55203--- linux-3.0.3/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
55204+++ linux-3.0.3/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
55205@@ -505,7 +505,7 @@ struct xfrm_policy {
55206 struct timer_list timer;
55207
55208 struct flow_cache_object flo;
55209- atomic_t genid;
55210+ atomic_unchecked_t genid;
55211 u32 priority;
55212 u32 index;
55213 struct xfrm_mark mark;
55214diff -urNp linux-3.0.3/include/rdma/iw_cm.h linux-3.0.3/include/rdma/iw_cm.h
55215--- linux-3.0.3/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
55216+++ linux-3.0.3/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
55217@@ -120,7 +120,7 @@ struct iw_cm_verbs {
55218 int backlog);
55219
55220 int (*destroy_listen)(struct iw_cm_id *cm_id);
55221-};
55222+} __no_const;
55223
55224 /**
55225 * iw_create_cm_id - Create an IW CM identifier.
55226diff -urNp linux-3.0.3/include/scsi/libfc.h linux-3.0.3/include/scsi/libfc.h
55227--- linux-3.0.3/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
55228+++ linux-3.0.3/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
55229@@ -750,6 +750,7 @@ struct libfc_function_template {
55230 */
55231 void (*disc_stop_final) (struct fc_lport *);
55232 };
55233+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55234
55235 /**
55236 * struct fc_disc - Discovery context
55237@@ -853,7 +854,7 @@ struct fc_lport {
55238 struct fc_vport *vport;
55239
55240 /* Operational Information */
55241- struct libfc_function_template tt;
55242+ libfc_function_template_no_const tt;
55243 u8 link_up;
55244 u8 qfull;
55245 enum fc_lport_state state;
55246diff -urNp linux-3.0.3/include/scsi/scsi_device.h linux-3.0.3/include/scsi/scsi_device.h
55247--- linux-3.0.3/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
55248+++ linux-3.0.3/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
55249@@ -161,9 +161,9 @@ struct scsi_device {
55250 unsigned int max_device_blocked; /* what device_blocked counts down from */
55251 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55252
55253- atomic_t iorequest_cnt;
55254- atomic_t iodone_cnt;
55255- atomic_t ioerr_cnt;
55256+ atomic_unchecked_t iorequest_cnt;
55257+ atomic_unchecked_t iodone_cnt;
55258+ atomic_unchecked_t ioerr_cnt;
55259
55260 struct device sdev_gendev,
55261 sdev_dev;
55262diff -urNp linux-3.0.3/include/scsi/scsi_transport_fc.h linux-3.0.3/include/scsi/scsi_transport_fc.h
55263--- linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
55264+++ linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
55265@@ -711,7 +711,7 @@ struct fc_function_template {
55266 unsigned long show_host_system_hostname:1;
55267
55268 unsigned long disable_target_scan:1;
55269-};
55270+} __do_const;
55271
55272
55273 /**
55274diff -urNp linux-3.0.3/include/sound/ak4xxx-adda.h linux-3.0.3/include/sound/ak4xxx-adda.h
55275--- linux-3.0.3/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55276+++ linux-3.0.3/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55277@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55278 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55279 unsigned char val);
55280 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55281-};
55282+} __no_const;
55283
55284 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55285
55286diff -urNp linux-3.0.3/include/sound/hwdep.h linux-3.0.3/include/sound/hwdep.h
55287--- linux-3.0.3/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55288+++ linux-3.0.3/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55289@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55290 struct snd_hwdep_dsp_status *status);
55291 int (*dsp_load)(struct snd_hwdep *hw,
55292 struct snd_hwdep_dsp_image *image);
55293-};
55294+} __no_const;
55295
55296 struct snd_hwdep {
55297 struct snd_card *card;
55298diff -urNp linux-3.0.3/include/sound/info.h linux-3.0.3/include/sound/info.h
55299--- linux-3.0.3/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55300+++ linux-3.0.3/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55301@@ -44,7 +44,7 @@ struct snd_info_entry_text {
55302 struct snd_info_buffer *buffer);
55303 void (*write)(struct snd_info_entry *entry,
55304 struct snd_info_buffer *buffer);
55305-};
55306+} __no_const;
55307
55308 struct snd_info_entry_ops {
55309 int (*open)(struct snd_info_entry *entry,
55310diff -urNp linux-3.0.3/include/sound/pcm.h linux-3.0.3/include/sound/pcm.h
55311--- linux-3.0.3/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55312+++ linux-3.0.3/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55313@@ -81,6 +81,7 @@ struct snd_pcm_ops {
55314 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55315 int (*ack)(struct snd_pcm_substream *substream);
55316 };
55317+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55318
55319 /*
55320 *
55321diff -urNp linux-3.0.3/include/sound/sb16_csp.h linux-3.0.3/include/sound/sb16_csp.h
55322--- linux-3.0.3/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55323+++ linux-3.0.3/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55324@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55325 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55326 int (*csp_stop) (struct snd_sb_csp * p);
55327 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55328-};
55329+} __no_const;
55330
55331 /*
55332 * CSP private data
55333diff -urNp linux-3.0.3/include/sound/soc.h linux-3.0.3/include/sound/soc.h
55334--- linux-3.0.3/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55335+++ linux-3.0.3/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
55336@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
55337
55338 /* platform stream ops */
55339 struct snd_pcm_ops *ops;
55340-};
55341+} __do_const;
55342
55343 struct snd_soc_platform {
55344 const char *name;
55345diff -urNp linux-3.0.3/include/sound/ymfpci.h linux-3.0.3/include/sound/ymfpci.h
55346--- linux-3.0.3/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55347+++ linux-3.0.3/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55348@@ -358,7 +358,7 @@ struct snd_ymfpci {
55349 spinlock_t reg_lock;
55350 spinlock_t voice_lock;
55351 wait_queue_head_t interrupt_sleep;
55352- atomic_t interrupt_sleep_count;
55353+ atomic_unchecked_t interrupt_sleep_count;
55354 struct snd_info_entry *proc_entry;
55355 const struct firmware *dsp_microcode;
55356 const struct firmware *controller_microcode;
55357diff -urNp linux-3.0.3/include/target/target_core_base.h linux-3.0.3/include/target/target_core_base.h
55358--- linux-3.0.3/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55359+++ linux-3.0.3/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55360@@ -364,7 +364,7 @@ struct t10_reservation_ops {
55361 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55362 int (*t10_pr_register)(struct se_cmd *);
55363 int (*t10_pr_clear)(struct se_cmd *);
55364-};
55365+} __no_const;
55366
55367 struct t10_reservation_template {
55368 /* Reservation effects all target ports */
55369@@ -432,8 +432,8 @@ struct se_transport_task {
55370 atomic_t t_task_cdbs_left;
55371 atomic_t t_task_cdbs_ex_left;
55372 atomic_t t_task_cdbs_timeout_left;
55373- atomic_t t_task_cdbs_sent;
55374- atomic_t t_transport_aborted;
55375+ atomic_unchecked_t t_task_cdbs_sent;
55376+ atomic_unchecked_t t_transport_aborted;
55377 atomic_t t_transport_active;
55378 atomic_t t_transport_complete;
55379 atomic_t t_transport_queue_active;
55380@@ -774,7 +774,7 @@ struct se_device {
55381 atomic_t active_cmds;
55382 atomic_t simple_cmds;
55383 atomic_t depth_left;
55384- atomic_t dev_ordered_id;
55385+ atomic_unchecked_t dev_ordered_id;
55386 atomic_t dev_tur_active;
55387 atomic_t execute_tasks;
55388 atomic_t dev_status_thr_count;
55389diff -urNp linux-3.0.3/include/trace/events/irq.h linux-3.0.3/include/trace/events/irq.h
55390--- linux-3.0.3/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55391+++ linux-3.0.3/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55392@@ -36,7 +36,7 @@ struct softirq_action;
55393 */
55394 TRACE_EVENT(irq_handler_entry,
55395
55396- TP_PROTO(int irq, struct irqaction *action),
55397+ TP_PROTO(int irq, const struct irqaction *action),
55398
55399 TP_ARGS(irq, action),
55400
55401@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55402 */
55403 TRACE_EVENT(irq_handler_exit,
55404
55405- TP_PROTO(int irq, struct irqaction *action, int ret),
55406+ TP_PROTO(int irq, const struct irqaction *action, int ret),
55407
55408 TP_ARGS(irq, action, ret),
55409
55410diff -urNp linux-3.0.3/include/video/udlfb.h linux-3.0.3/include/video/udlfb.h
55411--- linux-3.0.3/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55412+++ linux-3.0.3/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55413@@ -51,10 +51,10 @@ struct dlfb_data {
55414 int base8;
55415 u32 pseudo_palette[256];
55416 /* blit-only rendering path metrics, exposed through sysfs */
55417- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55418- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55419- atomic_t bytes_sent; /* to usb, after compression including overhead */
55420- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55421+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55422+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55423+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55424+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55425 };
55426
55427 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55428diff -urNp linux-3.0.3/include/video/uvesafb.h linux-3.0.3/include/video/uvesafb.h
55429--- linux-3.0.3/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55430+++ linux-3.0.3/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55431@@ -177,6 +177,7 @@ struct uvesafb_par {
55432 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55433 u8 pmi_setpal; /* PMI for palette changes */
55434 u16 *pmi_base; /* protected mode interface location */
55435+ u8 *pmi_code; /* protected mode code location */
55436 void *pmi_start;
55437 void *pmi_pal;
55438 u8 *vbe_state_orig; /*
55439diff -urNp linux-3.0.3/init/do_mounts.c linux-3.0.3/init/do_mounts.c
55440--- linux-3.0.3/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55441+++ linux-3.0.3/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55442@@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55443
55444 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55445 {
55446- int err = sys_mount(name, "/root", fs, flags, data);
55447+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55448 if (err)
55449 return err;
55450
55451@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55452 va_start(args, fmt);
55453 vsprintf(buf, fmt, args);
55454 va_end(args);
55455- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55456+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55457 if (fd >= 0) {
55458 sys_ioctl(fd, FDEJECT, 0);
55459 sys_close(fd);
55460 }
55461 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55462- fd = sys_open("/dev/console", O_RDWR, 0);
55463+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55464 if (fd >= 0) {
55465 sys_ioctl(fd, TCGETS, (long)&termios);
55466 termios.c_lflag &= ~ICANON;
55467 sys_ioctl(fd, TCSETSF, (long)&termios);
55468- sys_read(fd, &c, 1);
55469+ sys_read(fd, (char __user *)&c, 1);
55470 termios.c_lflag |= ICANON;
55471 sys_ioctl(fd, TCSETSF, (long)&termios);
55472 sys_close(fd);
55473@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55474 mount_root();
55475 out:
55476 devtmpfs_mount("dev");
55477- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55478+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55479 sys_chroot((const char __user __force *)".");
55480 }
55481diff -urNp linux-3.0.3/init/do_mounts.h linux-3.0.3/init/do_mounts.h
55482--- linux-3.0.3/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55483+++ linux-3.0.3/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55484@@ -15,15 +15,15 @@ extern int root_mountflags;
55485
55486 static inline int create_dev(char *name, dev_t dev)
55487 {
55488- sys_unlink(name);
55489- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55490+ sys_unlink((__force char __user *)name);
55491+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55492 }
55493
55494 #if BITS_PER_LONG == 32
55495 static inline u32 bstat(char *name)
55496 {
55497 struct stat64 stat;
55498- if (sys_stat64(name, &stat) != 0)
55499+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55500 return 0;
55501 if (!S_ISBLK(stat.st_mode))
55502 return 0;
55503diff -urNp linux-3.0.3/init/do_mounts_initrd.c linux-3.0.3/init/do_mounts_initrd.c
55504--- linux-3.0.3/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55505+++ linux-3.0.3/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55506@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55507 create_dev("/dev/root.old", Root_RAM0);
55508 /* mount initrd on rootfs' /root */
55509 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55510- sys_mkdir("/old", 0700);
55511- root_fd = sys_open("/", 0, 0);
55512- old_fd = sys_open("/old", 0, 0);
55513+ sys_mkdir((__force const char __user *)"/old", 0700);
55514+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
55515+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55516 /* move initrd over / and chdir/chroot in initrd root */
55517- sys_chdir("/root");
55518- sys_mount(".", "/", NULL, MS_MOVE, NULL);
55519- sys_chroot(".");
55520+ sys_chdir((__force const char __user *)"/root");
55521+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55522+ sys_chroot((__force const char __user *)".");
55523
55524 /*
55525 * In case that a resume from disk is carried out by linuxrc or one of
55526@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55527
55528 /* move initrd to rootfs' /old */
55529 sys_fchdir(old_fd);
55530- sys_mount("/", ".", NULL, MS_MOVE, NULL);
55531+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55532 /* switch root and cwd back to / of rootfs */
55533 sys_fchdir(root_fd);
55534- sys_chroot(".");
55535+ sys_chroot((__force const char __user *)".");
55536 sys_close(old_fd);
55537 sys_close(root_fd);
55538
55539 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55540- sys_chdir("/old");
55541+ sys_chdir((__force const char __user *)"/old");
55542 return;
55543 }
55544
55545@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55546 mount_root();
55547
55548 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55549- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55550+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55551 if (!error)
55552 printk("okay\n");
55553 else {
55554- int fd = sys_open("/dev/root.old", O_RDWR, 0);
55555+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55556 if (error == -ENOENT)
55557 printk("/initrd does not exist. Ignored.\n");
55558 else
55559 printk("failed\n");
55560 printk(KERN_NOTICE "Unmounting old root\n");
55561- sys_umount("/old", MNT_DETACH);
55562+ sys_umount((__force char __user *)"/old", MNT_DETACH);
55563 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55564 if (fd < 0) {
55565 error = fd;
55566@@ -116,11 +116,11 @@ int __init initrd_load(void)
55567 * mounted in the normal path.
55568 */
55569 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55570- sys_unlink("/initrd.image");
55571+ sys_unlink((__force const char __user *)"/initrd.image");
55572 handle_initrd();
55573 return 1;
55574 }
55575 }
55576- sys_unlink("/initrd.image");
55577+ sys_unlink((__force const char __user *)"/initrd.image");
55578 return 0;
55579 }
55580diff -urNp linux-3.0.3/init/do_mounts_md.c linux-3.0.3/init/do_mounts_md.c
55581--- linux-3.0.3/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55582+++ linux-3.0.3/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55583@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55584 partitioned ? "_d" : "", minor,
55585 md_setup_args[ent].device_names);
55586
55587- fd = sys_open(name, 0, 0);
55588+ fd = sys_open((__force char __user *)name, 0, 0);
55589 if (fd < 0) {
55590 printk(KERN_ERR "md: open failed - cannot start "
55591 "array %s\n", name);
55592@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55593 * array without it
55594 */
55595 sys_close(fd);
55596- fd = sys_open(name, 0, 0);
55597+ fd = sys_open((__force char __user *)name, 0, 0);
55598 sys_ioctl(fd, BLKRRPART, 0);
55599 }
55600 sys_close(fd);
55601diff -urNp linux-3.0.3/init/initramfs.c linux-3.0.3/init/initramfs.c
55602--- linux-3.0.3/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55603+++ linux-3.0.3/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55604@@ -74,7 +74,7 @@ static void __init free_hash(void)
55605 }
55606 }
55607
55608-static long __init do_utime(char __user *filename, time_t mtime)
55609+static long __init do_utime(__force char __user *filename, time_t mtime)
55610 {
55611 struct timespec t[2];
55612
55613@@ -109,7 +109,7 @@ static void __init dir_utime(void)
55614 struct dir_entry *de, *tmp;
55615 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55616 list_del(&de->list);
55617- do_utime(de->name, de->mtime);
55618+ do_utime((__force char __user *)de->name, de->mtime);
55619 kfree(de->name);
55620 kfree(de);
55621 }
55622@@ -271,7 +271,7 @@ static int __init maybe_link(void)
55623 if (nlink >= 2) {
55624 char *old = find_link(major, minor, ino, mode, collected);
55625 if (old)
55626- return (sys_link(old, collected) < 0) ? -1 : 1;
55627+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55628 }
55629 return 0;
55630 }
55631@@ -280,11 +280,11 @@ static void __init clean_path(char *path
55632 {
55633 struct stat st;
55634
55635- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55636+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55637 if (S_ISDIR(st.st_mode))
55638- sys_rmdir(path);
55639+ sys_rmdir((__force char __user *)path);
55640 else
55641- sys_unlink(path);
55642+ sys_unlink((__force char __user *)path);
55643 }
55644 }
55645
55646@@ -305,7 +305,7 @@ static int __init do_name(void)
55647 int openflags = O_WRONLY|O_CREAT;
55648 if (ml != 1)
55649 openflags |= O_TRUNC;
55650- wfd = sys_open(collected, openflags, mode);
55651+ wfd = sys_open((__force char __user *)collected, openflags, mode);
55652
55653 if (wfd >= 0) {
55654 sys_fchown(wfd, uid, gid);
55655@@ -317,17 +317,17 @@ static int __init do_name(void)
55656 }
55657 }
55658 } else if (S_ISDIR(mode)) {
55659- sys_mkdir(collected, mode);
55660- sys_chown(collected, uid, gid);
55661- sys_chmod(collected, mode);
55662+ sys_mkdir((__force char __user *)collected, mode);
55663+ sys_chown((__force char __user *)collected, uid, gid);
55664+ sys_chmod((__force char __user *)collected, mode);
55665 dir_add(collected, mtime);
55666 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55667 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55668 if (maybe_link() == 0) {
55669- sys_mknod(collected, mode, rdev);
55670- sys_chown(collected, uid, gid);
55671- sys_chmod(collected, mode);
55672- do_utime(collected, mtime);
55673+ sys_mknod((__force char __user *)collected, mode, rdev);
55674+ sys_chown((__force char __user *)collected, uid, gid);
55675+ sys_chmod((__force char __user *)collected, mode);
55676+ do_utime((__force char __user *)collected, mtime);
55677 }
55678 }
55679 return 0;
55680@@ -336,15 +336,15 @@ static int __init do_name(void)
55681 static int __init do_copy(void)
55682 {
55683 if (count >= body_len) {
55684- sys_write(wfd, victim, body_len);
55685+ sys_write(wfd, (__force char __user *)victim, body_len);
55686 sys_close(wfd);
55687- do_utime(vcollected, mtime);
55688+ do_utime((__force char __user *)vcollected, mtime);
55689 kfree(vcollected);
55690 eat(body_len);
55691 state = SkipIt;
55692 return 0;
55693 } else {
55694- sys_write(wfd, victim, count);
55695+ sys_write(wfd, (__force char __user *)victim, count);
55696 body_len -= count;
55697 eat(count);
55698 return 1;
55699@@ -355,9 +355,9 @@ static int __init do_symlink(void)
55700 {
55701 collected[N_ALIGN(name_len) + body_len] = '\0';
55702 clean_path(collected, 0);
55703- sys_symlink(collected + N_ALIGN(name_len), collected);
55704- sys_lchown(collected, uid, gid);
55705- do_utime(collected, mtime);
55706+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55707+ sys_lchown((__force char __user *)collected, uid, gid);
55708+ do_utime((__force char __user *)collected, mtime);
55709 state = SkipIt;
55710 next_state = Reset;
55711 return 0;
55712diff -urNp linux-3.0.3/init/Kconfig linux-3.0.3/init/Kconfig
55713--- linux-3.0.3/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55714+++ linux-3.0.3/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55715@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55716
55717 config COMPAT_BRK
55718 bool "Disable heap randomization"
55719- default y
55720+ default n
55721 help
55722 Randomizing heap placement makes heap exploits harder, but it
55723 also breaks ancient binaries (including anything libc5 based).
55724diff -urNp linux-3.0.3/init/main.c linux-3.0.3/init/main.c
55725--- linux-3.0.3/init/main.c 2011-07-21 22:17:23.000000000 -0400
55726+++ linux-3.0.3/init/main.c 2011-08-23 21:48:14.000000000 -0400
55727@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55728 extern void tc_init(void);
55729 #endif
55730
55731+extern void grsecurity_init(void);
55732+
55733 /*
55734 * Debug helper: via this flag we know that we are in 'early bootup code'
55735 * where only the boot processor is running with IRQ disabled. This means
55736@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55737
55738 __setup("reset_devices", set_reset_devices);
55739
55740+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55741+extern char pax_enter_kernel_user[];
55742+extern char pax_exit_kernel_user[];
55743+extern pgdval_t clone_pgd_mask;
55744+#endif
55745+
55746+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55747+static int __init setup_pax_nouderef(char *str)
55748+{
55749+#ifdef CONFIG_X86_32
55750+ unsigned int cpu;
55751+ struct desc_struct *gdt;
55752+
55753+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
55754+ gdt = get_cpu_gdt_table(cpu);
55755+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55756+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55757+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55758+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55759+ }
55760+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55761+#else
55762+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55763+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55764+ clone_pgd_mask = ~(pgdval_t)0UL;
55765+#endif
55766+
55767+ return 0;
55768+}
55769+early_param("pax_nouderef", setup_pax_nouderef);
55770+#endif
55771+
55772+#ifdef CONFIG_PAX_SOFTMODE
55773+int pax_softmode;
55774+
55775+static int __init setup_pax_softmode(char *str)
55776+{
55777+ get_option(&str, &pax_softmode);
55778+ return 1;
55779+}
55780+__setup("pax_softmode=", setup_pax_softmode);
55781+#endif
55782+
55783 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55784 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55785 static const char *panic_later, *panic_param;
55786@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55787 {
55788 int count = preempt_count();
55789 int ret;
55790+ const char *msg1 = "", *msg2 = "";
55791
55792 if (initcall_debug)
55793 ret = do_one_initcall_debug(fn);
55794@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55795 sprintf(msgbuf, "error code %d ", ret);
55796
55797 if (preempt_count() != count) {
55798- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55799+ msg1 = " preemption imbalance";
55800 preempt_count() = count;
55801 }
55802 if (irqs_disabled()) {
55803- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55804+ msg2 = " disabled interrupts";
55805 local_irq_enable();
55806 }
55807- if (msgbuf[0]) {
55808- printk("initcall %pF returned with %s\n", fn, msgbuf);
55809+ if (msgbuf[0] || *msg1 || *msg2) {
55810+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55811 }
55812
55813 return ret;
55814@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55815 do_basic_setup();
55816
55817 /* Open the /dev/console on the rootfs, this should never fail */
55818- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55819+ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55820 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55821
55822 (void) sys_dup(0);
55823@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55824 if (!ramdisk_execute_command)
55825 ramdisk_execute_command = "/init";
55826
55827- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55828+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55829 ramdisk_execute_command = NULL;
55830 prepare_namespace();
55831 }
55832
55833+ grsecurity_init();
55834+
55835 /*
55836 * Ok, we have completed the initial bootup, and
55837 * we're essentially up and running. Get rid of the
55838diff -urNp linux-3.0.3/ipc/mqueue.c linux-3.0.3/ipc/mqueue.c
55839--- linux-3.0.3/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55840+++ linux-3.0.3/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55841@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55842 mq_bytes = (mq_msg_tblsz +
55843 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55844
55845+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55846 spin_lock(&mq_lock);
55847 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55848 u->mq_bytes + mq_bytes >
55849diff -urNp linux-3.0.3/ipc/msg.c linux-3.0.3/ipc/msg.c
55850--- linux-3.0.3/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55851+++ linux-3.0.3/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55852@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55853 return security_msg_queue_associate(msq, msgflg);
55854 }
55855
55856+static struct ipc_ops msg_ops = {
55857+ .getnew = newque,
55858+ .associate = msg_security,
55859+ .more_checks = NULL
55860+};
55861+
55862 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55863 {
55864 struct ipc_namespace *ns;
55865- struct ipc_ops msg_ops;
55866 struct ipc_params msg_params;
55867
55868 ns = current->nsproxy->ipc_ns;
55869
55870- msg_ops.getnew = newque;
55871- msg_ops.associate = msg_security;
55872- msg_ops.more_checks = NULL;
55873-
55874 msg_params.key = key;
55875 msg_params.flg = msgflg;
55876
55877diff -urNp linux-3.0.3/ipc/sem.c linux-3.0.3/ipc/sem.c
55878--- linux-3.0.3/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55879+++ linux-3.0.3/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55880@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55881 return 0;
55882 }
55883
55884+static struct ipc_ops sem_ops = {
55885+ .getnew = newary,
55886+ .associate = sem_security,
55887+ .more_checks = sem_more_checks
55888+};
55889+
55890 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55891 {
55892 struct ipc_namespace *ns;
55893- struct ipc_ops sem_ops;
55894 struct ipc_params sem_params;
55895
55896 ns = current->nsproxy->ipc_ns;
55897@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55898 if (nsems < 0 || nsems > ns->sc_semmsl)
55899 return -EINVAL;
55900
55901- sem_ops.getnew = newary;
55902- sem_ops.associate = sem_security;
55903- sem_ops.more_checks = sem_more_checks;
55904-
55905 sem_params.key = key;
55906 sem_params.flg = semflg;
55907 sem_params.u.nsems = nsems;
55908@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55909 int nsems;
55910 struct list_head tasks;
55911
55912+ pax_track_stack();
55913+
55914 sma = sem_lock_check(ns, semid);
55915 if (IS_ERR(sma))
55916 return PTR_ERR(sma);
55917@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55918 struct ipc_namespace *ns;
55919 struct list_head tasks;
55920
55921+ pax_track_stack();
55922+
55923 ns = current->nsproxy->ipc_ns;
55924
55925 if (nsops < 1 || semid < 0)
55926diff -urNp linux-3.0.3/ipc/shm.c linux-3.0.3/ipc/shm.c
55927--- linux-3.0.3/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55928+++ linux-3.0.3/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55929@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55930 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55931 #endif
55932
55933+#ifdef CONFIG_GRKERNSEC
55934+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55935+ const time_t shm_createtime, const uid_t cuid,
55936+ const int shmid);
55937+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55938+ const time_t shm_createtime);
55939+#endif
55940+
55941 void shm_init_ns(struct ipc_namespace *ns)
55942 {
55943 ns->shm_ctlmax = SHMMAX;
55944@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55945 shp->shm_lprid = 0;
55946 shp->shm_atim = shp->shm_dtim = 0;
55947 shp->shm_ctim = get_seconds();
55948+#ifdef CONFIG_GRKERNSEC
55949+ {
55950+ struct timespec timeval;
55951+ do_posix_clock_monotonic_gettime(&timeval);
55952+
55953+ shp->shm_createtime = timeval.tv_sec;
55954+ }
55955+#endif
55956 shp->shm_segsz = size;
55957 shp->shm_nattch = 0;
55958 shp->shm_file = file;
55959@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
55960 return 0;
55961 }
55962
55963+static struct ipc_ops shm_ops = {
55964+ .getnew = newseg,
55965+ .associate = shm_security,
55966+ .more_checks = shm_more_checks
55967+};
55968+
55969 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
55970 {
55971 struct ipc_namespace *ns;
55972- struct ipc_ops shm_ops;
55973 struct ipc_params shm_params;
55974
55975 ns = current->nsproxy->ipc_ns;
55976
55977- shm_ops.getnew = newseg;
55978- shm_ops.associate = shm_security;
55979- shm_ops.more_checks = shm_more_checks;
55980-
55981 shm_params.key = key;
55982 shm_params.flg = shmflg;
55983 shm_params.u.size = size;
55984@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
55985 case SHM_LOCK:
55986 case SHM_UNLOCK:
55987 {
55988- struct file *uninitialized_var(shm_file);
55989-
55990 lru_add_drain_all(); /* drain pagevecs to lru lists */
55991
55992 shp = shm_lock_check(ns, shmid);
55993@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
55994 if (err)
55995 goto out_unlock;
55996
55997+#ifdef CONFIG_GRKERNSEC
55998+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
55999+ shp->shm_perm.cuid, shmid) ||
56000+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56001+ err = -EACCES;
56002+ goto out_unlock;
56003+ }
56004+#endif
56005+
56006 path = shp->shm_file->f_path;
56007 path_get(&path);
56008 shp->shm_nattch++;
56009+#ifdef CONFIG_GRKERNSEC
56010+ shp->shm_lapid = current->pid;
56011+#endif
56012 size = i_size_read(path.dentry->d_inode);
56013 shm_unlock(shp);
56014
56015diff -urNp linux-3.0.3/kernel/acct.c linux-3.0.3/kernel/acct.c
56016--- linux-3.0.3/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
56017+++ linux-3.0.3/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
56018@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56019 */
56020 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56021 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56022- file->f_op->write(file, (char *)&ac,
56023+ file->f_op->write(file, (__force char __user *)&ac,
56024 sizeof(acct_t), &file->f_pos);
56025 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56026 set_fs(fs);
56027diff -urNp linux-3.0.3/kernel/audit.c linux-3.0.3/kernel/audit.c
56028--- linux-3.0.3/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
56029+++ linux-3.0.3/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
56030@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56031 3) suppressed due to audit_rate_limit
56032 4) suppressed due to audit_backlog_limit
56033 */
56034-static atomic_t audit_lost = ATOMIC_INIT(0);
56035+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56036
56037 /* The netlink socket. */
56038 static struct sock *audit_sock;
56039@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56040 unsigned long now;
56041 int print;
56042
56043- atomic_inc(&audit_lost);
56044+ atomic_inc_unchecked(&audit_lost);
56045
56046 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56047
56048@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56049 printk(KERN_WARNING
56050 "audit: audit_lost=%d audit_rate_limit=%d "
56051 "audit_backlog_limit=%d\n",
56052- atomic_read(&audit_lost),
56053+ atomic_read_unchecked(&audit_lost),
56054 audit_rate_limit,
56055 audit_backlog_limit);
56056 audit_panic(message);
56057@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56058 status_set.pid = audit_pid;
56059 status_set.rate_limit = audit_rate_limit;
56060 status_set.backlog_limit = audit_backlog_limit;
56061- status_set.lost = atomic_read(&audit_lost);
56062+ status_set.lost = atomic_read_unchecked(&audit_lost);
56063 status_set.backlog = skb_queue_len(&audit_skb_queue);
56064 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56065 &status_set, sizeof(status_set));
56066diff -urNp linux-3.0.3/kernel/auditsc.c linux-3.0.3/kernel/auditsc.c
56067--- linux-3.0.3/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
56068+++ linux-3.0.3/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
56069@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
56070 }
56071
56072 /* global counter which is incremented every time something logs in */
56073-static atomic_t session_id = ATOMIC_INIT(0);
56074+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56075
56076 /**
56077 * audit_set_loginuid - set a task's audit_context loginuid
56078@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
56079 */
56080 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56081 {
56082- unsigned int sessionid = atomic_inc_return(&session_id);
56083+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56084 struct audit_context *context = task->audit_context;
56085
56086 if (context && context->in_syscall) {
56087diff -urNp linux-3.0.3/kernel/capability.c linux-3.0.3/kernel/capability.c
56088--- linux-3.0.3/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
56089+++ linux-3.0.3/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
56090@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56091 * before modification is attempted and the application
56092 * fails.
56093 */
56094+ if (tocopy > ARRAY_SIZE(kdata))
56095+ return -EFAULT;
56096+
56097 if (copy_to_user(dataptr, kdata, tocopy
56098 * sizeof(struct __user_cap_data_struct))) {
56099 return -EFAULT;
56100@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
56101 BUG();
56102 }
56103
56104- if (security_capable(ns, current_cred(), cap) == 0) {
56105+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56106 current->flags |= PF_SUPERPRIV;
56107 return true;
56108 }
56109@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
56110 }
56111 EXPORT_SYMBOL(ns_capable);
56112
56113+bool ns_capable_nolog(struct user_namespace *ns, int cap)
56114+{
56115+ if (unlikely(!cap_valid(cap))) {
56116+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56117+ BUG();
56118+ }
56119+
56120+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56121+ current->flags |= PF_SUPERPRIV;
56122+ return true;
56123+ }
56124+ return false;
56125+}
56126+EXPORT_SYMBOL(ns_capable_nolog);
56127+
56128+bool capable_nolog(int cap)
56129+{
56130+ return ns_capable_nolog(&init_user_ns, cap);
56131+}
56132+EXPORT_SYMBOL(capable_nolog);
56133+
56134 /**
56135 * task_ns_capable - Determine whether current task has a superior
56136 * capability targeted at a specific task's user namespace.
56137@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
56138 }
56139 EXPORT_SYMBOL(task_ns_capable);
56140
56141+bool task_ns_capable_nolog(struct task_struct *t, int cap)
56142+{
56143+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56144+}
56145+EXPORT_SYMBOL(task_ns_capable_nolog);
56146+
56147 /**
56148 * nsown_capable - Check superior capability to one's own user_ns
56149 * @cap: The capability in question
56150diff -urNp linux-3.0.3/kernel/cgroup.c linux-3.0.3/kernel/cgroup.c
56151--- linux-3.0.3/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
56152+++ linux-3.0.3/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
56153@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
56154 struct hlist_head *hhead;
56155 struct cg_cgroup_link *link;
56156
56157+ pax_track_stack();
56158+
56159 /* First see if we already have a cgroup group that matches
56160 * the desired set */
56161 read_lock(&css_set_lock);
56162diff -urNp linux-3.0.3/kernel/compat.c linux-3.0.3/kernel/compat.c
56163--- linux-3.0.3/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
56164+++ linux-3.0.3/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
56165@@ -13,6 +13,7 @@
56166
56167 #include <linux/linkage.h>
56168 #include <linux/compat.h>
56169+#include <linux/module.h>
56170 #include <linux/errno.h>
56171 #include <linux/time.h>
56172 #include <linux/signal.h>
56173diff -urNp linux-3.0.3/kernel/configs.c linux-3.0.3/kernel/configs.c
56174--- linux-3.0.3/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
56175+++ linux-3.0.3/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
56176@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56177 struct proc_dir_entry *entry;
56178
56179 /* create the current config file */
56180+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56181+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56182+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56183+ &ikconfig_file_ops);
56184+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56185+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56186+ &ikconfig_file_ops);
56187+#endif
56188+#else
56189 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56190 &ikconfig_file_ops);
56191+#endif
56192+
56193 if (!entry)
56194 return -ENOMEM;
56195
56196diff -urNp linux-3.0.3/kernel/cred.c linux-3.0.3/kernel/cred.c
56197--- linux-3.0.3/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
56198+++ linux-3.0.3/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
56199@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56200 */
56201 void __put_cred(struct cred *cred)
56202 {
56203+ pax_track_stack();
56204+
56205 kdebug("__put_cred(%p{%d,%d})", cred,
56206 atomic_read(&cred->usage),
56207 read_cred_subscribers(cred));
56208@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56209 {
56210 struct cred *cred;
56211
56212+ pax_track_stack();
56213+
56214 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56215 atomic_read(&tsk->cred->usage),
56216 read_cred_subscribers(tsk->cred));
56217@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56218 {
56219 const struct cred *cred;
56220
56221+ pax_track_stack();
56222+
56223 rcu_read_lock();
56224
56225 do {
56226@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56227 {
56228 struct cred *new;
56229
56230+ pax_track_stack();
56231+
56232 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56233 if (!new)
56234 return NULL;
56235@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56236 const struct cred *old;
56237 struct cred *new;
56238
56239+ pax_track_stack();
56240+
56241 validate_process_creds();
56242
56243 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56244@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56245 struct thread_group_cred *tgcred = NULL;
56246 struct cred *new;
56247
56248+ pax_track_stack();
56249+
56250 #ifdef CONFIG_KEYS
56251 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56252 if (!tgcred)
56253@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56254 struct cred *new;
56255 int ret;
56256
56257+ pax_track_stack();
56258+
56259 if (
56260 #ifdef CONFIG_KEYS
56261 !p->cred->thread_keyring &&
56262@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56263 struct task_struct *task = current;
56264 const struct cred *old = task->real_cred;
56265
56266+ pax_track_stack();
56267+
56268 kdebug("commit_creds(%p{%d,%d})", new,
56269 atomic_read(&new->usage),
56270 read_cred_subscribers(new));
56271@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56272
56273 get_cred(new); /* we will require a ref for the subj creds too */
56274
56275+ gr_set_role_label(task, new->uid, new->gid);
56276+
56277 /* dumpability changes */
56278 if (old->euid != new->euid ||
56279 old->egid != new->egid ||
56280@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
56281 key_fsgid_changed(task);
56282
56283 /* do it
56284- * - What if a process setreuid()'s and this brings the
56285- * new uid over his NPROC rlimit? We can check this now
56286- * cheaply with the new uid cache, so if it matters
56287- * we should be checking for it. -DaveM
56288+ * RLIMIT_NPROC limits on user->processes have already been checked
56289+ * in set_user().
56290 */
56291 alter_cred_subscribers(new, 2);
56292 if (new->user != old->user)
56293@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
56294 */
56295 void abort_creds(struct cred *new)
56296 {
56297+ pax_track_stack();
56298+
56299 kdebug("abort_creds(%p{%d,%d})", new,
56300 atomic_read(&new->usage),
56301 read_cred_subscribers(new));
56302@@ -574,6 +592,8 @@ const struct cred *override_creds(const
56303 {
56304 const struct cred *old = current->cred;
56305
56306+ pax_track_stack();
56307+
56308 kdebug("override_creds(%p{%d,%d})", new,
56309 atomic_read(&new->usage),
56310 read_cred_subscribers(new));
56311@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
56312 {
56313 const struct cred *override = current->cred;
56314
56315+ pax_track_stack();
56316+
56317 kdebug("revert_creds(%p{%d,%d})", old,
56318 atomic_read(&old->usage),
56319 read_cred_subscribers(old));
56320@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
56321 const struct cred *old;
56322 struct cred *new;
56323
56324+ pax_track_stack();
56325+
56326 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56327 if (!new)
56328 return NULL;
56329@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56330 */
56331 int set_security_override(struct cred *new, u32 secid)
56332 {
56333+ pax_track_stack();
56334+
56335 return security_kernel_act_as(new, secid);
56336 }
56337 EXPORT_SYMBOL(set_security_override);
56338@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
56339 u32 secid;
56340 int ret;
56341
56342+ pax_track_stack();
56343+
56344 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56345 if (ret < 0)
56346 return ret;
56347diff -urNp linux-3.0.3/kernel/debug/debug_core.c linux-3.0.3/kernel/debug/debug_core.c
56348--- linux-3.0.3/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56349+++ linux-3.0.3/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56350@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56351 */
56352 static atomic_t masters_in_kgdb;
56353 static atomic_t slaves_in_kgdb;
56354-static atomic_t kgdb_break_tasklet_var;
56355+static atomic_unchecked_t kgdb_break_tasklet_var;
56356 atomic_t kgdb_setting_breakpoint;
56357
56358 struct task_struct *kgdb_usethread;
56359@@ -129,7 +129,7 @@ int kgdb_single_step;
56360 static pid_t kgdb_sstep_pid;
56361
56362 /* to keep track of the CPU which is doing the single stepping*/
56363-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56364+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56365
56366 /*
56367 * If you are debugging a problem where roundup (the collection of
56368@@ -542,7 +542,7 @@ return_normal:
56369 * kernel will only try for the value of sstep_tries before
56370 * giving up and continuing on.
56371 */
56372- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56373+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56374 (kgdb_info[cpu].task &&
56375 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56376 atomic_set(&kgdb_active, -1);
56377@@ -636,8 +636,8 @@ cpu_master_loop:
56378 }
56379
56380 kgdb_restore:
56381- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56382- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56383+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56384+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56385 if (kgdb_info[sstep_cpu].task)
56386 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56387 else
56388@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56389 static void kgdb_tasklet_bpt(unsigned long ing)
56390 {
56391 kgdb_breakpoint();
56392- atomic_set(&kgdb_break_tasklet_var, 0);
56393+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56394 }
56395
56396 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56397
56398 void kgdb_schedule_breakpoint(void)
56399 {
56400- if (atomic_read(&kgdb_break_tasklet_var) ||
56401+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56402 atomic_read(&kgdb_active) != -1 ||
56403 atomic_read(&kgdb_setting_breakpoint))
56404 return;
56405- atomic_inc(&kgdb_break_tasklet_var);
56406+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
56407 tasklet_schedule(&kgdb_tasklet_breakpoint);
56408 }
56409 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56410diff -urNp linux-3.0.3/kernel/debug/kdb/kdb_main.c linux-3.0.3/kernel/debug/kdb/kdb_main.c
56411--- linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56412+++ linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56413@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56414 list_for_each_entry(mod, kdb_modules, list) {
56415
56416 kdb_printf("%-20s%8u 0x%p ", mod->name,
56417- mod->core_size, (void *)mod);
56418+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
56419 #ifdef CONFIG_MODULE_UNLOAD
56420 kdb_printf("%4d ", module_refcount(mod));
56421 #endif
56422@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56423 kdb_printf(" (Loading)");
56424 else
56425 kdb_printf(" (Live)");
56426- kdb_printf(" 0x%p", mod->module_core);
56427+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56428
56429 #ifdef CONFIG_MODULE_UNLOAD
56430 {
56431diff -urNp linux-3.0.3/kernel/events/core.c linux-3.0.3/kernel/events/core.c
56432--- linux-3.0.3/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56433+++ linux-3.0.3/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56434@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56435 return 0;
56436 }
56437
56438-static atomic64_t perf_event_id;
56439+static atomic64_unchecked_t perf_event_id;
56440
56441 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56442 enum event_type_t event_type);
56443@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56444
56445 static inline u64 perf_event_count(struct perf_event *event)
56446 {
56447- return local64_read(&event->count) + atomic64_read(&event->child_count);
56448+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56449 }
56450
56451 static u64 perf_event_read(struct perf_event *event)
56452@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56453 mutex_lock(&event->child_mutex);
56454 total += perf_event_read(event);
56455 *enabled += event->total_time_enabled +
56456- atomic64_read(&event->child_total_time_enabled);
56457+ atomic64_read_unchecked(&event->child_total_time_enabled);
56458 *running += event->total_time_running +
56459- atomic64_read(&event->child_total_time_running);
56460+ atomic64_read_unchecked(&event->child_total_time_running);
56461
56462 list_for_each_entry(child, &event->child_list, child_list) {
56463 total += perf_event_read(child);
56464@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56465 userpg->offset -= local64_read(&event->hw.prev_count);
56466
56467 userpg->time_enabled = event->total_time_enabled +
56468- atomic64_read(&event->child_total_time_enabled);
56469+ atomic64_read_unchecked(&event->child_total_time_enabled);
56470
56471 userpg->time_running = event->total_time_running +
56472- atomic64_read(&event->child_total_time_running);
56473+ atomic64_read_unchecked(&event->child_total_time_running);
56474
56475 barrier();
56476 ++userpg->lock;
56477@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56478 values[n++] = perf_event_count(event);
56479 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56480 values[n++] = enabled +
56481- atomic64_read(&event->child_total_time_enabled);
56482+ atomic64_read_unchecked(&event->child_total_time_enabled);
56483 }
56484 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56485 values[n++] = running +
56486- atomic64_read(&event->child_total_time_running);
56487+ atomic64_read_unchecked(&event->child_total_time_running);
56488 }
56489 if (read_format & PERF_FORMAT_ID)
56490 values[n++] = primary_event_id(event);
56491@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56492 event->parent = parent_event;
56493
56494 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56495- event->id = atomic64_inc_return(&perf_event_id);
56496+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
56497
56498 event->state = PERF_EVENT_STATE_INACTIVE;
56499
56500@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56501 /*
56502 * Add back the child's count to the parent's count:
56503 */
56504- atomic64_add(child_val, &parent_event->child_count);
56505- atomic64_add(child_event->total_time_enabled,
56506+ atomic64_add_unchecked(child_val, &parent_event->child_count);
56507+ atomic64_add_unchecked(child_event->total_time_enabled,
56508 &parent_event->child_total_time_enabled);
56509- atomic64_add(child_event->total_time_running,
56510+ atomic64_add_unchecked(child_event->total_time_running,
56511 &parent_event->child_total_time_running);
56512
56513 /*
56514diff -urNp linux-3.0.3/kernel/exit.c linux-3.0.3/kernel/exit.c
56515--- linux-3.0.3/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56516+++ linux-3.0.3/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56517@@ -57,6 +57,10 @@
56518 #include <asm/pgtable.h>
56519 #include <asm/mmu_context.h>
56520
56521+#ifdef CONFIG_GRKERNSEC
56522+extern rwlock_t grsec_exec_file_lock;
56523+#endif
56524+
56525 static void exit_mm(struct task_struct * tsk);
56526
56527 static void __unhash_process(struct task_struct *p, bool group_dead)
56528@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56529 struct task_struct *leader;
56530 int zap_leader;
56531 repeat:
56532+#ifdef CONFIG_NET
56533+ gr_del_task_from_ip_table(p);
56534+#endif
56535+
56536 tracehook_prepare_release_task(p);
56537 /* don't need to get the RCU readlock here - the process is dead and
56538 * can't be modifying its own credentials. But shut RCU-lockdep up */
56539@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56540 {
56541 write_lock_irq(&tasklist_lock);
56542
56543+#ifdef CONFIG_GRKERNSEC
56544+ write_lock(&grsec_exec_file_lock);
56545+ if (current->exec_file) {
56546+ fput(current->exec_file);
56547+ current->exec_file = NULL;
56548+ }
56549+ write_unlock(&grsec_exec_file_lock);
56550+#endif
56551+
56552 ptrace_unlink(current);
56553 /* Reparent to init */
56554 current->real_parent = current->parent = kthreadd_task;
56555 list_move_tail(&current->sibling, &current->real_parent->children);
56556
56557+ gr_set_kernel_label(current);
56558+
56559 /* Set the exit signal to SIGCHLD so we signal init on exit */
56560 current->exit_signal = SIGCHLD;
56561
56562@@ -394,7 +413,7 @@ int allow_signal(int sig)
56563 * know it'll be handled, so that they don't get converted to
56564 * SIGKILL or just silently dropped.
56565 */
56566- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56567+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56568 recalc_sigpending();
56569 spin_unlock_irq(&current->sighand->siglock);
56570 return 0;
56571@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56572 vsnprintf(current->comm, sizeof(current->comm), name, args);
56573 va_end(args);
56574
56575+#ifdef CONFIG_GRKERNSEC
56576+ write_lock(&grsec_exec_file_lock);
56577+ if (current->exec_file) {
56578+ fput(current->exec_file);
56579+ current->exec_file = NULL;
56580+ }
56581+ write_unlock(&grsec_exec_file_lock);
56582+#endif
56583+
56584+ gr_set_kernel_label(current);
56585+
56586 /*
56587 * If we were started as result of loading a module, close all of the
56588 * user space pages. We don't need them, and if we didn't close them
56589@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56590 struct task_struct *tsk = current;
56591 int group_dead;
56592
56593- profile_task_exit(tsk);
56594-
56595- WARN_ON(atomic_read(&tsk->fs_excl));
56596- WARN_ON(blk_needs_flush_plug(tsk));
56597-
56598 if (unlikely(in_interrupt()))
56599 panic("Aiee, killing interrupt handler!");
56600- if (unlikely(!tsk->pid))
56601- panic("Attempted to kill the idle task!");
56602
56603 /*
56604 * If do_exit is called because this processes oopsed, it's possible
56605@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56606 */
56607 set_fs(USER_DS);
56608
56609+ profile_task_exit(tsk);
56610+
56611+ WARN_ON(atomic_read(&tsk->fs_excl));
56612+ WARN_ON(blk_needs_flush_plug(tsk));
56613+
56614+ if (unlikely(!tsk->pid))
56615+ panic("Attempted to kill the idle task!");
56616+
56617 tracehook_report_exit(&code);
56618
56619 validate_creds_for_do_exit(tsk);
56620@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56621 tsk->exit_code = code;
56622 taskstats_exit(tsk, group_dead);
56623
56624+ gr_acl_handle_psacct(tsk, code);
56625+ gr_acl_handle_exit();
56626+
56627 exit_mm(tsk);
56628
56629 if (group_dead)
56630diff -urNp linux-3.0.3/kernel/fork.c linux-3.0.3/kernel/fork.c
56631--- linux-3.0.3/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56632+++ linux-3.0.3/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
56633@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56634 *stackend = STACK_END_MAGIC; /* for overflow detection */
56635
56636 #ifdef CONFIG_CC_STACKPROTECTOR
56637- tsk->stack_canary = get_random_int();
56638+ tsk->stack_canary = pax_get_random_long();
56639 #endif
56640
56641 /* One for us, one for whoever does the "release_task()" (usually parent) */
56642@@ -308,13 +308,77 @@ out:
56643 }
56644
56645 #ifdef CONFIG_MMU
56646+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56647+{
56648+ struct vm_area_struct *tmp;
56649+ unsigned long charge;
56650+ struct mempolicy *pol;
56651+ struct file *file;
56652+
56653+ charge = 0;
56654+ if (mpnt->vm_flags & VM_ACCOUNT) {
56655+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56656+ if (security_vm_enough_memory(len))
56657+ goto fail_nomem;
56658+ charge = len;
56659+ }
56660+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56661+ if (!tmp)
56662+ goto fail_nomem;
56663+ *tmp = *mpnt;
56664+ tmp->vm_mm = mm;
56665+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
56666+ pol = mpol_dup(vma_policy(mpnt));
56667+ if (IS_ERR(pol))
56668+ goto fail_nomem_policy;
56669+ vma_set_policy(tmp, pol);
56670+ if (anon_vma_fork(tmp, mpnt))
56671+ goto fail_nomem_anon_vma_fork;
56672+ tmp->vm_flags &= ~VM_LOCKED;
56673+ tmp->vm_next = tmp->vm_prev = NULL;
56674+ tmp->vm_mirror = NULL;
56675+ file = tmp->vm_file;
56676+ if (file) {
56677+ struct inode *inode = file->f_path.dentry->d_inode;
56678+ struct address_space *mapping = file->f_mapping;
56679+
56680+ get_file(file);
56681+ if (tmp->vm_flags & VM_DENYWRITE)
56682+ atomic_dec(&inode->i_writecount);
56683+ mutex_lock(&mapping->i_mmap_mutex);
56684+ if (tmp->vm_flags & VM_SHARED)
56685+ mapping->i_mmap_writable++;
56686+ flush_dcache_mmap_lock(mapping);
56687+ /* insert tmp into the share list, just after mpnt */
56688+ vma_prio_tree_add(tmp, mpnt);
56689+ flush_dcache_mmap_unlock(mapping);
56690+ mutex_unlock(&mapping->i_mmap_mutex);
56691+ }
56692+
56693+ /*
56694+ * Clear hugetlb-related page reserves for children. This only
56695+ * affects MAP_PRIVATE mappings. Faults generated by the child
56696+ * are not guaranteed to succeed, even if read-only
56697+ */
56698+ if (is_vm_hugetlb_page(tmp))
56699+ reset_vma_resv_huge_pages(tmp);
56700+
56701+ return tmp;
56702+
56703+fail_nomem_anon_vma_fork:
56704+ mpol_put(pol);
56705+fail_nomem_policy:
56706+ kmem_cache_free(vm_area_cachep, tmp);
56707+fail_nomem:
56708+ vm_unacct_memory(charge);
56709+ return NULL;
56710+}
56711+
56712 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56713 {
56714 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56715 struct rb_node **rb_link, *rb_parent;
56716 int retval;
56717- unsigned long charge;
56718- struct mempolicy *pol;
56719
56720 down_write(&oldmm->mmap_sem);
56721 flush_cache_dup_mm(oldmm);
56722@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56723 mm->locked_vm = 0;
56724 mm->mmap = NULL;
56725 mm->mmap_cache = NULL;
56726- mm->free_area_cache = oldmm->mmap_base;
56727- mm->cached_hole_size = ~0UL;
56728+ mm->free_area_cache = oldmm->free_area_cache;
56729+ mm->cached_hole_size = oldmm->cached_hole_size;
56730 mm->map_count = 0;
56731 cpumask_clear(mm_cpumask(mm));
56732 mm->mm_rb = RB_ROOT;
56733@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56734
56735 prev = NULL;
56736 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56737- struct file *file;
56738-
56739 if (mpnt->vm_flags & VM_DONTCOPY) {
56740 long pages = vma_pages(mpnt);
56741 mm->total_vm -= pages;
56742@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56743 -pages);
56744 continue;
56745 }
56746- charge = 0;
56747- if (mpnt->vm_flags & VM_ACCOUNT) {
56748- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56749- if (security_vm_enough_memory(len))
56750- goto fail_nomem;
56751- charge = len;
56752- }
56753- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56754- if (!tmp)
56755- goto fail_nomem;
56756- *tmp = *mpnt;
56757- INIT_LIST_HEAD(&tmp->anon_vma_chain);
56758- pol = mpol_dup(vma_policy(mpnt));
56759- retval = PTR_ERR(pol);
56760- if (IS_ERR(pol))
56761- goto fail_nomem_policy;
56762- vma_set_policy(tmp, pol);
56763- tmp->vm_mm = mm;
56764- if (anon_vma_fork(tmp, mpnt))
56765- goto fail_nomem_anon_vma_fork;
56766- tmp->vm_flags &= ~VM_LOCKED;
56767- tmp->vm_next = tmp->vm_prev = NULL;
56768- file = tmp->vm_file;
56769- if (file) {
56770- struct inode *inode = file->f_path.dentry->d_inode;
56771- struct address_space *mapping = file->f_mapping;
56772-
56773- get_file(file);
56774- if (tmp->vm_flags & VM_DENYWRITE)
56775- atomic_dec(&inode->i_writecount);
56776- mutex_lock(&mapping->i_mmap_mutex);
56777- if (tmp->vm_flags & VM_SHARED)
56778- mapping->i_mmap_writable++;
56779- flush_dcache_mmap_lock(mapping);
56780- /* insert tmp into the share list, just after mpnt */
56781- vma_prio_tree_add(tmp, mpnt);
56782- flush_dcache_mmap_unlock(mapping);
56783- mutex_unlock(&mapping->i_mmap_mutex);
56784+ tmp = dup_vma(mm, mpnt);
56785+ if (!tmp) {
56786+ retval = -ENOMEM;
56787+ goto out;
56788 }
56789
56790 /*
56791- * Clear hugetlb-related page reserves for children. This only
56792- * affects MAP_PRIVATE mappings. Faults generated by the child
56793- * are not guaranteed to succeed, even if read-only
56794- */
56795- if (is_vm_hugetlb_page(tmp))
56796- reset_vma_resv_huge_pages(tmp);
56797-
56798- /*
56799 * Link in the new vma and copy the page table entries.
56800 */
56801 *pprev = tmp;
56802@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56803 if (retval)
56804 goto out;
56805 }
56806+
56807+#ifdef CONFIG_PAX_SEGMEXEC
56808+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56809+ struct vm_area_struct *mpnt_m;
56810+
56811+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56812+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56813+
56814+ if (!mpnt->vm_mirror)
56815+ continue;
56816+
56817+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56818+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56819+ mpnt->vm_mirror = mpnt_m;
56820+ } else {
56821+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56822+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56823+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56824+ mpnt->vm_mirror->vm_mirror = mpnt;
56825+ }
56826+ }
56827+ BUG_ON(mpnt_m);
56828+ }
56829+#endif
56830+
56831 /* a new mm has just been created */
56832 arch_dup_mmap(oldmm, mm);
56833 retval = 0;
56834@@ -429,14 +474,6 @@ out:
56835 flush_tlb_mm(oldmm);
56836 up_write(&oldmm->mmap_sem);
56837 return retval;
56838-fail_nomem_anon_vma_fork:
56839- mpol_put(pol);
56840-fail_nomem_policy:
56841- kmem_cache_free(vm_area_cachep, tmp);
56842-fail_nomem:
56843- retval = -ENOMEM;
56844- vm_unacct_memory(charge);
56845- goto out;
56846 }
56847
56848 static inline int mm_alloc_pgd(struct mm_struct * mm)
56849@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56850 spin_unlock(&fs->lock);
56851 return -EAGAIN;
56852 }
56853- fs->users++;
56854+ atomic_inc(&fs->users);
56855 spin_unlock(&fs->lock);
56856 return 0;
56857 }
56858 tsk->fs = copy_fs_struct(fs);
56859 if (!tsk->fs)
56860 return -ENOMEM;
56861+ gr_set_chroot_entries(tsk, &tsk->fs->root);
56862 return 0;
56863 }
56864
56865@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
56866 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56867 #endif
56868 retval = -EAGAIN;
56869+
56870+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56871+
56872 if (atomic_read(&p->real_cred->user->processes) >=
56873 task_rlimit(p, RLIMIT_NPROC)) {
56874- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56875- p->real_cred->user != INIT_USER)
56876+ if (p->real_cred->user != INIT_USER &&
56877+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56878 goto bad_fork_free;
56879 }
56880+ current->flags &= ~PF_NPROC_EXCEEDED;
56881
56882 retval = copy_creds(p, clone_flags);
56883 if (retval < 0)
56884@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
56885 if (clone_flags & CLONE_THREAD)
56886 p->tgid = current->tgid;
56887
56888+ gr_copy_label(p);
56889+
56890 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56891 /*
56892 * Clear TID on mm_release()?
56893@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
56894 bad_fork_free:
56895 free_task(p);
56896 fork_out:
56897+ gr_log_forkfail(retval);
56898+
56899 return ERR_PTR(retval);
56900 }
56901
56902@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
56903 if (clone_flags & CLONE_PARENT_SETTID)
56904 put_user(nr, parent_tidptr);
56905
56906+ gr_handle_brute_check();
56907+
56908 if (clone_flags & CLONE_VFORK) {
56909 p->vfork_done = &vfork;
56910 init_completion(&vfork);
56911@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
56912 return 0;
56913
56914 /* don't need lock here; in the worst case we'll do useless copy */
56915- if (fs->users == 1)
56916+ if (atomic_read(&fs->users) == 1)
56917 return 0;
56918
56919 *new_fsp = copy_fs_struct(fs);
56920@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56921 fs = current->fs;
56922 spin_lock(&fs->lock);
56923 current->fs = new_fs;
56924- if (--fs->users)
56925+ gr_set_chroot_entries(current, &current->fs->root);
56926+ if (atomic_dec_return(&fs->users))
56927 new_fs = NULL;
56928 else
56929 new_fs = fs;
56930diff -urNp linux-3.0.3/kernel/futex.c linux-3.0.3/kernel/futex.c
56931--- linux-3.0.3/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56932+++ linux-3.0.3/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56933@@ -54,6 +54,7 @@
56934 #include <linux/mount.h>
56935 #include <linux/pagemap.h>
56936 #include <linux/syscalls.h>
56937+#include <linux/ptrace.h>
56938 #include <linux/signal.h>
56939 #include <linux/module.h>
56940 #include <linux/magic.h>
56941@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56942 struct page *page, *page_head;
56943 int err, ro = 0;
56944
56945+#ifdef CONFIG_PAX_SEGMEXEC
56946+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56947+ return -EFAULT;
56948+#endif
56949+
56950 /*
56951 * The futex address must be "naturally" aligned.
56952 */
56953@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
56954 struct futex_q q = futex_q_init;
56955 int ret;
56956
56957+ pax_track_stack();
56958+
56959 if (!bitset)
56960 return -EINVAL;
56961 q.bitset = bitset;
56962@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
56963 struct futex_q q = futex_q_init;
56964 int res, ret;
56965
56966+ pax_track_stack();
56967+
56968 if (!bitset)
56969 return -EINVAL;
56970
56971@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56972 {
56973 struct robust_list_head __user *head;
56974 unsigned long ret;
56975+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56976 const struct cred *cred = current_cred(), *pcred;
56977+#endif
56978
56979 if (!futex_cmpxchg_enabled)
56980 return -ENOSYS;
56981@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56982 if (!p)
56983 goto err_unlock;
56984 ret = -EPERM;
56985+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56986+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
56987+ goto err_unlock;
56988+#else
56989 pcred = __task_cred(p);
56990 /* If victim is in different user_ns, then uids are not
56991 comparable, so we must have CAP_SYS_PTRACE */
56992@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56993 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
56994 goto err_unlock;
56995 ok:
56996+#endif
56997 head = p->robust_list;
56998 rcu_read_unlock();
56999 }
57000@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
57001 {
57002 u32 curval;
57003 int i;
57004+ mm_segment_t oldfs;
57005
57006 /*
57007 * This will fail and we want it. Some arch implementations do
57008@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
57009 * implementation, the non-functional ones will return
57010 * -ENOSYS.
57011 */
57012+ oldfs = get_fs();
57013+ set_fs(USER_DS);
57014 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57015 futex_cmpxchg_enabled = 1;
57016+ set_fs(oldfs);
57017
57018 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57019 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57020diff -urNp linux-3.0.3/kernel/futex_compat.c linux-3.0.3/kernel/futex_compat.c
57021--- linux-3.0.3/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
57022+++ linux-3.0.3/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
57023@@ -10,6 +10,7 @@
57024 #include <linux/compat.h>
57025 #include <linux/nsproxy.h>
57026 #include <linux/futex.h>
57027+#include <linux/ptrace.h>
57028
57029 #include <asm/uaccess.h>
57030
57031@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57032 {
57033 struct compat_robust_list_head __user *head;
57034 unsigned long ret;
57035- const struct cred *cred = current_cred(), *pcred;
57036+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57037+ const struct cred *cred = current_cred();
57038+ const struct cred *pcred;
57039+#endif
57040
57041 if (!futex_cmpxchg_enabled)
57042 return -ENOSYS;
57043@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57044 if (!p)
57045 goto err_unlock;
57046 ret = -EPERM;
57047+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57048+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
57049+ goto err_unlock;
57050+#else
57051 pcred = __task_cred(p);
57052 /* If victim is in different user_ns, then uids are not
57053 comparable, so we must have CAP_SYS_PTRACE */
57054@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57055 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57056 goto err_unlock;
57057 ok:
57058+#endif
57059 head = p->compat_robust_list;
57060 rcu_read_unlock();
57061 }
57062diff -urNp linux-3.0.3/kernel/gcov/base.c linux-3.0.3/kernel/gcov/base.c
57063--- linux-3.0.3/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
57064+++ linux-3.0.3/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
57065@@ -102,11 +102,6 @@ void gcov_enable_events(void)
57066 }
57067
57068 #ifdef CONFIG_MODULES
57069-static inline int within(void *addr, void *start, unsigned long size)
57070-{
57071- return ((addr >= start) && (addr < start + size));
57072-}
57073-
57074 /* Update list and generate events when modules are unloaded. */
57075 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57076 void *data)
57077@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57078 prev = NULL;
57079 /* Remove entries located in module from linked list. */
57080 for (info = gcov_info_head; info; info = info->next) {
57081- if (within(info, mod->module_core, mod->core_size)) {
57082+ if (within_module_core_rw((unsigned long)info, mod)) {
57083 if (prev)
57084 prev->next = info->next;
57085 else
57086diff -urNp linux-3.0.3/kernel/hrtimer.c linux-3.0.3/kernel/hrtimer.c
57087--- linux-3.0.3/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
57088+++ linux-3.0.3/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
57089@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
57090 local_irq_restore(flags);
57091 }
57092
57093-static void run_hrtimer_softirq(struct softirq_action *h)
57094+static void run_hrtimer_softirq(void)
57095 {
57096 hrtimer_peek_ahead_timers();
57097 }
57098diff -urNp linux-3.0.3/kernel/jump_label.c linux-3.0.3/kernel/jump_label.c
57099--- linux-3.0.3/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
57100+++ linux-3.0.3/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
57101@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
57102
57103 size = (((unsigned long)stop - (unsigned long)start)
57104 / sizeof(struct jump_entry));
57105+ pax_open_kernel();
57106 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57107+ pax_close_kernel();
57108 }
57109
57110 static void jump_label_update(struct jump_label_key *key, int enable);
57111@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
57112 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
57113 struct jump_entry *iter;
57114
57115+ pax_open_kernel();
57116 for (iter = iter_start; iter < iter_stop; iter++) {
57117 if (within_module_init(iter->code, mod))
57118 iter->code = 0;
57119 }
57120+ pax_close_kernel();
57121 }
57122
57123 static int
57124diff -urNp linux-3.0.3/kernel/kallsyms.c linux-3.0.3/kernel/kallsyms.c
57125--- linux-3.0.3/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
57126+++ linux-3.0.3/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
57127@@ -11,6 +11,9 @@
57128 * Changed the compression method from stem compression to "table lookup"
57129 * compression (see scripts/kallsyms.c for a more complete description)
57130 */
57131+#ifdef CONFIG_GRKERNSEC_HIDESYM
57132+#define __INCLUDED_BY_HIDESYM 1
57133+#endif
57134 #include <linux/kallsyms.h>
57135 #include <linux/module.h>
57136 #include <linux/init.h>
57137@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57138
57139 static inline int is_kernel_inittext(unsigned long addr)
57140 {
57141+ if (system_state != SYSTEM_BOOTING)
57142+ return 0;
57143+
57144 if (addr >= (unsigned long)_sinittext
57145 && addr <= (unsigned long)_einittext)
57146 return 1;
57147 return 0;
57148 }
57149
57150+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57151+#ifdef CONFIG_MODULES
57152+static inline int is_module_text(unsigned long addr)
57153+{
57154+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57155+ return 1;
57156+
57157+ addr = ktla_ktva(addr);
57158+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57159+}
57160+#else
57161+static inline int is_module_text(unsigned long addr)
57162+{
57163+ return 0;
57164+}
57165+#endif
57166+#endif
57167+
57168 static inline int is_kernel_text(unsigned long addr)
57169 {
57170 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57171@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57172
57173 static inline int is_kernel(unsigned long addr)
57174 {
57175+
57176+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57177+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
57178+ return 1;
57179+
57180+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57181+#else
57182 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57183+#endif
57184+
57185 return 1;
57186 return in_gate_area_no_mm(addr);
57187 }
57188
57189 static int is_ksym_addr(unsigned long addr)
57190 {
57191+
57192+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57193+ if (is_module_text(addr))
57194+ return 0;
57195+#endif
57196+
57197 if (all_var)
57198 return is_kernel(addr);
57199
57200@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57201
57202 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57203 {
57204- iter->name[0] = '\0';
57205 iter->nameoff = get_symbol_offset(new_pos);
57206 iter->pos = new_pos;
57207 }
57208@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57209 {
57210 struct kallsym_iter *iter = m->private;
57211
57212+#ifdef CONFIG_GRKERNSEC_HIDESYM
57213+ if (current_uid())
57214+ return 0;
57215+#endif
57216+
57217 /* Some debugging symbols have no name. Ignore them. */
57218 if (!iter->name[0])
57219 return 0;
57220@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57221 struct kallsym_iter *iter;
57222 int ret;
57223
57224- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57225+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57226 if (!iter)
57227 return -ENOMEM;
57228 reset_iter(iter, 0);
57229diff -urNp linux-3.0.3/kernel/kmod.c linux-3.0.3/kernel/kmod.c
57230--- linux-3.0.3/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
57231+++ linux-3.0.3/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
57232@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57233 * If module auto-loading support is disabled then this function
57234 * becomes a no-operation.
57235 */
57236-int __request_module(bool wait, const char *fmt, ...)
57237+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57238 {
57239- va_list args;
57240 char module_name[MODULE_NAME_LEN];
57241 unsigned int max_modprobes;
57242 int ret;
57243- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57244+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57245 static char *envp[] = { "HOME=/",
57246 "TERM=linux",
57247 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57248@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
57249 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57250 static int kmod_loop_msg;
57251
57252- va_start(args, fmt);
57253- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57254- va_end(args);
57255+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57256 if (ret >= MODULE_NAME_LEN)
57257 return -ENAMETOOLONG;
57258
57259@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57260 if (ret)
57261 return ret;
57262
57263+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57264+ if (!current_uid()) {
57265+ /* hack to workaround consolekit/udisks stupidity */
57266+ read_lock(&tasklist_lock);
57267+ if (!strcmp(current->comm, "mount") &&
57268+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57269+ read_unlock(&tasklist_lock);
57270+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57271+ return -EPERM;
57272+ }
57273+ read_unlock(&tasklist_lock);
57274+ }
57275+#endif
57276+
57277 /* If modprobe needs a service that is in a module, we get a recursive
57278 * loop. Limit the number of running kmod threads to max_threads/2 or
57279 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57280@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57281 atomic_dec(&kmod_concurrent);
57282 return ret;
57283 }
57284+
57285+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57286+{
57287+ va_list args;
57288+ int ret;
57289+
57290+ va_start(args, fmt);
57291+ ret = ____request_module(wait, module_param, fmt, args);
57292+ va_end(args);
57293+
57294+ return ret;
57295+}
57296+
57297+int __request_module(bool wait, const char *fmt, ...)
57298+{
57299+ va_list args;
57300+ int ret;
57301+
57302+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57303+ if (current_uid()) {
57304+ char module_param[MODULE_NAME_LEN];
57305+
57306+ memset(module_param, 0, sizeof(module_param));
57307+
57308+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57309+
57310+ va_start(args, fmt);
57311+ ret = ____request_module(wait, module_param, fmt, args);
57312+ va_end(args);
57313+
57314+ return ret;
57315+ }
57316+#endif
57317+
57318+ va_start(args, fmt);
57319+ ret = ____request_module(wait, NULL, fmt, args);
57320+ va_end(args);
57321+
57322+ return ret;
57323+}
57324+
57325 EXPORT_SYMBOL(__request_module);
57326 #endif /* CONFIG_MODULES */
57327
57328diff -urNp linux-3.0.3/kernel/kprobes.c linux-3.0.3/kernel/kprobes.c
57329--- linux-3.0.3/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57330+++ linux-3.0.3/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57331@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57332 * kernel image and loaded module images reside. This is required
57333 * so x86_64 can correctly handle the %rip-relative fixups.
57334 */
57335- kip->insns = module_alloc(PAGE_SIZE);
57336+ kip->insns = module_alloc_exec(PAGE_SIZE);
57337 if (!kip->insns) {
57338 kfree(kip);
57339 return NULL;
57340@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57341 */
57342 if (!list_is_singular(&kip->list)) {
57343 list_del(&kip->list);
57344- module_free(NULL, kip->insns);
57345+ module_free_exec(NULL, kip->insns);
57346 kfree(kip);
57347 }
57348 return 1;
57349@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57350 {
57351 int i, err = 0;
57352 unsigned long offset = 0, size = 0;
57353- char *modname, namebuf[128];
57354+ char *modname, namebuf[KSYM_NAME_LEN];
57355 const char *symbol_name;
57356 void *addr;
57357 struct kprobe_blackpoint *kb;
57358@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57359 const char *sym = NULL;
57360 unsigned int i = *(loff_t *) v;
57361 unsigned long offset = 0;
57362- char *modname, namebuf[128];
57363+ char *modname, namebuf[KSYM_NAME_LEN];
57364
57365 head = &kprobe_table[i];
57366 preempt_disable();
57367diff -urNp linux-3.0.3/kernel/lockdep.c linux-3.0.3/kernel/lockdep.c
57368--- linux-3.0.3/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57369+++ linux-3.0.3/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57370@@ -583,6 +583,10 @@ static int static_obj(void *obj)
57371 end = (unsigned long) &_end,
57372 addr = (unsigned long) obj;
57373
57374+#ifdef CONFIG_PAX_KERNEXEC
57375+ start = ktla_ktva(start);
57376+#endif
57377+
57378 /*
57379 * static variable?
57380 */
57381@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57382 if (!static_obj(lock->key)) {
57383 debug_locks_off();
57384 printk("INFO: trying to register non-static key.\n");
57385+ printk("lock:%pS key:%pS.\n", lock, lock->key);
57386 printk("the code is fine but needs lockdep annotation.\n");
57387 printk("turning off the locking correctness validator.\n");
57388 dump_stack();
57389@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57390 if (!class)
57391 return 0;
57392 }
57393- atomic_inc((atomic_t *)&class->ops);
57394+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57395 if (very_verbose(class)) {
57396 printk("\nacquire class [%p] %s", class->key, class->name);
57397 if (class->name_version > 1)
57398diff -urNp linux-3.0.3/kernel/lockdep_proc.c linux-3.0.3/kernel/lockdep_proc.c
57399--- linux-3.0.3/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57400+++ linux-3.0.3/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57401@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57402
57403 static void print_name(struct seq_file *m, struct lock_class *class)
57404 {
57405- char str[128];
57406+ char str[KSYM_NAME_LEN];
57407 const char *name = class->name;
57408
57409 if (!name) {
57410diff -urNp linux-3.0.3/kernel/module.c linux-3.0.3/kernel/module.c
57411--- linux-3.0.3/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57412+++ linux-3.0.3/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57413@@ -58,6 +58,7 @@
57414 #include <linux/jump_label.h>
57415 #include <linux/pfn.h>
57416 #include <linux/bsearch.h>
57417+#include <linux/grsecurity.h>
57418
57419 #define CREATE_TRACE_POINTS
57420 #include <trace/events/module.h>
57421@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57422
57423 /* Bounds of module allocation, for speeding __module_address.
57424 * Protected by module_mutex. */
57425-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57426+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57427+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57428
57429 int register_module_notifier(struct notifier_block * nb)
57430 {
57431@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57432 return true;
57433
57434 list_for_each_entry_rcu(mod, &modules, list) {
57435- struct symsearch arr[] = {
57436+ struct symsearch modarr[] = {
57437 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57438 NOT_GPL_ONLY, false },
57439 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57440@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57441 #endif
57442 };
57443
57444- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57445+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57446 return true;
57447 }
57448 return false;
57449@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57450 static int percpu_modalloc(struct module *mod,
57451 unsigned long size, unsigned long align)
57452 {
57453- if (align > PAGE_SIZE) {
57454+ if (align-1 >= PAGE_SIZE) {
57455 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57456 mod->name, align, PAGE_SIZE);
57457 align = PAGE_SIZE;
57458@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57459 */
57460 #ifdef CONFIG_SYSFS
57461
57462-#ifdef CONFIG_KALLSYMS
57463+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57464 static inline bool sect_empty(const Elf_Shdr *sect)
57465 {
57466 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57467@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57468
57469 static void unset_module_core_ro_nx(struct module *mod)
57470 {
57471- set_page_attributes(mod->module_core + mod->core_text_size,
57472- mod->module_core + mod->core_size,
57473+ set_page_attributes(mod->module_core_rw,
57474+ mod->module_core_rw + mod->core_size_rw,
57475 set_memory_x);
57476- set_page_attributes(mod->module_core,
57477- mod->module_core + mod->core_ro_size,
57478+ set_page_attributes(mod->module_core_rx,
57479+ mod->module_core_rx + mod->core_size_rx,
57480 set_memory_rw);
57481 }
57482
57483 static void unset_module_init_ro_nx(struct module *mod)
57484 {
57485- set_page_attributes(mod->module_init + mod->init_text_size,
57486- mod->module_init + mod->init_size,
57487+ set_page_attributes(mod->module_init_rw,
57488+ mod->module_init_rw + mod->init_size_rw,
57489 set_memory_x);
57490- set_page_attributes(mod->module_init,
57491- mod->module_init + mod->init_ro_size,
57492+ set_page_attributes(mod->module_init_rx,
57493+ mod->module_init_rx + mod->init_size_rx,
57494 set_memory_rw);
57495 }
57496
57497@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57498
57499 mutex_lock(&module_mutex);
57500 list_for_each_entry_rcu(mod, &modules, list) {
57501- if ((mod->module_core) && (mod->core_text_size)) {
57502- set_page_attributes(mod->module_core,
57503- mod->module_core + mod->core_text_size,
57504+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57505+ set_page_attributes(mod->module_core_rx,
57506+ mod->module_core_rx + mod->core_size_rx,
57507 set_memory_rw);
57508 }
57509- if ((mod->module_init) && (mod->init_text_size)) {
57510- set_page_attributes(mod->module_init,
57511- mod->module_init + mod->init_text_size,
57512+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57513+ set_page_attributes(mod->module_init_rx,
57514+ mod->module_init_rx + mod->init_size_rx,
57515 set_memory_rw);
57516 }
57517 }
57518@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57519
57520 mutex_lock(&module_mutex);
57521 list_for_each_entry_rcu(mod, &modules, list) {
57522- if ((mod->module_core) && (mod->core_text_size)) {
57523- set_page_attributes(mod->module_core,
57524- mod->module_core + mod->core_text_size,
57525+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
57526+ set_page_attributes(mod->module_core_rx,
57527+ mod->module_core_rx + mod->core_size_rx,
57528 set_memory_ro);
57529 }
57530- if ((mod->module_init) && (mod->init_text_size)) {
57531- set_page_attributes(mod->module_init,
57532- mod->module_init + mod->init_text_size,
57533+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
57534+ set_page_attributes(mod->module_init_rx,
57535+ mod->module_init_rx + mod->init_size_rx,
57536 set_memory_ro);
57537 }
57538 }
57539@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57540
57541 /* This may be NULL, but that's OK */
57542 unset_module_init_ro_nx(mod);
57543- module_free(mod, mod->module_init);
57544+ module_free(mod, mod->module_init_rw);
57545+ module_free_exec(mod, mod->module_init_rx);
57546 kfree(mod->args);
57547 percpu_modfree(mod);
57548
57549 /* Free lock-classes: */
57550- lockdep_free_key_range(mod->module_core, mod->core_size);
57551+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57552+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57553
57554 /* Finally, free the core (containing the module structure) */
57555 unset_module_core_ro_nx(mod);
57556- module_free(mod, mod->module_core);
57557+ module_free_exec(mod, mod->module_core_rx);
57558+ module_free(mod, mod->module_core_rw);
57559
57560 #ifdef CONFIG_MPU
57561 update_protections(current->mm);
57562@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57563 unsigned int i;
57564 int ret = 0;
57565 const struct kernel_symbol *ksym;
57566+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57567+ int is_fs_load = 0;
57568+ int register_filesystem_found = 0;
57569+ char *p;
57570+
57571+ p = strstr(mod->args, "grsec_modharden_fs");
57572+ if (p) {
57573+ char *endptr = p + strlen("grsec_modharden_fs");
57574+ /* copy \0 as well */
57575+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57576+ is_fs_load = 1;
57577+ }
57578+#endif
57579
57580 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57581 const char *name = info->strtab + sym[i].st_name;
57582
57583+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57584+ /* it's a real shame this will never get ripped and copied
57585+ upstream! ;(
57586+ */
57587+ if (is_fs_load && !strcmp(name, "register_filesystem"))
57588+ register_filesystem_found = 1;
57589+#endif
57590+
57591 switch (sym[i].st_shndx) {
57592 case SHN_COMMON:
57593 /* We compiled with -fno-common. These are not
57594@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57595 ksym = resolve_symbol_wait(mod, info, name);
57596 /* Ok if resolved. */
57597 if (ksym && !IS_ERR(ksym)) {
57598+ pax_open_kernel();
57599 sym[i].st_value = ksym->value;
57600+ pax_close_kernel();
57601 break;
57602 }
57603
57604@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57605 secbase = (unsigned long)mod_percpu(mod);
57606 else
57607 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57608+ pax_open_kernel();
57609 sym[i].st_value += secbase;
57610+ pax_close_kernel();
57611 break;
57612 }
57613 }
57614
57615+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57616+ if (is_fs_load && !register_filesystem_found) {
57617+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57618+ ret = -EPERM;
57619+ }
57620+#endif
57621+
57622 return ret;
57623 }
57624
57625@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57626 || s->sh_entsize != ~0UL
57627 || strstarts(sname, ".init"))
57628 continue;
57629- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57630+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57631+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57632+ else
57633+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57634 DEBUGP("\t%s\n", name);
57635 }
57636- switch (m) {
57637- case 0: /* executable */
57638- mod->core_size = debug_align(mod->core_size);
57639- mod->core_text_size = mod->core_size;
57640- break;
57641- case 1: /* RO: text and ro-data */
57642- mod->core_size = debug_align(mod->core_size);
57643- mod->core_ro_size = mod->core_size;
57644- break;
57645- case 3: /* whole core */
57646- mod->core_size = debug_align(mod->core_size);
57647- break;
57648- }
57649 }
57650
57651 DEBUGP("Init section allocation order:\n");
57652@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57653 || s->sh_entsize != ~0UL
57654 || !strstarts(sname, ".init"))
57655 continue;
57656- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57657- | INIT_OFFSET_MASK);
57658+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57659+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57660+ else
57661+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57662+ s->sh_entsize |= INIT_OFFSET_MASK;
57663 DEBUGP("\t%s\n", sname);
57664 }
57665- switch (m) {
57666- case 0: /* executable */
57667- mod->init_size = debug_align(mod->init_size);
57668- mod->init_text_size = mod->init_size;
57669- break;
57670- case 1: /* RO: text and ro-data */
57671- mod->init_size = debug_align(mod->init_size);
57672- mod->init_ro_size = mod->init_size;
57673- break;
57674- case 3: /* whole init */
57675- mod->init_size = debug_align(mod->init_size);
57676- break;
57677- }
57678 }
57679 }
57680
57681@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57682
57683 /* Put symbol section at end of init part of module. */
57684 symsect->sh_flags |= SHF_ALLOC;
57685- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57686+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57687 info->index.sym) | INIT_OFFSET_MASK;
57688 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57689
57690@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57691 }
57692
57693 /* Append room for core symbols at end of core part. */
57694- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57695- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57696+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57697+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57698
57699 /* Put string table section at end of init part of module. */
57700 strsect->sh_flags |= SHF_ALLOC;
57701- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57702+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57703 info->index.str) | INIT_OFFSET_MASK;
57704 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57705
57706 /* Append room for core symbols' strings at end of core part. */
57707- info->stroffs = mod->core_size;
57708+ info->stroffs = mod->core_size_rx;
57709 __set_bit(0, info->strmap);
57710- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57711+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57712 }
57713
57714 static void add_kallsyms(struct module *mod, const struct load_info *info)
57715@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57716 /* Make sure we get permanent strtab: don't use info->strtab. */
57717 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57718
57719+ pax_open_kernel();
57720+
57721 /* Set types up while we still have access to sections. */
57722 for (i = 0; i < mod->num_symtab; i++)
57723 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57724
57725- mod->core_symtab = dst = mod->module_core + info->symoffs;
57726+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57727 src = mod->symtab;
57728 *dst = *src;
57729 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57730@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57731 }
57732 mod->core_num_syms = ndst;
57733
57734- mod->core_strtab = s = mod->module_core + info->stroffs;
57735+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57736 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57737 if (test_bit(i, info->strmap))
57738 *++s = mod->strtab[i];
57739+
57740+ pax_close_kernel();
57741 }
57742 #else
57743 static inline void layout_symtab(struct module *mod, struct load_info *info)
57744@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57745 ddebug_remove_module(debug->modname);
57746 }
57747
57748-static void *module_alloc_update_bounds(unsigned long size)
57749+static void *module_alloc_update_bounds_rw(unsigned long size)
57750 {
57751 void *ret = module_alloc(size);
57752
57753 if (ret) {
57754 mutex_lock(&module_mutex);
57755 /* Update module bounds. */
57756- if ((unsigned long)ret < module_addr_min)
57757- module_addr_min = (unsigned long)ret;
57758- if ((unsigned long)ret + size > module_addr_max)
57759- module_addr_max = (unsigned long)ret + size;
57760+ if ((unsigned long)ret < module_addr_min_rw)
57761+ module_addr_min_rw = (unsigned long)ret;
57762+ if ((unsigned long)ret + size > module_addr_max_rw)
57763+ module_addr_max_rw = (unsigned long)ret + size;
57764+ mutex_unlock(&module_mutex);
57765+ }
57766+ return ret;
57767+}
57768+
57769+static void *module_alloc_update_bounds_rx(unsigned long size)
57770+{
57771+ void *ret = module_alloc_exec(size);
57772+
57773+ if (ret) {
57774+ mutex_lock(&module_mutex);
57775+ /* Update module bounds. */
57776+ if ((unsigned long)ret < module_addr_min_rx)
57777+ module_addr_min_rx = (unsigned long)ret;
57778+ if ((unsigned long)ret + size > module_addr_max_rx)
57779+ module_addr_max_rx = (unsigned long)ret + size;
57780 mutex_unlock(&module_mutex);
57781 }
57782 return ret;
57783@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57784 void *ptr;
57785
57786 /* Do the allocs. */
57787- ptr = module_alloc_update_bounds(mod->core_size);
57788+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57789 /*
57790 * The pointer to this block is stored in the module structure
57791 * which is inside the block. Just mark it as not being a
57792@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57793 if (!ptr)
57794 return -ENOMEM;
57795
57796- memset(ptr, 0, mod->core_size);
57797- mod->module_core = ptr;
57798+ memset(ptr, 0, mod->core_size_rw);
57799+ mod->module_core_rw = ptr;
57800
57801- ptr = module_alloc_update_bounds(mod->init_size);
57802+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57803 /*
57804 * The pointer to this block is stored in the module structure
57805 * which is inside the block. This block doesn't need to be
57806 * scanned as it contains data and code that will be freed
57807 * after the module is initialized.
57808 */
57809- kmemleak_ignore(ptr);
57810- if (!ptr && mod->init_size) {
57811- module_free(mod, mod->module_core);
57812+ kmemleak_not_leak(ptr);
57813+ if (!ptr && mod->init_size_rw) {
57814+ module_free(mod, mod->module_core_rw);
57815 return -ENOMEM;
57816 }
57817- memset(ptr, 0, mod->init_size);
57818- mod->module_init = ptr;
57819+ memset(ptr, 0, mod->init_size_rw);
57820+ mod->module_init_rw = ptr;
57821+
57822+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57823+ kmemleak_not_leak(ptr);
57824+ if (!ptr) {
57825+ module_free(mod, mod->module_init_rw);
57826+ module_free(mod, mod->module_core_rw);
57827+ return -ENOMEM;
57828+ }
57829+
57830+ pax_open_kernel();
57831+ memset(ptr, 0, mod->core_size_rx);
57832+ pax_close_kernel();
57833+ mod->module_core_rx = ptr;
57834+
57835+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57836+ kmemleak_not_leak(ptr);
57837+ if (!ptr && mod->init_size_rx) {
57838+ module_free_exec(mod, mod->module_core_rx);
57839+ module_free(mod, mod->module_init_rw);
57840+ module_free(mod, mod->module_core_rw);
57841+ return -ENOMEM;
57842+ }
57843+
57844+ pax_open_kernel();
57845+ memset(ptr, 0, mod->init_size_rx);
57846+ pax_close_kernel();
57847+ mod->module_init_rx = ptr;
57848
57849 /* Transfer each section which specifies SHF_ALLOC */
57850 DEBUGP("final section addresses:\n");
57851@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57852 if (!(shdr->sh_flags & SHF_ALLOC))
57853 continue;
57854
57855- if (shdr->sh_entsize & INIT_OFFSET_MASK)
57856- dest = mod->module_init
57857- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57858- else
57859- dest = mod->module_core + shdr->sh_entsize;
57860+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57861+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57862+ dest = mod->module_init_rw
57863+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57864+ else
57865+ dest = mod->module_init_rx
57866+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57867+ } else {
57868+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57869+ dest = mod->module_core_rw + shdr->sh_entsize;
57870+ else
57871+ dest = mod->module_core_rx + shdr->sh_entsize;
57872+ }
57873+
57874+ if (shdr->sh_type != SHT_NOBITS) {
57875+
57876+#ifdef CONFIG_PAX_KERNEXEC
57877+#ifdef CONFIG_X86_64
57878+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57879+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57880+#endif
57881+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57882+ pax_open_kernel();
57883+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57884+ pax_close_kernel();
57885+ } else
57886+#endif
57887
57888- if (shdr->sh_type != SHT_NOBITS)
57889 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57890+ }
57891 /* Update sh_addr to point to copy in image. */
57892- shdr->sh_addr = (unsigned long)dest;
57893+
57894+#ifdef CONFIG_PAX_KERNEXEC
57895+ if (shdr->sh_flags & SHF_EXECINSTR)
57896+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
57897+ else
57898+#endif
57899+
57900+ shdr->sh_addr = (unsigned long)dest;
57901 DEBUGP("\t0x%lx %s\n",
57902 shdr->sh_addr, info->secstrings + shdr->sh_name);
57903 }
57904@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57905 * Do it before processing of module parameters, so the module
57906 * can provide parameter accessor functions of its own.
57907 */
57908- if (mod->module_init)
57909- flush_icache_range((unsigned long)mod->module_init,
57910- (unsigned long)mod->module_init
57911- + mod->init_size);
57912- flush_icache_range((unsigned long)mod->module_core,
57913- (unsigned long)mod->module_core + mod->core_size);
57914+ if (mod->module_init_rx)
57915+ flush_icache_range((unsigned long)mod->module_init_rx,
57916+ (unsigned long)mod->module_init_rx
57917+ + mod->init_size_rx);
57918+ flush_icache_range((unsigned long)mod->module_core_rx,
57919+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
57920
57921 set_fs(old_fs);
57922 }
57923@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57924 {
57925 kfree(info->strmap);
57926 percpu_modfree(mod);
57927- module_free(mod, mod->module_init);
57928- module_free(mod, mod->module_core);
57929+ module_free_exec(mod, mod->module_init_rx);
57930+ module_free_exec(mod, mod->module_core_rx);
57931+ module_free(mod, mod->module_init_rw);
57932+ module_free(mod, mod->module_core_rw);
57933 }
57934
57935 static int post_relocation(struct module *mod, const struct load_info *info)
57936@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57937 if (err)
57938 goto free_unload;
57939
57940+ /* Now copy in args */
57941+ mod->args = strndup_user(uargs, ~0UL >> 1);
57942+ if (IS_ERR(mod->args)) {
57943+ err = PTR_ERR(mod->args);
57944+ goto free_unload;
57945+ }
57946+
57947 /* Set up MODINFO_ATTR fields */
57948 setup_modinfo(mod, &info);
57949
57950+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57951+ {
57952+ char *p, *p2;
57953+
57954+ if (strstr(mod->args, "grsec_modharden_netdev")) {
57955+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57956+ err = -EPERM;
57957+ goto free_modinfo;
57958+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57959+ p += strlen("grsec_modharden_normal");
57960+ p2 = strstr(p, "_");
57961+ if (p2) {
57962+ *p2 = '\0';
57963+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
57964+ *p2 = '_';
57965+ }
57966+ err = -EPERM;
57967+ goto free_modinfo;
57968+ }
57969+ }
57970+#endif
57971+
57972 /* Fix up syms, so that st_value is a pointer to location. */
57973 err = simplify_symbols(mod, &info);
57974 if (err < 0)
57975@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
57976
57977 flush_module_icache(mod);
57978
57979- /* Now copy in args */
57980- mod->args = strndup_user(uargs, ~0UL >> 1);
57981- if (IS_ERR(mod->args)) {
57982- err = PTR_ERR(mod->args);
57983- goto free_arch_cleanup;
57984- }
57985-
57986 /* Mark state as coming so strong_try_module_get() ignores us. */
57987 mod->state = MODULE_STATE_COMING;
57988
57989@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
57990 unlock:
57991 mutex_unlock(&module_mutex);
57992 synchronize_sched();
57993- kfree(mod->args);
57994- free_arch_cleanup:
57995 module_arch_cleanup(mod);
57996 free_modinfo:
57997 free_modinfo(mod);
57998+ kfree(mod->args);
57999 free_unload:
58000 module_unload_free(mod);
58001 free_module:
58002@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
58003 MODULE_STATE_COMING, mod);
58004
58005 /* Set RO and NX regions for core */
58006- set_section_ro_nx(mod->module_core,
58007- mod->core_text_size,
58008- mod->core_ro_size,
58009- mod->core_size);
58010+ set_section_ro_nx(mod->module_core_rx,
58011+ mod->core_size_rx,
58012+ mod->core_size_rx,
58013+ mod->core_size_rx);
58014
58015 /* Set RO and NX regions for init */
58016- set_section_ro_nx(mod->module_init,
58017- mod->init_text_size,
58018- mod->init_ro_size,
58019- mod->init_size);
58020+ set_section_ro_nx(mod->module_init_rx,
58021+ mod->init_size_rx,
58022+ mod->init_size_rx,
58023+ mod->init_size_rx);
58024
58025 do_mod_ctors(mod);
58026 /* Start the module */
58027@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
58028 mod->strtab = mod->core_strtab;
58029 #endif
58030 unset_module_init_ro_nx(mod);
58031- module_free(mod, mod->module_init);
58032- mod->module_init = NULL;
58033- mod->init_size = 0;
58034- mod->init_ro_size = 0;
58035- mod->init_text_size = 0;
58036+ module_free(mod, mod->module_init_rw);
58037+ module_free_exec(mod, mod->module_init_rx);
58038+ mod->module_init_rw = NULL;
58039+ mod->module_init_rx = NULL;
58040+ mod->init_size_rw = 0;
58041+ mod->init_size_rx = 0;
58042 mutex_unlock(&module_mutex);
58043
58044 return 0;
58045@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
58046 unsigned long nextval;
58047
58048 /* At worse, next value is at end of module */
58049- if (within_module_init(addr, mod))
58050- nextval = (unsigned long)mod->module_init+mod->init_text_size;
58051+ if (within_module_init_rx(addr, mod))
58052+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58053+ else if (within_module_init_rw(addr, mod))
58054+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58055+ else if (within_module_core_rx(addr, mod))
58056+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58057+ else if (within_module_core_rw(addr, mod))
58058+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58059 else
58060- nextval = (unsigned long)mod->module_core+mod->core_text_size;
58061+ return NULL;
58062
58063 /* Scan for closest preceding symbol, and next symbol. (ELF
58064 starts real symbols at 1). */
58065@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
58066 char buf[8];
58067
58068 seq_printf(m, "%s %u",
58069- mod->name, mod->init_size + mod->core_size);
58070+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58071 print_unload_info(m, mod);
58072
58073 /* Informative for users. */
58074@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
58075 mod->state == MODULE_STATE_COMING ? "Loading":
58076 "Live");
58077 /* Used by oprofile and other similar tools. */
58078- seq_printf(m, " 0x%pK", mod->module_core);
58079+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58080
58081 /* Taints info */
58082 if (mod->taints)
58083@@ -3283,7 +3406,17 @@ static const struct file_operations proc
58084
58085 static int __init proc_modules_init(void)
58086 {
58087+#ifndef CONFIG_GRKERNSEC_HIDESYM
58088+#ifdef CONFIG_GRKERNSEC_PROC_USER
58089+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58090+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58091+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58092+#else
58093 proc_create("modules", 0, NULL, &proc_modules_operations);
58094+#endif
58095+#else
58096+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58097+#endif
58098 return 0;
58099 }
58100 module_init(proc_modules_init);
58101@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
58102 {
58103 struct module *mod;
58104
58105- if (addr < module_addr_min || addr > module_addr_max)
58106+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58107+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
58108 return NULL;
58109
58110 list_for_each_entry_rcu(mod, &modules, list)
58111- if (within_module_core(addr, mod)
58112- || within_module_init(addr, mod))
58113+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
58114 return mod;
58115 return NULL;
58116 }
58117@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
58118 */
58119 struct module *__module_text_address(unsigned long addr)
58120 {
58121- struct module *mod = __module_address(addr);
58122+ struct module *mod;
58123+
58124+#ifdef CONFIG_X86_32
58125+ addr = ktla_ktva(addr);
58126+#endif
58127+
58128+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58129+ return NULL;
58130+
58131+ mod = __module_address(addr);
58132+
58133 if (mod) {
58134 /* Make sure it's within the text section. */
58135- if (!within(addr, mod->module_init, mod->init_text_size)
58136- && !within(addr, mod->module_core, mod->core_text_size))
58137+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58138 mod = NULL;
58139 }
58140 return mod;
58141diff -urNp linux-3.0.3/kernel/mutex.c linux-3.0.3/kernel/mutex.c
58142--- linux-3.0.3/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
58143+++ linux-3.0.3/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
58144@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
58145 spin_lock_mutex(&lock->wait_lock, flags);
58146
58147 debug_mutex_lock_common(lock, &waiter);
58148- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58149+ debug_mutex_add_waiter(lock, &waiter, task);
58150
58151 /* add waiting tasks to the end of the waitqueue (FIFO): */
58152 list_add_tail(&waiter.list, &lock->wait_list);
58153@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
58154 * TASK_UNINTERRUPTIBLE case.)
58155 */
58156 if (unlikely(signal_pending_state(state, task))) {
58157- mutex_remove_waiter(lock, &waiter,
58158- task_thread_info(task));
58159+ mutex_remove_waiter(lock, &waiter, task);
58160 mutex_release(&lock->dep_map, 1, ip);
58161 spin_unlock_mutex(&lock->wait_lock, flags);
58162
58163@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
58164 done:
58165 lock_acquired(&lock->dep_map, ip);
58166 /* got the lock - rejoice! */
58167- mutex_remove_waiter(lock, &waiter, current_thread_info());
58168+ mutex_remove_waiter(lock, &waiter, task);
58169 mutex_set_owner(lock);
58170
58171 /* set it to 0 if there are no waiters left: */
58172diff -urNp linux-3.0.3/kernel/mutex-debug.c linux-3.0.3/kernel/mutex-debug.c
58173--- linux-3.0.3/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
58174+++ linux-3.0.3/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
58175@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58176 }
58177
58178 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58179- struct thread_info *ti)
58180+ struct task_struct *task)
58181 {
58182 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58183
58184 /* Mark the current thread as blocked on the lock: */
58185- ti->task->blocked_on = waiter;
58186+ task->blocked_on = waiter;
58187 }
58188
58189 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58190- struct thread_info *ti)
58191+ struct task_struct *task)
58192 {
58193 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58194- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58195- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58196- ti->task->blocked_on = NULL;
58197+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
58198+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58199+ task->blocked_on = NULL;
58200
58201 list_del_init(&waiter->list);
58202 waiter->task = NULL;
58203diff -urNp linux-3.0.3/kernel/mutex-debug.h linux-3.0.3/kernel/mutex-debug.h
58204--- linux-3.0.3/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
58205+++ linux-3.0.3/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
58206@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
58207 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58208 extern void debug_mutex_add_waiter(struct mutex *lock,
58209 struct mutex_waiter *waiter,
58210- struct thread_info *ti);
58211+ struct task_struct *task);
58212 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58213- struct thread_info *ti);
58214+ struct task_struct *task);
58215 extern void debug_mutex_unlock(struct mutex *lock);
58216 extern void debug_mutex_init(struct mutex *lock, const char *name,
58217 struct lock_class_key *key);
58218diff -urNp linux-3.0.3/kernel/padata.c linux-3.0.3/kernel/padata.c
58219--- linux-3.0.3/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
58220+++ linux-3.0.3/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
58221@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58222 padata->pd = pd;
58223 padata->cb_cpu = cb_cpu;
58224
58225- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58226- atomic_set(&pd->seq_nr, -1);
58227+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58228+ atomic_set_unchecked(&pd->seq_nr, -1);
58229
58230- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58231+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58232
58233 target_cpu = padata_cpu_hash(padata);
58234 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58235@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58236 padata_init_pqueues(pd);
58237 padata_init_squeues(pd);
58238 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58239- atomic_set(&pd->seq_nr, -1);
58240+ atomic_set_unchecked(&pd->seq_nr, -1);
58241 atomic_set(&pd->reorder_objects, 0);
58242 atomic_set(&pd->refcnt, 0);
58243 pd->pinst = pinst;
58244diff -urNp linux-3.0.3/kernel/panic.c linux-3.0.3/kernel/panic.c
58245--- linux-3.0.3/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
58246+++ linux-3.0.3/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
58247@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58248 const char *board;
58249
58250 printk(KERN_WARNING "------------[ cut here ]------------\n");
58251- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58252+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58253 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58254 if (board)
58255 printk(KERN_WARNING "Hardware name: %s\n", board);
58256@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58257 */
58258 void __stack_chk_fail(void)
58259 {
58260- panic("stack-protector: Kernel stack is corrupted in: %p\n",
58261+ dump_stack();
58262+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58263 __builtin_return_address(0));
58264 }
58265 EXPORT_SYMBOL(__stack_chk_fail);
58266diff -urNp linux-3.0.3/kernel/pid.c linux-3.0.3/kernel/pid.c
58267--- linux-3.0.3/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58268+++ linux-3.0.3/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58269@@ -33,6 +33,7 @@
58270 #include <linux/rculist.h>
58271 #include <linux/bootmem.h>
58272 #include <linux/hash.h>
58273+#include <linux/security.h>
58274 #include <linux/pid_namespace.h>
58275 #include <linux/init_task.h>
58276 #include <linux/syscalls.h>
58277@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58278
58279 int pid_max = PID_MAX_DEFAULT;
58280
58281-#define RESERVED_PIDS 300
58282+#define RESERVED_PIDS 500
58283
58284 int pid_max_min = RESERVED_PIDS + 1;
58285 int pid_max_max = PID_MAX_LIMIT;
58286@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58287 */
58288 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58289 {
58290+ struct task_struct *task;
58291+
58292 rcu_lockdep_assert(rcu_read_lock_held());
58293- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58294+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58295+
58296+ if (gr_pid_is_chrooted(task))
58297+ return NULL;
58298+
58299+ return task;
58300 }
58301
58302 struct task_struct *find_task_by_vpid(pid_t vnr)
58303@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58304 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58305 }
58306
58307+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58308+{
58309+ rcu_lockdep_assert(rcu_read_lock_held());
58310+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58311+}
58312+
58313 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58314 {
58315 struct pid *pid;
58316diff -urNp linux-3.0.3/kernel/posix-cpu-timers.c linux-3.0.3/kernel/posix-cpu-timers.c
58317--- linux-3.0.3/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58318+++ linux-3.0.3/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58319@@ -6,6 +6,7 @@
58320 #include <linux/posix-timers.h>
58321 #include <linux/errno.h>
58322 #include <linux/math64.h>
58323+#include <linux/security.h>
58324 #include <asm/uaccess.h>
58325 #include <linux/kernel_stat.h>
58326 #include <trace/events/timer.h>
58327@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58328
58329 static __init int init_posix_cpu_timers(void)
58330 {
58331- struct k_clock process = {
58332+ static struct k_clock process = {
58333 .clock_getres = process_cpu_clock_getres,
58334 .clock_get = process_cpu_clock_get,
58335 .timer_create = process_cpu_timer_create,
58336 .nsleep = process_cpu_nsleep,
58337 .nsleep_restart = process_cpu_nsleep_restart,
58338 };
58339- struct k_clock thread = {
58340+ static struct k_clock thread = {
58341 .clock_getres = thread_cpu_clock_getres,
58342 .clock_get = thread_cpu_clock_get,
58343 .timer_create = thread_cpu_timer_create,
58344diff -urNp linux-3.0.3/kernel/posix-timers.c linux-3.0.3/kernel/posix-timers.c
58345--- linux-3.0.3/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58346+++ linux-3.0.3/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58347@@ -43,6 +43,7 @@
58348 #include <linux/idr.h>
58349 #include <linux/posix-clock.h>
58350 #include <linux/posix-timers.h>
58351+#include <linux/grsecurity.h>
58352 #include <linux/syscalls.h>
58353 #include <linux/wait.h>
58354 #include <linux/workqueue.h>
58355@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58356 * which we beg off on and pass to do_sys_settimeofday().
58357 */
58358
58359-static struct k_clock posix_clocks[MAX_CLOCKS];
58360+static struct k_clock *posix_clocks[MAX_CLOCKS];
58361
58362 /*
58363 * These ones are defined below.
58364@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58365 */
58366 static __init int init_posix_timers(void)
58367 {
58368- struct k_clock clock_realtime = {
58369+ static struct k_clock clock_realtime = {
58370 .clock_getres = hrtimer_get_res,
58371 .clock_get = posix_clock_realtime_get,
58372 .clock_set = posix_clock_realtime_set,
58373@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58374 .timer_get = common_timer_get,
58375 .timer_del = common_timer_del,
58376 };
58377- struct k_clock clock_monotonic = {
58378+ static struct k_clock clock_monotonic = {
58379 .clock_getres = hrtimer_get_res,
58380 .clock_get = posix_ktime_get_ts,
58381 .nsleep = common_nsleep,
58382@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58383 .timer_get = common_timer_get,
58384 .timer_del = common_timer_del,
58385 };
58386- struct k_clock clock_monotonic_raw = {
58387+ static struct k_clock clock_monotonic_raw = {
58388 .clock_getres = hrtimer_get_res,
58389 .clock_get = posix_get_monotonic_raw,
58390 };
58391- struct k_clock clock_realtime_coarse = {
58392+ static struct k_clock clock_realtime_coarse = {
58393 .clock_getres = posix_get_coarse_res,
58394 .clock_get = posix_get_realtime_coarse,
58395 };
58396- struct k_clock clock_monotonic_coarse = {
58397+ static struct k_clock clock_monotonic_coarse = {
58398 .clock_getres = posix_get_coarse_res,
58399 .clock_get = posix_get_monotonic_coarse,
58400 };
58401- struct k_clock clock_boottime = {
58402+ static struct k_clock clock_boottime = {
58403 .clock_getres = hrtimer_get_res,
58404 .clock_get = posix_get_boottime,
58405 .nsleep = common_nsleep,
58406@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58407 .timer_del = common_timer_del,
58408 };
58409
58410+ pax_track_stack();
58411+
58412 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58413 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58414 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58415@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58416 return;
58417 }
58418
58419- posix_clocks[clock_id] = *new_clock;
58420+ posix_clocks[clock_id] = new_clock;
58421 }
58422 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58423
58424@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58425 return (id & CLOCKFD_MASK) == CLOCKFD ?
58426 &clock_posix_dynamic : &clock_posix_cpu;
58427
58428- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58429+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58430 return NULL;
58431- return &posix_clocks[id];
58432+ return posix_clocks[id];
58433 }
58434
58435 static int common_timer_create(struct k_itimer *new_timer)
58436@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58437 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58438 return -EFAULT;
58439
58440+ /* only the CLOCK_REALTIME clock can be set, all other clocks
58441+ have their clock_set fptr set to a nosettime dummy function
58442+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58443+ call common_clock_set, which calls do_sys_settimeofday, which
58444+ we hook
58445+ */
58446+
58447 return kc->clock_set(which_clock, &new_tp);
58448 }
58449
58450diff -urNp linux-3.0.3/kernel/power/poweroff.c linux-3.0.3/kernel/power/poweroff.c
58451--- linux-3.0.3/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58452+++ linux-3.0.3/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58453@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58454 .enable_mask = SYSRQ_ENABLE_BOOT,
58455 };
58456
58457-static int pm_sysrq_init(void)
58458+static int __init pm_sysrq_init(void)
58459 {
58460 register_sysrq_key('o', &sysrq_poweroff_op);
58461 return 0;
58462diff -urNp linux-3.0.3/kernel/power/process.c linux-3.0.3/kernel/power/process.c
58463--- linux-3.0.3/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58464+++ linux-3.0.3/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58465@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58466 u64 elapsed_csecs64;
58467 unsigned int elapsed_csecs;
58468 bool wakeup = false;
58469+ bool timedout = false;
58470
58471 do_gettimeofday(&start);
58472
58473@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58474
58475 while (true) {
58476 todo = 0;
58477+ if (time_after(jiffies, end_time))
58478+ timedout = true;
58479 read_lock(&tasklist_lock);
58480 do_each_thread(g, p) {
58481 if (frozen(p) || !freezable(p))
58482@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58483 * try_to_stop() after schedule() in ptrace/signal
58484 * stop sees TIF_FREEZE.
58485 */
58486- if (!task_is_stopped_or_traced(p) &&
58487- !freezer_should_skip(p))
58488+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58489 todo++;
58490+ if (timedout) {
58491+ printk(KERN_ERR "Task refusing to freeze:\n");
58492+ sched_show_task(p);
58493+ }
58494+ }
58495 } while_each_thread(g, p);
58496 read_unlock(&tasklist_lock);
58497
58498@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58499 todo += wq_busy;
58500 }
58501
58502- if (!todo || time_after(jiffies, end_time))
58503+ if (!todo || timedout)
58504 break;
58505
58506 if (pm_wakeup_pending()) {
58507diff -urNp linux-3.0.3/kernel/printk.c linux-3.0.3/kernel/printk.c
58508--- linux-3.0.3/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58509+++ linux-3.0.3/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58510@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58511 if (from_file && type != SYSLOG_ACTION_OPEN)
58512 return 0;
58513
58514+#ifdef CONFIG_GRKERNSEC_DMESG
58515+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58516+ return -EPERM;
58517+#endif
58518+
58519 if (syslog_action_restricted(type)) {
58520 if (capable(CAP_SYSLOG))
58521 return 0;
58522 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58523 if (capable(CAP_SYS_ADMIN)) {
58524- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58525+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58526 "but no CAP_SYSLOG (deprecated).\n");
58527 return 0;
58528 }
58529diff -urNp linux-3.0.3/kernel/profile.c linux-3.0.3/kernel/profile.c
58530--- linux-3.0.3/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58531+++ linux-3.0.3/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58532@@ -39,7 +39,7 @@ struct profile_hit {
58533 /* Oprofile timer tick hook */
58534 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58535
58536-static atomic_t *prof_buffer;
58537+static atomic_unchecked_t *prof_buffer;
58538 static unsigned long prof_len, prof_shift;
58539
58540 int prof_on __read_mostly;
58541@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58542 hits[i].pc = 0;
58543 continue;
58544 }
58545- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58546+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58547 hits[i].hits = hits[i].pc = 0;
58548 }
58549 }
58550@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58551 * Add the current hit(s) and flush the write-queue out
58552 * to the global buffer:
58553 */
58554- atomic_add(nr_hits, &prof_buffer[pc]);
58555+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58556 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58557- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58558+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58559 hits[i].pc = hits[i].hits = 0;
58560 }
58561 out:
58562@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58563 {
58564 unsigned long pc;
58565 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58566- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58567+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58568 }
58569 #endif /* !CONFIG_SMP */
58570
58571@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58572 return -EFAULT;
58573 buf++; p++; count--; read++;
58574 }
58575- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58576+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58577 if (copy_to_user(buf, (void *)pnt, count))
58578 return -EFAULT;
58579 read += count;
58580@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58581 }
58582 #endif
58583 profile_discard_flip_buffers();
58584- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58585+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58586 return count;
58587 }
58588
58589diff -urNp linux-3.0.3/kernel/ptrace.c linux-3.0.3/kernel/ptrace.c
58590--- linux-3.0.3/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58591+++ linux-3.0.3/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58592@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58593 return ret;
58594 }
58595
58596-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58597+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58598+ unsigned int log)
58599 {
58600 const struct cred *cred = current_cred(), *tcred;
58601
58602@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58603 cred->gid == tcred->sgid &&
58604 cred->gid == tcred->gid))
58605 goto ok;
58606- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58607+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58608+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58609 goto ok;
58610 rcu_read_unlock();
58611 return -EPERM;
58612@@ -167,7 +169,9 @@ ok:
58613 smp_rmb();
58614 if (task->mm)
58615 dumpable = get_dumpable(task->mm);
58616- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58617+ if (!dumpable &&
58618+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58619+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58620 return -EPERM;
58621
58622 return security_ptrace_access_check(task, mode);
58623@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58624 {
58625 int err;
58626 task_lock(task);
58627- err = __ptrace_may_access(task, mode);
58628+ err = __ptrace_may_access(task, mode, 0);
58629+ task_unlock(task);
58630+ return !err;
58631+}
58632+
58633+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58634+{
58635+ int err;
58636+ task_lock(task);
58637+ err = __ptrace_may_access(task, mode, 1);
58638 task_unlock(task);
58639 return !err;
58640 }
58641@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58642 goto out;
58643
58644 task_lock(task);
58645- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58646+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58647 task_unlock(task);
58648 if (retval)
58649 goto unlock_creds;
58650@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58651 goto unlock_tasklist;
58652
58653 task->ptrace = PT_PTRACED;
58654- if (task_ns_capable(task, CAP_SYS_PTRACE))
58655+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58656 task->ptrace |= PT_PTRACE_CAP;
58657
58658 __ptrace_link(task, current);
58659@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58660 {
58661 int copied = 0;
58662
58663+ pax_track_stack();
58664+
58665 while (len > 0) {
58666 char buf[128];
58667 int this_len, retval;
58668@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58669 break;
58670 return -EIO;
58671 }
58672- if (copy_to_user(dst, buf, retval))
58673+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58674 return -EFAULT;
58675 copied += retval;
58676 src += retval;
58677@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58678 {
58679 int copied = 0;
58680
58681+ pax_track_stack();
58682+
58683 while (len > 0) {
58684 char buf[128];
58685 int this_len, retval;
58686@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58687 {
58688 int ret = -EIO;
58689 siginfo_t siginfo;
58690- void __user *datavp = (void __user *) data;
58691+ void __user *datavp = (__force void __user *) data;
58692 unsigned long __user *datalp = datavp;
58693
58694+ pax_track_stack();
58695+
58696 switch (request) {
58697 case PTRACE_PEEKTEXT:
58698 case PTRACE_PEEKDATA:
58699@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58700 goto out;
58701 }
58702
58703+ if (gr_handle_ptrace(child, request)) {
58704+ ret = -EPERM;
58705+ goto out_put_task_struct;
58706+ }
58707+
58708 if (request == PTRACE_ATTACH) {
58709 ret = ptrace_attach(child);
58710 /*
58711 * Some architectures need to do book-keeping after
58712 * a ptrace attach.
58713 */
58714- if (!ret)
58715+ if (!ret) {
58716 arch_ptrace_attach(child);
58717+ gr_audit_ptrace(child);
58718+ }
58719 goto out_put_task_struct;
58720 }
58721
58722@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58723 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58724 if (copied != sizeof(tmp))
58725 return -EIO;
58726- return put_user(tmp, (unsigned long __user *)data);
58727+ return put_user(tmp, (__force unsigned long __user *)data);
58728 }
58729
58730 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58731@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58732 siginfo_t siginfo;
58733 int ret;
58734
58735+ pax_track_stack();
58736+
58737 switch (request) {
58738 case PTRACE_PEEKTEXT:
58739 case PTRACE_PEEKDATA:
58740@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58741 goto out;
58742 }
58743
58744+ if (gr_handle_ptrace(child, request)) {
58745+ ret = -EPERM;
58746+ goto out_put_task_struct;
58747+ }
58748+
58749 if (request == PTRACE_ATTACH) {
58750 ret = ptrace_attach(child);
58751 /*
58752 * Some architectures need to do book-keeping after
58753 * a ptrace attach.
58754 */
58755- if (!ret)
58756+ if (!ret) {
58757 arch_ptrace_attach(child);
58758+ gr_audit_ptrace(child);
58759+ }
58760 goto out_put_task_struct;
58761 }
58762
58763diff -urNp linux-3.0.3/kernel/rcutorture.c linux-3.0.3/kernel/rcutorture.c
58764--- linux-3.0.3/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58765+++ linux-3.0.3/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58766@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58767 { 0 };
58768 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58769 { 0 };
58770-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58771-static atomic_t n_rcu_torture_alloc;
58772-static atomic_t n_rcu_torture_alloc_fail;
58773-static atomic_t n_rcu_torture_free;
58774-static atomic_t n_rcu_torture_mberror;
58775-static atomic_t n_rcu_torture_error;
58776+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58777+static atomic_unchecked_t n_rcu_torture_alloc;
58778+static atomic_unchecked_t n_rcu_torture_alloc_fail;
58779+static atomic_unchecked_t n_rcu_torture_free;
58780+static atomic_unchecked_t n_rcu_torture_mberror;
58781+static atomic_unchecked_t n_rcu_torture_error;
58782 static long n_rcu_torture_boost_ktrerror;
58783 static long n_rcu_torture_boost_rterror;
58784 static long n_rcu_torture_boost_failure;
58785@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58786
58787 spin_lock_bh(&rcu_torture_lock);
58788 if (list_empty(&rcu_torture_freelist)) {
58789- atomic_inc(&n_rcu_torture_alloc_fail);
58790+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58791 spin_unlock_bh(&rcu_torture_lock);
58792 return NULL;
58793 }
58794- atomic_inc(&n_rcu_torture_alloc);
58795+ atomic_inc_unchecked(&n_rcu_torture_alloc);
58796 p = rcu_torture_freelist.next;
58797 list_del_init(p);
58798 spin_unlock_bh(&rcu_torture_lock);
58799@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58800 static void
58801 rcu_torture_free(struct rcu_torture *p)
58802 {
58803- atomic_inc(&n_rcu_torture_free);
58804+ atomic_inc_unchecked(&n_rcu_torture_free);
58805 spin_lock_bh(&rcu_torture_lock);
58806 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58807 spin_unlock_bh(&rcu_torture_lock);
58808@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58809 i = rp->rtort_pipe_count;
58810 if (i > RCU_TORTURE_PIPE_LEN)
58811 i = RCU_TORTURE_PIPE_LEN;
58812- atomic_inc(&rcu_torture_wcount[i]);
58813+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58814 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58815 rp->rtort_mbtest = 0;
58816 rcu_torture_free(rp);
58817@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58818 i = rp->rtort_pipe_count;
58819 if (i > RCU_TORTURE_PIPE_LEN)
58820 i = RCU_TORTURE_PIPE_LEN;
58821- atomic_inc(&rcu_torture_wcount[i]);
58822+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58823 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58824 rp->rtort_mbtest = 0;
58825 list_del(&rp->rtort_free);
58826@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58827 i = old_rp->rtort_pipe_count;
58828 if (i > RCU_TORTURE_PIPE_LEN)
58829 i = RCU_TORTURE_PIPE_LEN;
58830- atomic_inc(&rcu_torture_wcount[i]);
58831+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
58832 old_rp->rtort_pipe_count++;
58833 cur_ops->deferred_free(old_rp);
58834 }
58835@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58836 return;
58837 }
58838 if (p->rtort_mbtest == 0)
58839- atomic_inc(&n_rcu_torture_mberror);
58840+ atomic_inc_unchecked(&n_rcu_torture_mberror);
58841 spin_lock(&rand_lock);
58842 cur_ops->read_delay(&rand);
58843 n_rcu_torture_timers++;
58844@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58845 continue;
58846 }
58847 if (p->rtort_mbtest == 0)
58848- atomic_inc(&n_rcu_torture_mberror);
58849+ atomic_inc_unchecked(&n_rcu_torture_mberror);
58850 cur_ops->read_delay(&rand);
58851 preempt_disable();
58852 pipe_count = p->rtort_pipe_count;
58853@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58854 rcu_torture_current,
58855 rcu_torture_current_version,
58856 list_empty(&rcu_torture_freelist),
58857- atomic_read(&n_rcu_torture_alloc),
58858- atomic_read(&n_rcu_torture_alloc_fail),
58859- atomic_read(&n_rcu_torture_free),
58860- atomic_read(&n_rcu_torture_mberror),
58861+ atomic_read_unchecked(&n_rcu_torture_alloc),
58862+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58863+ atomic_read_unchecked(&n_rcu_torture_free),
58864+ atomic_read_unchecked(&n_rcu_torture_mberror),
58865 n_rcu_torture_boost_ktrerror,
58866 n_rcu_torture_boost_rterror,
58867 n_rcu_torture_boost_failure,
58868 n_rcu_torture_boosts,
58869 n_rcu_torture_timers);
58870- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58871+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58872 n_rcu_torture_boost_ktrerror != 0 ||
58873 n_rcu_torture_boost_rterror != 0 ||
58874 n_rcu_torture_boost_failure != 0)
58875@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58876 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58877 if (i > 1) {
58878 cnt += sprintf(&page[cnt], "!!! ");
58879- atomic_inc(&n_rcu_torture_error);
58880+ atomic_inc_unchecked(&n_rcu_torture_error);
58881 WARN_ON_ONCE(1);
58882 }
58883 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58884@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58885 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58886 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58887 cnt += sprintf(&page[cnt], " %d",
58888- atomic_read(&rcu_torture_wcount[i]));
58889+ atomic_read_unchecked(&rcu_torture_wcount[i]));
58890 }
58891 cnt += sprintf(&page[cnt], "\n");
58892 if (cur_ops->stats)
58893@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58894
58895 if (cur_ops->cleanup)
58896 cur_ops->cleanup();
58897- if (atomic_read(&n_rcu_torture_error))
58898+ if (atomic_read_unchecked(&n_rcu_torture_error))
58899 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58900 else
58901 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58902@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58903
58904 rcu_torture_current = NULL;
58905 rcu_torture_current_version = 0;
58906- atomic_set(&n_rcu_torture_alloc, 0);
58907- atomic_set(&n_rcu_torture_alloc_fail, 0);
58908- atomic_set(&n_rcu_torture_free, 0);
58909- atomic_set(&n_rcu_torture_mberror, 0);
58910- atomic_set(&n_rcu_torture_error, 0);
58911+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58912+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58913+ atomic_set_unchecked(&n_rcu_torture_free, 0);
58914+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58915+ atomic_set_unchecked(&n_rcu_torture_error, 0);
58916 n_rcu_torture_boost_ktrerror = 0;
58917 n_rcu_torture_boost_rterror = 0;
58918 n_rcu_torture_boost_failure = 0;
58919 n_rcu_torture_boosts = 0;
58920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58921- atomic_set(&rcu_torture_wcount[i], 0);
58922+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58923 for_each_possible_cpu(cpu) {
58924 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58925 per_cpu(rcu_torture_count, cpu)[i] = 0;
58926diff -urNp linux-3.0.3/kernel/rcutree.c linux-3.0.3/kernel/rcutree.c
58927--- linux-3.0.3/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58928+++ linux-3.0.3/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58929@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58930 /*
58931 * Do softirq processing for the current CPU.
58932 */
58933-static void rcu_process_callbacks(struct softirq_action *unused)
58934+static void rcu_process_callbacks(void)
58935 {
58936 __rcu_process_callbacks(&rcu_sched_state,
58937 &__get_cpu_var(rcu_sched_data));
58938diff -urNp linux-3.0.3/kernel/rcutree_plugin.h linux-3.0.3/kernel/rcutree_plugin.h
58939--- linux-3.0.3/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58940+++ linux-3.0.3/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58941@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58942
58943 /* Clean up and exit. */
58944 smp_mb(); /* ensure expedited GP seen before counter increment. */
58945- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58946+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58947 unlock_mb_ret:
58948 mutex_unlock(&sync_rcu_preempt_exp_mutex);
58949 mb_ret:
58950@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
58951
58952 #else /* #ifndef CONFIG_SMP */
58953
58954-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
58955-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
58956+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
58957+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
58958
58959 static int synchronize_sched_expedited_cpu_stop(void *data)
58960 {
58961@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
58962 int firstsnap, s, snap, trycount = 0;
58963
58964 /* Note that atomic_inc_return() implies full memory barrier. */
58965- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
58966+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
58967 get_online_cpus();
58968
58969 /*
58970@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
58971 }
58972
58973 /* Check to see if someone else did our work for us. */
58974- s = atomic_read(&sync_sched_expedited_done);
58975+ s = atomic_read_unchecked(&sync_sched_expedited_done);
58976 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
58977 smp_mb(); /* ensure test happens before caller kfree */
58978 return;
58979@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
58980 * grace period works for us.
58981 */
58982 get_online_cpus();
58983- snap = atomic_read(&sync_sched_expedited_started) - 1;
58984+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
58985 smp_mb(); /* ensure read is before try_stop_cpus(). */
58986 }
58987
58988@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
58989 * than we did beat us to the punch.
58990 */
58991 do {
58992- s = atomic_read(&sync_sched_expedited_done);
58993+ s = atomic_read_unchecked(&sync_sched_expedited_done);
58994 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
58995 smp_mb(); /* ensure test happens before caller kfree */
58996 break;
58997 }
58998- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
58999+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59000
59001 put_online_cpus();
59002 }
59003diff -urNp linux-3.0.3/kernel/relay.c linux-3.0.3/kernel/relay.c
59004--- linux-3.0.3/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
59005+++ linux-3.0.3/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
59006@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59007 };
59008 ssize_t ret;
59009
59010+ pax_track_stack();
59011+
59012 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59013 return 0;
59014 if (splice_grow_spd(pipe, &spd))
59015diff -urNp linux-3.0.3/kernel/resource.c linux-3.0.3/kernel/resource.c
59016--- linux-3.0.3/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
59017+++ linux-3.0.3/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
59018@@ -141,8 +141,18 @@ static const struct file_operations proc
59019
59020 static int __init ioresources_init(void)
59021 {
59022+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59023+#ifdef CONFIG_GRKERNSEC_PROC_USER
59024+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59025+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59027+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59028+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59029+#endif
59030+#else
59031 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59032 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59033+#endif
59034 return 0;
59035 }
59036 __initcall(ioresources_init);
59037diff -urNp linux-3.0.3/kernel/rtmutex-tester.c linux-3.0.3/kernel/rtmutex-tester.c
59038--- linux-3.0.3/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
59039+++ linux-3.0.3/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
59040@@ -20,7 +20,7 @@
59041 #define MAX_RT_TEST_MUTEXES 8
59042
59043 static spinlock_t rttest_lock;
59044-static atomic_t rttest_event;
59045+static atomic_unchecked_t rttest_event;
59046
59047 struct test_thread_data {
59048 int opcode;
59049@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59050
59051 case RTTEST_LOCKCONT:
59052 td->mutexes[td->opdata] = 1;
59053- td->event = atomic_add_return(1, &rttest_event);
59054+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59055 return 0;
59056
59057 case RTTEST_RESET:
59058@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59059 return 0;
59060
59061 case RTTEST_RESETEVENT:
59062- atomic_set(&rttest_event, 0);
59063+ atomic_set_unchecked(&rttest_event, 0);
59064 return 0;
59065
59066 default:
59067@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59068 return ret;
59069
59070 td->mutexes[id] = 1;
59071- td->event = atomic_add_return(1, &rttest_event);
59072+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59073 rt_mutex_lock(&mutexes[id]);
59074- td->event = atomic_add_return(1, &rttest_event);
59075+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59076 td->mutexes[id] = 4;
59077 return 0;
59078
59079@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59080 return ret;
59081
59082 td->mutexes[id] = 1;
59083- td->event = atomic_add_return(1, &rttest_event);
59084+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59085 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59086- td->event = atomic_add_return(1, &rttest_event);
59087+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59088 td->mutexes[id] = ret ? 0 : 4;
59089 return ret ? -EINTR : 0;
59090
59091@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59092 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59093 return ret;
59094
59095- td->event = atomic_add_return(1, &rttest_event);
59096+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59097 rt_mutex_unlock(&mutexes[id]);
59098- td->event = atomic_add_return(1, &rttest_event);
59099+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59100 td->mutexes[id] = 0;
59101 return 0;
59102
59103@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59104 break;
59105
59106 td->mutexes[dat] = 2;
59107- td->event = atomic_add_return(1, &rttest_event);
59108+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59109 break;
59110
59111 default:
59112@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59113 return;
59114
59115 td->mutexes[dat] = 3;
59116- td->event = atomic_add_return(1, &rttest_event);
59117+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59118 break;
59119
59120 case RTTEST_LOCKNOWAIT:
59121@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59122 return;
59123
59124 td->mutexes[dat] = 1;
59125- td->event = atomic_add_return(1, &rttest_event);
59126+ td->event = atomic_add_return_unchecked(1, &rttest_event);
59127 return;
59128
59129 default:
59130diff -urNp linux-3.0.3/kernel/sched_autogroup.c linux-3.0.3/kernel/sched_autogroup.c
59131--- linux-3.0.3/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
59132+++ linux-3.0.3/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
59133@@ -7,7 +7,7 @@
59134
59135 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59136 static struct autogroup autogroup_default;
59137-static atomic_t autogroup_seq_nr;
59138+static atomic_unchecked_t autogroup_seq_nr;
59139
59140 static void __init autogroup_init(struct task_struct *init_task)
59141 {
59142@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59143
59144 kref_init(&ag->kref);
59145 init_rwsem(&ag->lock);
59146- ag->id = atomic_inc_return(&autogroup_seq_nr);
59147+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59148 ag->tg = tg;
59149 #ifdef CONFIG_RT_GROUP_SCHED
59150 /*
59151diff -urNp linux-3.0.3/kernel/sched.c linux-3.0.3/kernel/sched.c
59152--- linux-3.0.3/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
59153+++ linux-3.0.3/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
59154@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
59155 struct rq *rq;
59156 int cpu;
59157
59158+ pax_track_stack();
59159+
59160 need_resched:
59161 preempt_disable();
59162 cpu = smp_processor_id();
59163@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
59164 /* convert nice value [19,-20] to rlimit style value [1,40] */
59165 int nice_rlim = 20 - nice;
59166
59167+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59168+
59169 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59170 capable(CAP_SYS_NICE));
59171 }
59172@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59173 if (nice > 19)
59174 nice = 19;
59175
59176- if (increment < 0 && !can_nice(current, nice))
59177+ if (increment < 0 && (!can_nice(current, nice) ||
59178+ gr_handle_chroot_nice()))
59179 return -EPERM;
59180
59181 retval = security_task_setnice(current, nice);
59182@@ -5111,6 +5116,7 @@ recheck:
59183 unsigned long rlim_rtprio =
59184 task_rlimit(p, RLIMIT_RTPRIO);
59185
59186+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59187 /* can't set/change the rt policy */
59188 if (policy != p->policy && !rlim_rtprio)
59189 return -EPERM;
59190diff -urNp linux-3.0.3/kernel/sched_fair.c linux-3.0.3/kernel/sched_fair.c
59191--- linux-3.0.3/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
59192+++ linux-3.0.3/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
59193@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
59194 * run_rebalance_domains is triggered when needed from the scheduler tick.
59195 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59196 */
59197-static void run_rebalance_domains(struct softirq_action *h)
59198+static void run_rebalance_domains(void)
59199 {
59200 int this_cpu = smp_processor_id();
59201 struct rq *this_rq = cpu_rq(this_cpu);
59202diff -urNp linux-3.0.3/kernel/signal.c linux-3.0.3/kernel/signal.c
59203--- linux-3.0.3/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
59204+++ linux-3.0.3/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
59205@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59206
59207 int print_fatal_signals __read_mostly;
59208
59209-static void __user *sig_handler(struct task_struct *t, int sig)
59210+static __sighandler_t sig_handler(struct task_struct *t, int sig)
59211 {
59212 return t->sighand->action[sig - 1].sa.sa_handler;
59213 }
59214
59215-static int sig_handler_ignored(void __user *handler, int sig)
59216+static int sig_handler_ignored(__sighandler_t handler, int sig)
59217 {
59218 /* Is it explicitly or implicitly ignored? */
59219 return handler == SIG_IGN ||
59220@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59221 static int sig_task_ignored(struct task_struct *t, int sig,
59222 int from_ancestor_ns)
59223 {
59224- void __user *handler;
59225+ __sighandler_t handler;
59226
59227 handler = sig_handler(t, sig);
59228
59229@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
59230 atomic_inc(&user->sigpending);
59231 rcu_read_unlock();
59232
59233+ if (!override_rlimit)
59234+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59235+
59236 if (override_rlimit ||
59237 atomic_read(&user->sigpending) <=
59238 task_rlimit(t, RLIMIT_SIGPENDING)) {
59239@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
59240
59241 int unhandled_signal(struct task_struct *tsk, int sig)
59242 {
59243- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59244+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59245 if (is_global_init(tsk))
59246 return 1;
59247 if (handler != SIG_IGN && handler != SIG_DFL)
59248@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
59249 }
59250 }
59251
59252+ /* allow glibc communication via tgkill to other threads in our
59253+ thread group */
59254+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59255+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59256+ && gr_handle_signal(t, sig))
59257+ return -EPERM;
59258+
59259 return security_task_kill(t, info, sig, 0);
59260 }
59261
59262@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59263 return send_signal(sig, info, p, 1);
59264 }
59265
59266-static int
59267+int
59268 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59269 {
59270 return send_signal(sig, info, t, 0);
59271@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59272 unsigned long int flags;
59273 int ret, blocked, ignored;
59274 struct k_sigaction *action;
59275+ int is_unhandled = 0;
59276
59277 spin_lock_irqsave(&t->sighand->siglock, flags);
59278 action = &t->sighand->action[sig-1];
59279@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59280 }
59281 if (action->sa.sa_handler == SIG_DFL)
59282 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59283+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59284+ is_unhandled = 1;
59285 ret = specific_send_sig_info(sig, info, t);
59286 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59287
59288+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
59289+ normal operation */
59290+ if (is_unhandled) {
59291+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59292+ gr_handle_crash(t, sig);
59293+ }
59294+
59295 return ret;
59296 }
59297
59298@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59299 ret = check_kill_permission(sig, info, p);
59300 rcu_read_unlock();
59301
59302- if (!ret && sig)
59303+ if (!ret && sig) {
59304 ret = do_send_sig_info(sig, info, p, true);
59305+ if (!ret)
59306+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59307+ }
59308
59309 return ret;
59310 }
59311@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59312 {
59313 siginfo_t info;
59314
59315+ pax_track_stack();
59316+
59317 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59318
59319 memset(&info, 0, sizeof info);
59320@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59321 int error = -ESRCH;
59322
59323 rcu_read_lock();
59324- p = find_task_by_vpid(pid);
59325+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59326+ /* allow glibc communication via tgkill to other threads in our
59327+ thread group */
59328+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59329+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
59330+ p = find_task_by_vpid_unrestricted(pid);
59331+ else
59332+#endif
59333+ p = find_task_by_vpid(pid);
59334 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59335 error = check_kill_permission(sig, info, p);
59336 /*
59337diff -urNp linux-3.0.3/kernel/smp.c linux-3.0.3/kernel/smp.c
59338--- linux-3.0.3/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59339+++ linux-3.0.3/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59340@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59341 }
59342 EXPORT_SYMBOL(smp_call_function);
59343
59344-void ipi_call_lock(void)
59345+void ipi_call_lock(void) __acquires(call_function.lock)
59346 {
59347 raw_spin_lock(&call_function.lock);
59348 }
59349
59350-void ipi_call_unlock(void)
59351+void ipi_call_unlock(void) __releases(call_function.lock)
59352 {
59353 raw_spin_unlock(&call_function.lock);
59354 }
59355
59356-void ipi_call_lock_irq(void)
59357+void ipi_call_lock_irq(void) __acquires(call_function.lock)
59358 {
59359 raw_spin_lock_irq(&call_function.lock);
59360 }
59361
59362-void ipi_call_unlock_irq(void)
59363+void ipi_call_unlock_irq(void) __releases(call_function.lock)
59364 {
59365 raw_spin_unlock_irq(&call_function.lock);
59366 }
59367diff -urNp linux-3.0.3/kernel/softirq.c linux-3.0.3/kernel/softirq.c
59368--- linux-3.0.3/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59369+++ linux-3.0.3/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59370@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59371
59372 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59373
59374-char *softirq_to_name[NR_SOFTIRQS] = {
59375+const char * const softirq_to_name[NR_SOFTIRQS] = {
59376 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59377 "TASKLET", "SCHED", "HRTIMER", "RCU"
59378 };
59379@@ -235,7 +235,7 @@ restart:
59380 kstat_incr_softirqs_this_cpu(vec_nr);
59381
59382 trace_softirq_entry(vec_nr);
59383- h->action(h);
59384+ h->action();
59385 trace_softirq_exit(vec_nr);
59386 if (unlikely(prev_count != preempt_count())) {
59387 printk(KERN_ERR "huh, entered softirq %u %s %p"
59388@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59389 local_irq_restore(flags);
59390 }
59391
59392-void open_softirq(int nr, void (*action)(struct softirq_action *))
59393+void open_softirq(int nr, void (*action)(void))
59394 {
59395- softirq_vec[nr].action = action;
59396+ pax_open_kernel();
59397+ *(void **)&softirq_vec[nr].action = action;
59398+ pax_close_kernel();
59399 }
59400
59401 /*
59402@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59403
59404 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59405
59406-static void tasklet_action(struct softirq_action *a)
59407+static void tasklet_action(void)
59408 {
59409 struct tasklet_struct *list;
59410
59411@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59412 }
59413 }
59414
59415-static void tasklet_hi_action(struct softirq_action *a)
59416+static void tasklet_hi_action(void)
59417 {
59418 struct tasklet_struct *list;
59419
59420diff -urNp linux-3.0.3/kernel/sys.c linux-3.0.3/kernel/sys.c
59421--- linux-3.0.3/kernel/sys.c 2011-07-21 22:17:23.000000000 -0400
59422+++ linux-3.0.3/kernel/sys.c 2011-08-25 17:24:58.000000000 -0400
59423@@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59424 error = -EACCES;
59425 goto out;
59426 }
59427+
59428+ if (gr_handle_chroot_setpriority(p, niceval)) {
59429+ error = -EACCES;
59430+ goto out;
59431+ }
59432+
59433 no_nice = security_task_setnice(p, niceval);
59434 if (no_nice) {
59435 error = no_nice;
59436@@ -537,6 +543,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59437 goto error;
59438 }
59439
59440+ if (gr_check_group_change(new->gid, new->egid, -1))
59441+ goto error;
59442+
59443 if (rgid != (gid_t) -1 ||
59444 (egid != (gid_t) -1 && egid != old->gid))
59445 new->sgid = new->egid;
59446@@ -566,6 +575,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59447 old = current_cred();
59448
59449 retval = -EPERM;
59450+
59451+ if (gr_check_group_change(gid, gid, gid))
59452+ goto error;
59453+
59454 if (nsown_capable(CAP_SETGID))
59455 new->gid = new->egid = new->sgid = new->fsgid = gid;
59456 else if (gid == old->gid || gid == old->sgid)
59457@@ -591,11 +604,18 @@ static int set_user(struct cred *new)
59458 if (!new_user)
59459 return -EAGAIN;
59460
59461+ /*
59462+ * We don't fail in case of NPROC limit excess here because too many
59463+ * poorly written programs don't check set*uid() return code, assuming
59464+ * it never fails if called by root. We may still enforce NPROC limit
59465+ * for programs doing set*uid()+execve() by harmlessly deferring the
59466+ * failure to the execve() stage.
59467+ */
59468 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
59469- new_user != INIT_USER) {
59470- free_uid(new_user);
59471- return -EAGAIN;
59472- }
59473+ new_user != INIT_USER)
59474+ current->flags |= PF_NPROC_EXCEEDED;
59475+ else
59476+ current->flags &= ~PF_NPROC_EXCEEDED;
59477
59478 free_uid(new->user);
59479 new->user = new_user;
59480@@ -646,6 +666,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59481 goto error;
59482 }
59483
59484+ if (gr_check_user_change(new->uid, new->euid, -1))
59485+ goto error;
59486+
59487 if (new->uid != old->uid) {
59488 retval = set_user(new);
59489 if (retval < 0)
59490@@ -690,6 +713,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59491 old = current_cred();
59492
59493 retval = -EPERM;
59494+
59495+ if (gr_check_crash_uid(uid))
59496+ goto error;
59497+ if (gr_check_user_change(uid, uid, uid))
59498+ goto error;
59499+
59500 if (nsown_capable(CAP_SETUID)) {
59501 new->suid = new->uid = uid;
59502 if (uid != old->uid) {
59503@@ -744,6 +773,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59504 goto error;
59505 }
59506
59507+ if (gr_check_user_change(ruid, euid, -1))
59508+ goto error;
59509+
59510 if (ruid != (uid_t) -1) {
59511 new->uid = ruid;
59512 if (ruid != old->uid) {
59513@@ -808,6 +840,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59514 goto error;
59515 }
59516
59517+ if (gr_check_group_change(rgid, egid, -1))
59518+ goto error;
59519+
59520 if (rgid != (gid_t) -1)
59521 new->gid = rgid;
59522 if (egid != (gid_t) -1)
59523@@ -854,6 +889,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59524 old = current_cred();
59525 old_fsuid = old->fsuid;
59526
59527+ if (gr_check_user_change(-1, -1, uid))
59528+ goto error;
59529+
59530 if (uid == old->uid || uid == old->euid ||
59531 uid == old->suid || uid == old->fsuid ||
59532 nsown_capable(CAP_SETUID)) {
59533@@ -864,6 +902,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59534 }
59535 }
59536
59537+error:
59538 abort_creds(new);
59539 return old_fsuid;
59540
59541@@ -890,12 +929,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59542 if (gid == old->gid || gid == old->egid ||
59543 gid == old->sgid || gid == old->fsgid ||
59544 nsown_capable(CAP_SETGID)) {
59545+ if (gr_check_group_change(-1, -1, gid))
59546+ goto error;
59547+
59548 if (gid != old_fsgid) {
59549 new->fsgid = gid;
59550 goto change_okay;
59551 }
59552 }
59553
59554+error:
59555 abort_creds(new);
59556 return old_fsgid;
59557
59558@@ -1642,7 +1685,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59559 error = get_dumpable(me->mm);
59560 break;
59561 case PR_SET_DUMPABLE:
59562- if (arg2 < 0 || arg2 > 1) {
59563+ if (arg2 > 1) {
59564 error = -EINVAL;
59565 break;
59566 }
59567diff -urNp linux-3.0.3/kernel/sysctl.c linux-3.0.3/kernel/sysctl.c
59568--- linux-3.0.3/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59569+++ linux-3.0.3/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59570@@ -85,6 +85,13 @@
59571
59572
59573 #if defined(CONFIG_SYSCTL)
59574+#include <linux/grsecurity.h>
59575+#include <linux/grinternal.h>
59576+
59577+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59578+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59579+ const int op);
59580+extern int gr_handle_chroot_sysctl(const int op);
59581
59582 /* External variables not in a header file. */
59583 extern int sysctl_overcommit_memory;
59584@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59585 }
59586
59587 #endif
59588+extern struct ctl_table grsecurity_table[];
59589
59590 static struct ctl_table root_table[];
59591 static struct ctl_table_root sysctl_table_root;
59592@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59593 int sysctl_legacy_va_layout;
59594 #endif
59595
59596+#ifdef CONFIG_PAX_SOFTMODE
59597+static ctl_table pax_table[] = {
59598+ {
59599+ .procname = "softmode",
59600+ .data = &pax_softmode,
59601+ .maxlen = sizeof(unsigned int),
59602+ .mode = 0600,
59603+ .proc_handler = &proc_dointvec,
59604+ },
59605+
59606+ { }
59607+};
59608+#endif
59609+
59610 /* The default sysctl tables: */
59611
59612 static struct ctl_table root_table[] = {
59613@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59614 #endif
59615
59616 static struct ctl_table kern_table[] = {
59617+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59618+ {
59619+ .procname = "grsecurity",
59620+ .mode = 0500,
59621+ .child = grsecurity_table,
59622+ },
59623+#endif
59624+
59625+#ifdef CONFIG_PAX_SOFTMODE
59626+ {
59627+ .procname = "pax",
59628+ .mode = 0500,
59629+ .child = pax_table,
59630+ },
59631+#endif
59632+
59633 {
59634 .procname = "sched_child_runs_first",
59635 .data = &sysctl_sched_child_runs_first,
59636@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59637 .data = &modprobe_path,
59638 .maxlen = KMOD_PATH_LEN,
59639 .mode = 0644,
59640- .proc_handler = proc_dostring,
59641+ .proc_handler = proc_dostring_modpriv,
59642 },
59643 {
59644 .procname = "modules_disabled",
59645@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59646 .extra1 = &zero,
59647 .extra2 = &one,
59648 },
59649+#endif
59650 {
59651 .procname = "kptr_restrict",
59652 .data = &kptr_restrict,
59653 .maxlen = sizeof(int),
59654 .mode = 0644,
59655 .proc_handler = proc_dmesg_restrict,
59656+#ifdef CONFIG_GRKERNSEC_HIDESYM
59657+ .extra1 = &two,
59658+#else
59659 .extra1 = &zero,
59660+#endif
59661 .extra2 = &two,
59662 },
59663-#endif
59664 {
59665 .procname = "ngroups_max",
59666 .data = &ngroups_max,
59667@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59668 .proc_handler = proc_dointvec_minmax,
59669 .extra1 = &zero,
59670 },
59671+ {
59672+ .procname = "heap_stack_gap",
59673+ .data = &sysctl_heap_stack_gap,
59674+ .maxlen = sizeof(sysctl_heap_stack_gap),
59675+ .mode = 0644,
59676+ .proc_handler = proc_doulongvec_minmax,
59677+ },
59678 #else
59679 {
59680 .procname = "nr_trim_pages",
59681@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59682 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59683 {
59684 int mode;
59685+ int error;
59686+
59687+ if (table->parent != NULL && table->parent->procname != NULL &&
59688+ table->procname != NULL &&
59689+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59690+ return -EACCES;
59691+ if (gr_handle_chroot_sysctl(op))
59692+ return -EACCES;
59693+ error = gr_handle_sysctl(table, op);
59694+ if (error)
59695+ return error;
59696
59697 if (root->permissions)
59698 mode = root->permissions(root, current->nsproxy, table);
59699@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59700 buffer, lenp, ppos);
59701 }
59702
59703+int proc_dostring_modpriv(struct ctl_table *table, int write,
59704+ void __user *buffer, size_t *lenp, loff_t *ppos)
59705+{
59706+ if (write && !capable(CAP_SYS_MODULE))
59707+ return -EPERM;
59708+
59709+ return _proc_do_string(table->data, table->maxlen, write,
59710+ buffer, lenp, ppos);
59711+}
59712+
59713 static size_t proc_skip_spaces(char **buf)
59714 {
59715 size_t ret;
59716@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59717 len = strlen(tmp);
59718 if (len > *size)
59719 len = *size;
59720+ if (len > sizeof(tmp))
59721+ len = sizeof(tmp);
59722 if (copy_to_user(*buf, tmp, len))
59723 return -EFAULT;
59724 *size -= len;
59725@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59726 *i = val;
59727 } else {
59728 val = convdiv * (*i) / convmul;
59729- if (!first)
59730+ if (!first) {
59731 err = proc_put_char(&buffer, &left, '\t');
59732+ if (err)
59733+ break;
59734+ }
59735 err = proc_put_long(&buffer, &left, val, false);
59736 if (err)
59737 break;
59738@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59739 return -ENOSYS;
59740 }
59741
59742+int proc_dostring_modpriv(struct ctl_table *table, int write,
59743+ void __user *buffer, size_t *lenp, loff_t *ppos)
59744+{
59745+ return -ENOSYS;
59746+}
59747+
59748 int proc_dointvec(struct ctl_table *table, int write,
59749 void __user *buffer, size_t *lenp, loff_t *ppos)
59750 {
59751@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59752 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59753 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59754 EXPORT_SYMBOL(proc_dostring);
59755+EXPORT_SYMBOL(proc_dostring_modpriv);
59756 EXPORT_SYMBOL(proc_doulongvec_minmax);
59757 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59758 EXPORT_SYMBOL(register_sysctl_table);
59759diff -urNp linux-3.0.3/kernel/sysctl_check.c linux-3.0.3/kernel/sysctl_check.c
59760--- linux-3.0.3/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59761+++ linux-3.0.3/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59762@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59763 set_fail(&fail, table, "Directory with extra2");
59764 } else {
59765 if ((table->proc_handler == proc_dostring) ||
59766+ (table->proc_handler == proc_dostring_modpriv) ||
59767 (table->proc_handler == proc_dointvec) ||
59768 (table->proc_handler == proc_dointvec_minmax) ||
59769 (table->proc_handler == proc_dointvec_jiffies) ||
59770diff -urNp linux-3.0.3/kernel/taskstats.c linux-3.0.3/kernel/taskstats.c
59771--- linux-3.0.3/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59772+++ linux-3.0.3/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59773@@ -27,9 +27,12 @@
59774 #include <linux/cgroup.h>
59775 #include <linux/fs.h>
59776 #include <linux/file.h>
59777+#include <linux/grsecurity.h>
59778 #include <net/genetlink.h>
59779 #include <asm/atomic.h>
59780
59781+extern int gr_is_taskstats_denied(int pid);
59782+
59783 /*
59784 * Maximum length of a cpumask that can be specified in
59785 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59786@@ -558,6 +561,9 @@ err:
59787
59788 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59789 {
59790+ if (gr_is_taskstats_denied(current->pid))
59791+ return -EACCES;
59792+
59793 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59794 return cmd_attr_register_cpumask(info);
59795 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59796diff -urNp linux-3.0.3/kernel/time/alarmtimer.c linux-3.0.3/kernel/time/alarmtimer.c
59797--- linux-3.0.3/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59798+++ linux-3.0.3/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59799@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59800 {
59801 int error = 0;
59802 int i;
59803- struct k_clock alarm_clock = {
59804+ static struct k_clock alarm_clock = {
59805 .clock_getres = alarm_clock_getres,
59806 .clock_get = alarm_clock_get,
59807 .timer_create = alarm_timer_create,
59808diff -urNp linux-3.0.3/kernel/time/tick-broadcast.c linux-3.0.3/kernel/time/tick-broadcast.c
59809--- linux-3.0.3/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59810+++ linux-3.0.3/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59811@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59812 * then clear the broadcast bit.
59813 */
59814 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59815- int cpu = smp_processor_id();
59816+ cpu = smp_processor_id();
59817
59818 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59819 tick_broadcast_clear_oneshot(cpu);
59820diff -urNp linux-3.0.3/kernel/time/timekeeping.c linux-3.0.3/kernel/time/timekeeping.c
59821--- linux-3.0.3/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59822+++ linux-3.0.3/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59823@@ -14,6 +14,7 @@
59824 #include <linux/init.h>
59825 #include <linux/mm.h>
59826 #include <linux/sched.h>
59827+#include <linux/grsecurity.h>
59828 #include <linux/syscore_ops.h>
59829 #include <linux/clocksource.h>
59830 #include <linux/jiffies.h>
59831@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59832 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59833 return -EINVAL;
59834
59835+ gr_log_timechange();
59836+
59837 write_seqlock_irqsave(&xtime_lock, flags);
59838
59839 timekeeping_forward_now();
59840diff -urNp linux-3.0.3/kernel/time/timer_list.c linux-3.0.3/kernel/time/timer_list.c
59841--- linux-3.0.3/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59842+++ linux-3.0.3/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59843@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59844
59845 static void print_name_offset(struct seq_file *m, void *sym)
59846 {
59847+#ifdef CONFIG_GRKERNSEC_HIDESYM
59848+ SEQ_printf(m, "<%p>", NULL);
59849+#else
59850 char symname[KSYM_NAME_LEN];
59851
59852 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59853 SEQ_printf(m, "<%pK>", sym);
59854 else
59855 SEQ_printf(m, "%s", symname);
59856+#endif
59857 }
59858
59859 static void
59860@@ -112,7 +116,11 @@ next_one:
59861 static void
59862 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59863 {
59864+#ifdef CONFIG_GRKERNSEC_HIDESYM
59865+ SEQ_printf(m, " .base: %p\n", NULL);
59866+#else
59867 SEQ_printf(m, " .base: %pK\n", base);
59868+#endif
59869 SEQ_printf(m, " .index: %d\n",
59870 base->index);
59871 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59872@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59873 {
59874 struct proc_dir_entry *pe;
59875
59876+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59877+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59878+#else
59879 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59880+#endif
59881 if (!pe)
59882 return -ENOMEM;
59883 return 0;
59884diff -urNp linux-3.0.3/kernel/time/timer_stats.c linux-3.0.3/kernel/time/timer_stats.c
59885--- linux-3.0.3/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59886+++ linux-3.0.3/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59887@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59888 static unsigned long nr_entries;
59889 static struct entry entries[MAX_ENTRIES];
59890
59891-static atomic_t overflow_count;
59892+static atomic_unchecked_t overflow_count;
59893
59894 /*
59895 * The entries are in a hash-table, for fast lookup:
59896@@ -140,7 +140,7 @@ static void reset_entries(void)
59897 nr_entries = 0;
59898 memset(entries, 0, sizeof(entries));
59899 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59900- atomic_set(&overflow_count, 0);
59901+ atomic_set_unchecked(&overflow_count, 0);
59902 }
59903
59904 static struct entry *alloc_entry(void)
59905@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59906 if (likely(entry))
59907 entry->count++;
59908 else
59909- atomic_inc(&overflow_count);
59910+ atomic_inc_unchecked(&overflow_count);
59911
59912 out_unlock:
59913 raw_spin_unlock_irqrestore(lock, flags);
59914@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59915
59916 static void print_name_offset(struct seq_file *m, unsigned long addr)
59917 {
59918+#ifdef CONFIG_GRKERNSEC_HIDESYM
59919+ seq_printf(m, "<%p>", NULL);
59920+#else
59921 char symname[KSYM_NAME_LEN];
59922
59923 if (lookup_symbol_name(addr, symname) < 0)
59924 seq_printf(m, "<%p>", (void *)addr);
59925 else
59926 seq_printf(m, "%s", symname);
59927+#endif
59928 }
59929
59930 static int tstats_show(struct seq_file *m, void *v)
59931@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59932
59933 seq_puts(m, "Timer Stats Version: v0.2\n");
59934 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59935- if (atomic_read(&overflow_count))
59936+ if (atomic_read_unchecked(&overflow_count))
59937 seq_printf(m, "Overflow: %d entries\n",
59938- atomic_read(&overflow_count));
59939+ atomic_read_unchecked(&overflow_count));
59940
59941 for (i = 0; i < nr_entries; i++) {
59942 entry = entries + i;
59943@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59944 {
59945 struct proc_dir_entry *pe;
59946
59947+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59948+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
59949+#else
59950 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
59951+#endif
59952 if (!pe)
59953 return -ENOMEM;
59954 return 0;
59955diff -urNp linux-3.0.3/kernel/time.c linux-3.0.3/kernel/time.c
59956--- linux-3.0.3/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
59957+++ linux-3.0.3/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
59958@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
59959 return error;
59960
59961 if (tz) {
59962+ /* we log in do_settimeofday called below, so don't log twice
59963+ */
59964+ if (!tv)
59965+ gr_log_timechange();
59966+
59967 /* SMP safe, global irq locking makes it work. */
59968 sys_tz = *tz;
59969 update_vsyscall_tz();
59970diff -urNp linux-3.0.3/kernel/timer.c linux-3.0.3/kernel/timer.c
59971--- linux-3.0.3/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
59972+++ linux-3.0.3/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
59973@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
59974 /*
59975 * This function runs timers and the timer-tq in bottom half context.
59976 */
59977-static void run_timer_softirq(struct softirq_action *h)
59978+static void run_timer_softirq(void)
59979 {
59980 struct tvec_base *base = __this_cpu_read(tvec_bases);
59981
59982diff -urNp linux-3.0.3/kernel/trace/blktrace.c linux-3.0.3/kernel/trace/blktrace.c
59983--- linux-3.0.3/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
59984+++ linux-3.0.3/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
59985@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
59986 struct blk_trace *bt = filp->private_data;
59987 char buf[16];
59988
59989- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
59990+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
59991
59992 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
59993 }
59994@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
59995 return 1;
59996
59997 bt = buf->chan->private_data;
59998- atomic_inc(&bt->dropped);
59999+ atomic_inc_unchecked(&bt->dropped);
60000 return 0;
60001 }
60002
60003@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60004
60005 bt->dir = dir;
60006 bt->dev = dev;
60007- atomic_set(&bt->dropped, 0);
60008+ atomic_set_unchecked(&bt->dropped, 0);
60009
60010 ret = -EIO;
60011 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60012diff -urNp linux-3.0.3/kernel/trace/ftrace.c linux-3.0.3/kernel/trace/ftrace.c
60013--- linux-3.0.3/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
60014+++ linux-3.0.3/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
60015@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
60016 if (unlikely(ftrace_disabled))
60017 return 0;
60018
60019+ ret = ftrace_arch_code_modify_prepare();
60020+ FTRACE_WARN_ON(ret);
60021+ if (ret)
60022+ return 0;
60023+
60024 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60025+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60026 if (ret) {
60027 ftrace_bug(ret, ip);
60028- return 0;
60029 }
60030- return 1;
60031+ return ret ? 0 : 1;
60032 }
60033
60034 /*
60035@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
60036
60037 int
60038 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60039- void *data)
60040+ void *data)
60041 {
60042 struct ftrace_func_probe *entry;
60043 struct ftrace_page *pg;
60044diff -urNp linux-3.0.3/kernel/trace/trace.c linux-3.0.3/kernel/trace/trace.c
60045--- linux-3.0.3/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
60046+++ linux-3.0.3/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
60047@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
60048 size_t rem;
60049 unsigned int i;
60050
60051+ pax_track_stack();
60052+
60053 if (splice_grow_spd(pipe, &spd))
60054 return -ENOMEM;
60055
60056@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
60057 int entries, size, i;
60058 size_t ret;
60059
60060+ pax_track_stack();
60061+
60062 if (splice_grow_spd(pipe, &spd))
60063 return -ENOMEM;
60064
60065@@ -3990,10 +3994,9 @@ static const struct file_operations trac
60066 };
60067 #endif
60068
60069-static struct dentry *d_tracer;
60070-
60071 struct dentry *tracing_init_dentry(void)
60072 {
60073+ static struct dentry *d_tracer;
60074 static int once;
60075
60076 if (d_tracer)
60077@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
60078 return d_tracer;
60079 }
60080
60081-static struct dentry *d_percpu;
60082-
60083 struct dentry *tracing_dentry_percpu(void)
60084 {
60085+ static struct dentry *d_percpu;
60086 static int once;
60087 struct dentry *d_tracer;
60088
60089diff -urNp linux-3.0.3/kernel/trace/trace_events.c linux-3.0.3/kernel/trace/trace_events.c
60090--- linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
60091+++ linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
60092@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
60093 struct ftrace_module_file_ops {
60094 struct list_head list;
60095 struct module *mod;
60096- struct file_operations id;
60097- struct file_operations enable;
60098- struct file_operations format;
60099- struct file_operations filter;
60100 };
60101
60102 static struct ftrace_module_file_ops *
60103@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
60104
60105 file_ops->mod = mod;
60106
60107- file_ops->id = ftrace_event_id_fops;
60108- file_ops->id.owner = mod;
60109-
60110- file_ops->enable = ftrace_enable_fops;
60111- file_ops->enable.owner = mod;
60112-
60113- file_ops->filter = ftrace_event_filter_fops;
60114- file_ops->filter.owner = mod;
60115-
60116- file_ops->format = ftrace_event_format_fops;
60117- file_ops->format.owner = mod;
60118+ pax_open_kernel();
60119+ *(void **)&mod->trace_id.owner = mod;
60120+ *(void **)&mod->trace_enable.owner = mod;
60121+ *(void **)&mod->trace_filter.owner = mod;
60122+ *(void **)&mod->trace_format.owner = mod;
60123+ pax_close_kernel();
60124
60125 list_add(&file_ops->list, &ftrace_module_file_list);
60126
60127@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
60128
60129 for_each_event(call, start, end) {
60130 __trace_add_event_call(*call, mod,
60131- &file_ops->id, &file_ops->enable,
60132- &file_ops->filter, &file_ops->format);
60133+ &mod->trace_id, &mod->trace_enable,
60134+ &mod->trace_filter, &mod->trace_format);
60135 }
60136 }
60137
60138diff -urNp linux-3.0.3/kernel/trace/trace_mmiotrace.c linux-3.0.3/kernel/trace/trace_mmiotrace.c
60139--- linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
60140+++ linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
60141@@ -24,7 +24,7 @@ struct header_iter {
60142 static struct trace_array *mmio_trace_array;
60143 static bool overrun_detected;
60144 static unsigned long prev_overruns;
60145-static atomic_t dropped_count;
60146+static atomic_unchecked_t dropped_count;
60147
60148 static void mmio_reset_data(struct trace_array *tr)
60149 {
60150@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60151
60152 static unsigned long count_overruns(struct trace_iterator *iter)
60153 {
60154- unsigned long cnt = atomic_xchg(&dropped_count, 0);
60155+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60156 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60157
60158 if (over > prev_overruns)
60159@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60160 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60161 sizeof(*entry), 0, pc);
60162 if (!event) {
60163- atomic_inc(&dropped_count);
60164+ atomic_inc_unchecked(&dropped_count);
60165 return;
60166 }
60167 entry = ring_buffer_event_data(event);
60168@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60169 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60170 sizeof(*entry), 0, pc);
60171 if (!event) {
60172- atomic_inc(&dropped_count);
60173+ atomic_inc_unchecked(&dropped_count);
60174 return;
60175 }
60176 entry = ring_buffer_event_data(event);
60177diff -urNp linux-3.0.3/kernel/trace/trace_output.c linux-3.0.3/kernel/trace/trace_output.c
60178--- linux-3.0.3/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
60179+++ linux-3.0.3/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
60180@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60181
60182 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60183 if (!IS_ERR(p)) {
60184- p = mangle_path(s->buffer + s->len, p, "\n");
60185+ p = mangle_path(s->buffer + s->len, p, "\n\\");
60186 if (p) {
60187 s->len = p - s->buffer;
60188 return 1;
60189diff -urNp linux-3.0.3/kernel/trace/trace_stack.c linux-3.0.3/kernel/trace/trace_stack.c
60190--- linux-3.0.3/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
60191+++ linux-3.0.3/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
60192@@ -50,7 +50,7 @@ static inline void check_stack(void)
60193 return;
60194
60195 /* we do not handle interrupt stacks yet */
60196- if (!object_is_on_stack(&this_size))
60197+ if (!object_starts_on_stack(&this_size))
60198 return;
60199
60200 local_irq_save(flags);
60201diff -urNp linux-3.0.3/kernel/trace/trace_workqueue.c linux-3.0.3/kernel/trace/trace_workqueue.c
60202--- linux-3.0.3/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
60203+++ linux-3.0.3/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
60204@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60205 int cpu;
60206 pid_t pid;
60207 /* Can be inserted from interrupt or user context, need to be atomic */
60208- atomic_t inserted;
60209+ atomic_unchecked_t inserted;
60210 /*
60211 * Don't need to be atomic, works are serialized in a single workqueue thread
60212 * on a single CPU.
60213@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60214 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60215 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60216 if (node->pid == wq_thread->pid) {
60217- atomic_inc(&node->inserted);
60218+ atomic_inc_unchecked(&node->inserted);
60219 goto found;
60220 }
60221 }
60222@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60223 tsk = get_pid_task(pid, PIDTYPE_PID);
60224 if (tsk) {
60225 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60226- atomic_read(&cws->inserted), cws->executed,
60227+ atomic_read_unchecked(&cws->inserted), cws->executed,
60228 tsk->comm);
60229 put_task_struct(tsk);
60230 }
60231diff -urNp linux-3.0.3/lib/bug.c linux-3.0.3/lib/bug.c
60232--- linux-3.0.3/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
60233+++ linux-3.0.3/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
60234@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60235 return BUG_TRAP_TYPE_NONE;
60236
60237 bug = find_bug(bugaddr);
60238+ if (!bug)
60239+ return BUG_TRAP_TYPE_NONE;
60240
60241 file = NULL;
60242 line = 0;
60243diff -urNp linux-3.0.3/lib/debugobjects.c linux-3.0.3/lib/debugobjects.c
60244--- linux-3.0.3/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
60245+++ linux-3.0.3/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
60246@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60247 if (limit > 4)
60248 return;
60249
60250- is_on_stack = object_is_on_stack(addr);
60251+ is_on_stack = object_starts_on_stack(addr);
60252 if (is_on_stack == onstack)
60253 return;
60254
60255diff -urNp linux-3.0.3/lib/dma-debug.c linux-3.0.3/lib/dma-debug.c
60256--- linux-3.0.3/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
60257+++ linux-3.0.3/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
60258@@ -870,7 +870,7 @@ out:
60259
60260 static void check_for_stack(struct device *dev, void *addr)
60261 {
60262- if (object_is_on_stack(addr))
60263+ if (object_starts_on_stack(addr))
60264 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60265 "stack [addr=%p]\n", addr);
60266 }
60267diff -urNp linux-3.0.3/lib/extable.c linux-3.0.3/lib/extable.c
60268--- linux-3.0.3/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
60269+++ linux-3.0.3/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
60270@@ -13,6 +13,7 @@
60271 #include <linux/init.h>
60272 #include <linux/sort.h>
60273 #include <asm/uaccess.h>
60274+#include <asm/pgtable.h>
60275
60276 #ifndef ARCH_HAS_SORT_EXTABLE
60277 /*
60278@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
60279 void sort_extable(struct exception_table_entry *start,
60280 struct exception_table_entry *finish)
60281 {
60282+ pax_open_kernel();
60283 sort(start, finish - start, sizeof(struct exception_table_entry),
60284 cmp_ex, NULL);
60285+ pax_close_kernel();
60286 }
60287
60288 #ifdef CONFIG_MODULES
60289diff -urNp linux-3.0.3/lib/inflate.c linux-3.0.3/lib/inflate.c
60290--- linux-3.0.3/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60291+++ linux-3.0.3/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60292@@ -269,7 +269,7 @@ static void free(void *where)
60293 malloc_ptr = free_mem_ptr;
60294 }
60295 #else
60296-#define malloc(a) kmalloc(a, GFP_KERNEL)
60297+#define malloc(a) kmalloc((a), GFP_KERNEL)
60298 #define free(a) kfree(a)
60299 #endif
60300
60301diff -urNp linux-3.0.3/lib/Kconfig.debug linux-3.0.3/lib/Kconfig.debug
60302--- linux-3.0.3/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60303+++ linux-3.0.3/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60304@@ -1088,6 +1088,7 @@ config LATENCYTOP
60305 depends on DEBUG_KERNEL
60306 depends on STACKTRACE_SUPPORT
60307 depends on PROC_FS
60308+ depends on !GRKERNSEC_HIDESYM
60309 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60310 select KALLSYMS
60311 select KALLSYMS_ALL
60312diff -urNp linux-3.0.3/lib/kref.c linux-3.0.3/lib/kref.c
60313--- linux-3.0.3/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60314+++ linux-3.0.3/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60315@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60316 */
60317 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60318 {
60319- WARN_ON(release == NULL);
60320+ BUG_ON(release == NULL);
60321 WARN_ON(release == (void (*)(struct kref *))kfree);
60322
60323 if (atomic_dec_and_test(&kref->refcount)) {
60324diff -urNp linux-3.0.3/lib/radix-tree.c linux-3.0.3/lib/radix-tree.c
60325--- linux-3.0.3/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60326+++ linux-3.0.3/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60327@@ -80,7 +80,7 @@ struct radix_tree_preload {
60328 int nr;
60329 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60330 };
60331-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60332+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60333
60334 static inline void *ptr_to_indirect(void *ptr)
60335 {
60336diff -urNp linux-3.0.3/lib/vsprintf.c linux-3.0.3/lib/vsprintf.c
60337--- linux-3.0.3/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60338+++ linux-3.0.3/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60339@@ -16,6 +16,9 @@
60340 * - scnprintf and vscnprintf
60341 */
60342
60343+#ifdef CONFIG_GRKERNSEC_HIDESYM
60344+#define __INCLUDED_BY_HIDESYM 1
60345+#endif
60346 #include <stdarg.h>
60347 #include <linux/module.h>
60348 #include <linux/types.h>
60349@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60350 char sym[KSYM_SYMBOL_LEN];
60351 if (ext == 'B')
60352 sprint_backtrace(sym, value);
60353- else if (ext != 'f' && ext != 's')
60354+ else if (ext != 'f' && ext != 's' && ext != 'a')
60355 sprint_symbol(sym, value);
60356 else
60357 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60358@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60359 return string(buf, end, uuid, spec);
60360 }
60361
60362+#ifdef CONFIG_GRKERNSEC_HIDESYM
60363+int kptr_restrict __read_mostly = 2;
60364+#else
60365 int kptr_restrict __read_mostly;
60366+#endif
60367
60368 /*
60369 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60370@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60371 * - 'S' For symbolic direct pointers with offset
60372 * - 's' For symbolic direct pointers without offset
60373 * - 'B' For backtraced symbolic direct pointers with offset
60374+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60375+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60376 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60377 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60378 * - 'M' For a 6-byte MAC address, it prints the address in the
60379@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60380 {
60381 if (!ptr && *fmt != 'K') {
60382 /*
60383- * Print (null) with the same width as a pointer so it makes
60384+ * Print (nil) with the same width as a pointer so it makes
60385 * tabular output look nice.
60386 */
60387 if (spec.field_width == -1)
60388 spec.field_width = 2 * sizeof(void *);
60389- return string(buf, end, "(null)", spec);
60390+ return string(buf, end, "(nil)", spec);
60391 }
60392
60393 switch (*fmt) {
60394@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60395 /* Fallthrough */
60396 case 'S':
60397 case 's':
60398+#ifdef CONFIG_GRKERNSEC_HIDESYM
60399+ break;
60400+#else
60401+ return symbol_string(buf, end, ptr, spec, *fmt);
60402+#endif
60403+ case 'A':
60404+ case 'a':
60405 case 'B':
60406 return symbol_string(buf, end, ptr, spec, *fmt);
60407 case 'R':
60408@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60409 typeof(type) value; \
60410 if (sizeof(type) == 8) { \
60411 args = PTR_ALIGN(args, sizeof(u32)); \
60412- *(u32 *)&value = *(u32 *)args; \
60413- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60414+ *(u32 *)&value = *(const u32 *)args; \
60415+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60416 } else { \
60417 args = PTR_ALIGN(args, sizeof(type)); \
60418- value = *(typeof(type) *)args; \
60419+ value = *(const typeof(type) *)args; \
60420 } \
60421 args += sizeof(type); \
60422 value; \
60423@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60424 case FORMAT_TYPE_STR: {
60425 const char *str_arg = args;
60426 args += strlen(str_arg) + 1;
60427- str = string(str, end, (char *)str_arg, spec);
60428+ str = string(str, end, str_arg, spec);
60429 break;
60430 }
60431
60432diff -urNp linux-3.0.3/localversion-grsec linux-3.0.3/localversion-grsec
60433--- linux-3.0.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60434+++ linux-3.0.3/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60435@@ -0,0 +1 @@
60436+-grsec
60437diff -urNp linux-3.0.3/Makefile linux-3.0.3/Makefile
60438--- linux-3.0.3/Makefile 2011-08-23 21:44:40.000000000 -0400
60439+++ linux-3.0.3/Makefile 2011-08-27 21:15:31.000000000 -0400
60440@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60441
60442 HOSTCC = gcc
60443 HOSTCXX = g++
60444-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60445-HOSTCXXFLAGS = -O2
60446+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60447+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60448+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60449
60450 # Decide whether to build built-in, modular, or both.
60451 # Normally, just do built-in.
60452@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60453 KBUILD_CPPFLAGS := -D__KERNEL__
60454
60455 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60456+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
60457 -fno-strict-aliasing -fno-common \
60458 -Werror-implicit-function-declaration \
60459 -Wno-format-security \
60460 -fno-delete-null-pointer-checks
60461+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60462 KBUILD_AFLAGS_KERNEL :=
60463 KBUILD_CFLAGS_KERNEL :=
60464 KBUILD_AFLAGS := -D__ASSEMBLY__
60465@@ -564,6 +567,25 @@ else
60466 KBUILD_CFLAGS += -O2
60467 endif
60468
60469+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60470+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
60471+ifdef CONFIG_PAX_MEMORY_STACKLEAK
60472+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60473+endif
60474+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60475+gcc-plugins0:
60476+ $(Q)$(MAKE) $(build)=tools/gcc
60477+gcc-plugins: scripts_basic gcc-plugins0
60478+else
60479+gcc-plugins:
60480+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60481+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60482+else
60483+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60484+endif
60485+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60486+endif
60487+
60488 include $(srctree)/arch/$(SRCARCH)/Makefile
60489
60490 ifneq ($(CONFIG_FRAME_WARN),0)
60491@@ -708,7 +730,7 @@ export mod_strip_cmd
60492
60493
60494 ifeq ($(KBUILD_EXTMOD),)
60495-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60496+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60497
60498 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60499 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60500@@ -907,6 +929,7 @@ define rule_vmlinux-modpost
60501 endef
60502
60503 # vmlinux image - including updated kernel symbols
60504+vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60505 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
60506 ifdef CONFIG_HEADERS_CHECK
60507 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
60508@@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60509 endif
60510
60511 # prepare2 creates a makefile if using a separate output directory
60512-prepare2: prepare3 outputmakefile asm-generic
60513+prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60514
60515 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60516 include/config/auto.conf
60517@@ -1087,6 +1110,7 @@ all: modules
60518 # using awk while concatenating to the final file.
60519
60520 PHONY += modules
60521+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60522 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
60523 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
60524 @$(kecho) ' Building modules, stage 2.';
60525@@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
60526 $(module-dirs): crmodverdir $(objtree)/Module.symvers
60527 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
60528
60529+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60530 modules: $(module-dirs)
60531 @$(kecho) ' Building modules, stage 2.';
60532 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
60533@@ -1404,7 +1429,7 @@ clean: $(clean-dirs)
60534 $(call cmd,rmdirs)
60535 $(call cmd,rmfiles)
60536 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60537- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60538+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60539 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60540 -o -name '*.symtypes' -o -name 'modules.order' \
60541 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60542diff -urNp linux-3.0.3/mm/filemap.c linux-3.0.3/mm/filemap.c
60543--- linux-3.0.3/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60544+++ linux-3.0.3/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60545@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60546 struct address_space *mapping = file->f_mapping;
60547
60548 if (!mapping->a_ops->readpage)
60549- return -ENOEXEC;
60550+ return -ENODEV;
60551 file_accessed(file);
60552 vma->vm_ops = &generic_file_vm_ops;
60553 vma->vm_flags |= VM_CAN_NONLINEAR;
60554@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60555 *pos = i_size_read(inode);
60556
60557 if (limit != RLIM_INFINITY) {
60558+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60559 if (*pos >= limit) {
60560 send_sig(SIGXFSZ, current, 0);
60561 return -EFBIG;
60562diff -urNp linux-3.0.3/mm/fremap.c linux-3.0.3/mm/fremap.c
60563--- linux-3.0.3/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60564+++ linux-3.0.3/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60565@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60566 retry:
60567 vma = find_vma(mm, start);
60568
60569+#ifdef CONFIG_PAX_SEGMEXEC
60570+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60571+ goto out;
60572+#endif
60573+
60574 /*
60575 * Make sure the vma is shared, that it supports prefaulting,
60576 * and that the remapped range is valid and fully within
60577diff -urNp linux-3.0.3/mm/highmem.c linux-3.0.3/mm/highmem.c
60578--- linux-3.0.3/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60579+++ linux-3.0.3/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60580@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60581 * So no dangers, even with speculative execution.
60582 */
60583 page = pte_page(pkmap_page_table[i]);
60584+ pax_open_kernel();
60585 pte_clear(&init_mm, (unsigned long)page_address(page),
60586 &pkmap_page_table[i]);
60587-
60588+ pax_close_kernel();
60589 set_page_address(page, NULL);
60590 need_flush = 1;
60591 }
60592@@ -186,9 +187,11 @@ start:
60593 }
60594 }
60595 vaddr = PKMAP_ADDR(last_pkmap_nr);
60596+
60597+ pax_open_kernel();
60598 set_pte_at(&init_mm, vaddr,
60599 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60600-
60601+ pax_close_kernel();
60602 pkmap_count[last_pkmap_nr] = 1;
60603 set_page_address(page, (void *)vaddr);
60604
60605diff -urNp linux-3.0.3/mm/huge_memory.c linux-3.0.3/mm/huge_memory.c
60606--- linux-3.0.3/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60607+++ linux-3.0.3/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60608@@ -702,7 +702,7 @@ out:
60609 * run pte_offset_map on the pmd, if an huge pmd could
60610 * materialize from under us from a different thread.
60611 */
60612- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60613+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60614 return VM_FAULT_OOM;
60615 /* if an huge pmd materialized from under us just retry later */
60616 if (unlikely(pmd_trans_huge(*pmd)))
60617diff -urNp linux-3.0.3/mm/hugetlb.c linux-3.0.3/mm/hugetlb.c
60618--- linux-3.0.3/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60619+++ linux-3.0.3/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60620@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60621 return 1;
60622 }
60623
60624+#ifdef CONFIG_PAX_SEGMEXEC
60625+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60626+{
60627+ struct mm_struct *mm = vma->vm_mm;
60628+ struct vm_area_struct *vma_m;
60629+ unsigned long address_m;
60630+ pte_t *ptep_m;
60631+
60632+ vma_m = pax_find_mirror_vma(vma);
60633+ if (!vma_m)
60634+ return;
60635+
60636+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60637+ address_m = address + SEGMEXEC_TASK_SIZE;
60638+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60639+ get_page(page_m);
60640+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
60641+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60642+}
60643+#endif
60644+
60645 /*
60646 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60647 */
60648@@ -2440,6 +2461,11 @@ retry_avoidcopy:
60649 make_huge_pte(vma, new_page, 1));
60650 page_remove_rmap(old_page);
60651 hugepage_add_new_anon_rmap(new_page, vma, address);
60652+
60653+#ifdef CONFIG_PAX_SEGMEXEC
60654+ pax_mirror_huge_pte(vma, address, new_page);
60655+#endif
60656+
60657 /* Make the old page be freed below */
60658 new_page = old_page;
60659 mmu_notifier_invalidate_range_end(mm,
60660@@ -2591,6 +2617,10 @@ retry:
60661 && (vma->vm_flags & VM_SHARED)));
60662 set_huge_pte_at(mm, address, ptep, new_pte);
60663
60664+#ifdef CONFIG_PAX_SEGMEXEC
60665+ pax_mirror_huge_pte(vma, address, page);
60666+#endif
60667+
60668 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60669 /* Optimization, do the COW without a second fault */
60670 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60671@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60672 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60673 struct hstate *h = hstate_vma(vma);
60674
60675+#ifdef CONFIG_PAX_SEGMEXEC
60676+ struct vm_area_struct *vma_m;
60677+#endif
60678+
60679 ptep = huge_pte_offset(mm, address);
60680 if (ptep) {
60681 entry = huge_ptep_get(ptep);
60682@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60683 VM_FAULT_SET_HINDEX(h - hstates);
60684 }
60685
60686+#ifdef CONFIG_PAX_SEGMEXEC
60687+ vma_m = pax_find_mirror_vma(vma);
60688+ if (vma_m) {
60689+ unsigned long address_m;
60690+
60691+ if (vma->vm_start > vma_m->vm_start) {
60692+ address_m = address;
60693+ address -= SEGMEXEC_TASK_SIZE;
60694+ vma = vma_m;
60695+ h = hstate_vma(vma);
60696+ } else
60697+ address_m = address + SEGMEXEC_TASK_SIZE;
60698+
60699+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60700+ return VM_FAULT_OOM;
60701+ address_m &= HPAGE_MASK;
60702+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60703+ }
60704+#endif
60705+
60706 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60707 if (!ptep)
60708 return VM_FAULT_OOM;
60709diff -urNp linux-3.0.3/mm/internal.h linux-3.0.3/mm/internal.h
60710--- linux-3.0.3/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60711+++ linux-3.0.3/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60712@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60713 * in mm/page_alloc.c
60714 */
60715 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60716+extern void free_compound_page(struct page *page);
60717 extern void prep_compound_page(struct page *page, unsigned long order);
60718 #ifdef CONFIG_MEMORY_FAILURE
60719 extern bool is_free_buddy_page(struct page *page);
60720diff -urNp linux-3.0.3/mm/Kconfig linux-3.0.3/mm/Kconfig
60721--- linux-3.0.3/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60722+++ linux-3.0.3/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60723@@ -240,7 +240,7 @@ config KSM
60724 config DEFAULT_MMAP_MIN_ADDR
60725 int "Low address space to protect from user allocation"
60726 depends on MMU
60727- default 4096
60728+ default 65536
60729 help
60730 This is the portion of low virtual memory which should be protected
60731 from userspace allocation. Keeping a user from writing to low pages
60732diff -urNp linux-3.0.3/mm/kmemleak.c linux-3.0.3/mm/kmemleak.c
60733--- linux-3.0.3/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60734+++ linux-3.0.3/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60735@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60736
60737 for (i = 0; i < object->trace_len; i++) {
60738 void *ptr = (void *)object->trace[i];
60739- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60740+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60741 }
60742 }
60743
60744diff -urNp linux-3.0.3/mm/madvise.c linux-3.0.3/mm/madvise.c
60745--- linux-3.0.3/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60746+++ linux-3.0.3/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60747@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60748 pgoff_t pgoff;
60749 unsigned long new_flags = vma->vm_flags;
60750
60751+#ifdef CONFIG_PAX_SEGMEXEC
60752+ struct vm_area_struct *vma_m;
60753+#endif
60754+
60755 switch (behavior) {
60756 case MADV_NORMAL:
60757 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60758@@ -110,6 +114,13 @@ success:
60759 /*
60760 * vm_flags is protected by the mmap_sem held in write mode.
60761 */
60762+
60763+#ifdef CONFIG_PAX_SEGMEXEC
60764+ vma_m = pax_find_mirror_vma(vma);
60765+ if (vma_m)
60766+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60767+#endif
60768+
60769 vma->vm_flags = new_flags;
60770
60771 out:
60772@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60773 struct vm_area_struct ** prev,
60774 unsigned long start, unsigned long end)
60775 {
60776+
60777+#ifdef CONFIG_PAX_SEGMEXEC
60778+ struct vm_area_struct *vma_m;
60779+#endif
60780+
60781 *prev = vma;
60782 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60783 return -EINVAL;
60784@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60785 zap_page_range(vma, start, end - start, &details);
60786 } else
60787 zap_page_range(vma, start, end - start, NULL);
60788+
60789+#ifdef CONFIG_PAX_SEGMEXEC
60790+ vma_m = pax_find_mirror_vma(vma);
60791+ if (vma_m) {
60792+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60793+ struct zap_details details = {
60794+ .nonlinear_vma = vma_m,
60795+ .last_index = ULONG_MAX,
60796+ };
60797+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60798+ } else
60799+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60800+ }
60801+#endif
60802+
60803 return 0;
60804 }
60805
60806@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60807 if (end < start)
60808 goto out;
60809
60810+#ifdef CONFIG_PAX_SEGMEXEC
60811+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60812+ if (end > SEGMEXEC_TASK_SIZE)
60813+ goto out;
60814+ } else
60815+#endif
60816+
60817+ if (end > TASK_SIZE)
60818+ goto out;
60819+
60820 error = 0;
60821 if (end == start)
60822 goto out;
60823diff -urNp linux-3.0.3/mm/memory.c linux-3.0.3/mm/memory.c
60824--- linux-3.0.3/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60825+++ linux-3.0.3/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60826@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60827 return;
60828
60829 pmd = pmd_offset(pud, start);
60830+
60831+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60832 pud_clear(pud);
60833 pmd_free_tlb(tlb, pmd, start);
60834+#endif
60835+
60836 }
60837
60838 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60839@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60840 if (end - 1 > ceiling - 1)
60841 return;
60842
60843+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60844 pud = pud_offset(pgd, start);
60845 pgd_clear(pgd);
60846 pud_free_tlb(tlb, pud, start);
60847+#endif
60848+
60849 }
60850
60851 /*
60852@@ -1577,12 +1584,6 @@ no_page_table:
60853 return page;
60854 }
60855
60856-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60857-{
60858- return stack_guard_page_start(vma, addr) ||
60859- stack_guard_page_end(vma, addr+PAGE_SIZE);
60860-}
60861-
60862 /**
60863 * __get_user_pages() - pin user pages in memory
60864 * @tsk: task_struct of target task
60865@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60866 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60867 i = 0;
60868
60869- do {
60870+ while (nr_pages) {
60871 struct vm_area_struct *vma;
60872
60873- vma = find_extend_vma(mm, start);
60874+ vma = find_vma(mm, start);
60875 if (!vma && in_gate_area(mm, start)) {
60876 unsigned long pg = start & PAGE_MASK;
60877 pgd_t *pgd;
60878@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60879 goto next_page;
60880 }
60881
60882- if (!vma ||
60883+ if (!vma || start < vma->vm_start ||
60884 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60885 !(vm_flags & vma->vm_flags))
60886 return i ? : -EFAULT;
60887@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60888 int ret;
60889 unsigned int fault_flags = 0;
60890
60891- /* For mlock, just skip the stack guard page. */
60892- if (foll_flags & FOLL_MLOCK) {
60893- if (stack_guard_page(vma, start))
60894- goto next_page;
60895- }
60896 if (foll_flags & FOLL_WRITE)
60897 fault_flags |= FAULT_FLAG_WRITE;
60898 if (nonblocking)
60899@@ -1811,7 +1807,7 @@ next_page:
60900 start += PAGE_SIZE;
60901 nr_pages--;
60902 } while (nr_pages && start < vma->vm_end);
60903- } while (nr_pages);
60904+ }
60905 return i;
60906 }
60907 EXPORT_SYMBOL(__get_user_pages);
60908@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60909 page_add_file_rmap(page);
60910 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60911
60912+#ifdef CONFIG_PAX_SEGMEXEC
60913+ pax_mirror_file_pte(vma, addr, page, ptl);
60914+#endif
60915+
60916 retval = 0;
60917 pte_unmap_unlock(pte, ptl);
60918 return retval;
60919@@ -2052,10 +2052,22 @@ out:
60920 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60921 struct page *page)
60922 {
60923+
60924+#ifdef CONFIG_PAX_SEGMEXEC
60925+ struct vm_area_struct *vma_m;
60926+#endif
60927+
60928 if (addr < vma->vm_start || addr >= vma->vm_end)
60929 return -EFAULT;
60930 if (!page_count(page))
60931 return -EINVAL;
60932+
60933+#ifdef CONFIG_PAX_SEGMEXEC
60934+ vma_m = pax_find_mirror_vma(vma);
60935+ if (vma_m)
60936+ vma_m->vm_flags |= VM_INSERTPAGE;
60937+#endif
60938+
60939 vma->vm_flags |= VM_INSERTPAGE;
60940 return insert_page(vma, addr, page, vma->vm_page_prot);
60941 }
60942@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
60943 unsigned long pfn)
60944 {
60945 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
60946+ BUG_ON(vma->vm_mirror);
60947
60948 if (addr < vma->vm_start || addr >= vma->vm_end)
60949 return -EFAULT;
60950@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
60951 copy_user_highpage(dst, src, va, vma);
60952 }
60953
60954+#ifdef CONFIG_PAX_SEGMEXEC
60955+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
60956+{
60957+ struct mm_struct *mm = vma->vm_mm;
60958+ spinlock_t *ptl;
60959+ pte_t *pte, entry;
60960+
60961+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
60962+ entry = *pte;
60963+ if (!pte_present(entry)) {
60964+ if (!pte_none(entry)) {
60965+ BUG_ON(pte_file(entry));
60966+ free_swap_and_cache(pte_to_swp_entry(entry));
60967+ pte_clear_not_present_full(mm, address, pte, 0);
60968+ }
60969+ } else {
60970+ struct page *page;
60971+
60972+ flush_cache_page(vma, address, pte_pfn(entry));
60973+ entry = ptep_clear_flush(vma, address, pte);
60974+ BUG_ON(pte_dirty(entry));
60975+ page = vm_normal_page(vma, address, entry);
60976+ if (page) {
60977+ update_hiwater_rss(mm);
60978+ if (PageAnon(page))
60979+ dec_mm_counter_fast(mm, MM_ANONPAGES);
60980+ else
60981+ dec_mm_counter_fast(mm, MM_FILEPAGES);
60982+ page_remove_rmap(page);
60983+ page_cache_release(page);
60984+ }
60985+ }
60986+ pte_unmap_unlock(pte, ptl);
60987+}
60988+
60989+/* PaX: if vma is mirrored, synchronize the mirror's PTE
60990+ *
60991+ * the ptl of the lower mapped page is held on entry and is not released on exit
60992+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
60993+ */
60994+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60995+{
60996+ struct mm_struct *mm = vma->vm_mm;
60997+ unsigned long address_m;
60998+ spinlock_t *ptl_m;
60999+ struct vm_area_struct *vma_m;
61000+ pmd_t *pmd_m;
61001+ pte_t *pte_m, entry_m;
61002+
61003+ BUG_ON(!page_m || !PageAnon(page_m));
61004+
61005+ vma_m = pax_find_mirror_vma(vma);
61006+ if (!vma_m)
61007+ return;
61008+
61009+ BUG_ON(!PageLocked(page_m));
61010+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61011+ address_m = address + SEGMEXEC_TASK_SIZE;
61012+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61013+ pte_m = pte_offset_map(pmd_m, address_m);
61014+ ptl_m = pte_lockptr(mm, pmd_m);
61015+ if (ptl != ptl_m) {
61016+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61017+ if (!pte_none(*pte_m))
61018+ goto out;
61019+ }
61020+
61021+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61022+ page_cache_get(page_m);
61023+ page_add_anon_rmap(page_m, vma_m, address_m);
61024+ inc_mm_counter_fast(mm, MM_ANONPAGES);
61025+ set_pte_at(mm, address_m, pte_m, entry_m);
61026+ update_mmu_cache(vma_m, address_m, entry_m);
61027+out:
61028+ if (ptl != ptl_m)
61029+ spin_unlock(ptl_m);
61030+ pte_unmap(pte_m);
61031+ unlock_page(page_m);
61032+}
61033+
61034+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61035+{
61036+ struct mm_struct *mm = vma->vm_mm;
61037+ unsigned long address_m;
61038+ spinlock_t *ptl_m;
61039+ struct vm_area_struct *vma_m;
61040+ pmd_t *pmd_m;
61041+ pte_t *pte_m, entry_m;
61042+
61043+ BUG_ON(!page_m || PageAnon(page_m));
61044+
61045+ vma_m = pax_find_mirror_vma(vma);
61046+ if (!vma_m)
61047+ return;
61048+
61049+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61050+ address_m = address + SEGMEXEC_TASK_SIZE;
61051+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61052+ pte_m = pte_offset_map(pmd_m, address_m);
61053+ ptl_m = pte_lockptr(mm, pmd_m);
61054+ if (ptl != ptl_m) {
61055+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61056+ if (!pte_none(*pte_m))
61057+ goto out;
61058+ }
61059+
61060+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61061+ page_cache_get(page_m);
61062+ page_add_file_rmap(page_m);
61063+ inc_mm_counter_fast(mm, MM_FILEPAGES);
61064+ set_pte_at(mm, address_m, pte_m, entry_m);
61065+ update_mmu_cache(vma_m, address_m, entry_m);
61066+out:
61067+ if (ptl != ptl_m)
61068+ spin_unlock(ptl_m);
61069+ pte_unmap(pte_m);
61070+}
61071+
61072+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61073+{
61074+ struct mm_struct *mm = vma->vm_mm;
61075+ unsigned long address_m;
61076+ spinlock_t *ptl_m;
61077+ struct vm_area_struct *vma_m;
61078+ pmd_t *pmd_m;
61079+ pte_t *pte_m, entry_m;
61080+
61081+ vma_m = pax_find_mirror_vma(vma);
61082+ if (!vma_m)
61083+ return;
61084+
61085+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61086+ address_m = address + SEGMEXEC_TASK_SIZE;
61087+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61088+ pte_m = pte_offset_map(pmd_m, address_m);
61089+ ptl_m = pte_lockptr(mm, pmd_m);
61090+ if (ptl != ptl_m) {
61091+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61092+ if (!pte_none(*pte_m))
61093+ goto out;
61094+ }
61095+
61096+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61097+ set_pte_at(mm, address_m, pte_m, entry_m);
61098+out:
61099+ if (ptl != ptl_m)
61100+ spin_unlock(ptl_m);
61101+ pte_unmap(pte_m);
61102+}
61103+
61104+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61105+{
61106+ struct page *page_m;
61107+ pte_t entry;
61108+
61109+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61110+ goto out;
61111+
61112+ entry = *pte;
61113+ page_m = vm_normal_page(vma, address, entry);
61114+ if (!page_m)
61115+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61116+ else if (PageAnon(page_m)) {
61117+ if (pax_find_mirror_vma(vma)) {
61118+ pte_unmap_unlock(pte, ptl);
61119+ lock_page(page_m);
61120+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61121+ if (pte_same(entry, *pte))
61122+ pax_mirror_anon_pte(vma, address, page_m, ptl);
61123+ else
61124+ unlock_page(page_m);
61125+ }
61126+ } else
61127+ pax_mirror_file_pte(vma, address, page_m, ptl);
61128+
61129+out:
61130+ pte_unmap_unlock(pte, ptl);
61131+}
61132+#endif
61133+
61134 /*
61135 * This routine handles present pages, when users try to write
61136 * to a shared page. It is done by copying the page to a new address
61137@@ -2667,6 +2860,12 @@ gotten:
61138 */
61139 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61140 if (likely(pte_same(*page_table, orig_pte))) {
61141+
61142+#ifdef CONFIG_PAX_SEGMEXEC
61143+ if (pax_find_mirror_vma(vma))
61144+ BUG_ON(!trylock_page(new_page));
61145+#endif
61146+
61147 if (old_page) {
61148 if (!PageAnon(old_page)) {
61149 dec_mm_counter_fast(mm, MM_FILEPAGES);
61150@@ -2718,6 +2917,10 @@ gotten:
61151 page_remove_rmap(old_page);
61152 }
61153
61154+#ifdef CONFIG_PAX_SEGMEXEC
61155+ pax_mirror_anon_pte(vma, address, new_page, ptl);
61156+#endif
61157+
61158 /* Free the old page.. */
61159 new_page = old_page;
61160 ret |= VM_FAULT_WRITE;
61161@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
61162 swap_free(entry);
61163 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61164 try_to_free_swap(page);
61165+
61166+#ifdef CONFIG_PAX_SEGMEXEC
61167+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61168+#endif
61169+
61170 unlock_page(page);
61171 if (swapcache) {
61172 /*
61173@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
61174
61175 /* No need to invalidate - it was non-present before */
61176 update_mmu_cache(vma, address, page_table);
61177+
61178+#ifdef CONFIG_PAX_SEGMEXEC
61179+ pax_mirror_anon_pte(vma, address, page, ptl);
61180+#endif
61181+
61182 unlock:
61183 pte_unmap_unlock(page_table, ptl);
61184 out:
61185@@ -3039,40 +3252,6 @@ out_release:
61186 }
61187
61188 /*
61189- * This is like a special single-page "expand_{down|up}wards()",
61190- * except we must first make sure that 'address{-|+}PAGE_SIZE'
61191- * doesn't hit another vma.
61192- */
61193-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61194-{
61195- address &= PAGE_MASK;
61196- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61197- struct vm_area_struct *prev = vma->vm_prev;
61198-
61199- /*
61200- * Is there a mapping abutting this one below?
61201- *
61202- * That's only ok if it's the same stack mapping
61203- * that has gotten split..
61204- */
61205- if (prev && prev->vm_end == address)
61206- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61207-
61208- expand_downwards(vma, address - PAGE_SIZE);
61209- }
61210- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61211- struct vm_area_struct *next = vma->vm_next;
61212-
61213- /* As VM_GROWSDOWN but s/below/above/ */
61214- if (next && next->vm_start == address + PAGE_SIZE)
61215- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61216-
61217- expand_upwards(vma, address + PAGE_SIZE);
61218- }
61219- return 0;
61220-}
61221-
61222-/*
61223 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61224 * but allow concurrent faults), and pte mapped but not yet locked.
61225 * We return with mmap_sem still held, but pte unmapped and unlocked.
61226@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
61227 unsigned long address, pte_t *page_table, pmd_t *pmd,
61228 unsigned int flags)
61229 {
61230- struct page *page;
61231+ struct page *page = NULL;
61232 spinlock_t *ptl;
61233 pte_t entry;
61234
61235- pte_unmap(page_table);
61236-
61237- /* Check if we need to add a guard page to the stack */
61238- if (check_stack_guard_page(vma, address) < 0)
61239- return VM_FAULT_SIGBUS;
61240-
61241- /* Use the zero-page for reads */
61242 if (!(flags & FAULT_FLAG_WRITE)) {
61243 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61244 vma->vm_page_prot));
61245- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61246+ ptl = pte_lockptr(mm, pmd);
61247+ spin_lock(ptl);
61248 if (!pte_none(*page_table))
61249 goto unlock;
61250 goto setpte;
61251 }
61252
61253 /* Allocate our own private page. */
61254+ pte_unmap(page_table);
61255+
61256 if (unlikely(anon_vma_prepare(vma)))
61257 goto oom;
61258 page = alloc_zeroed_user_highpage_movable(vma, address);
61259@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
61260 if (!pte_none(*page_table))
61261 goto release;
61262
61263+#ifdef CONFIG_PAX_SEGMEXEC
61264+ if (pax_find_mirror_vma(vma))
61265+ BUG_ON(!trylock_page(page));
61266+#endif
61267+
61268 inc_mm_counter_fast(mm, MM_ANONPAGES);
61269 page_add_new_anon_rmap(page, vma, address);
61270 setpte:
61271@@ -3127,6 +3307,12 @@ setpte:
61272
61273 /* No need to invalidate - it was non-present before */
61274 update_mmu_cache(vma, address, page_table);
61275+
61276+#ifdef CONFIG_PAX_SEGMEXEC
61277+ if (page)
61278+ pax_mirror_anon_pte(vma, address, page, ptl);
61279+#endif
61280+
61281 unlock:
61282 pte_unmap_unlock(page_table, ptl);
61283 return 0;
61284@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61285 */
61286 /* Only go through if we didn't race with anybody else... */
61287 if (likely(pte_same(*page_table, orig_pte))) {
61288+
61289+#ifdef CONFIG_PAX_SEGMEXEC
61290+ if (anon && pax_find_mirror_vma(vma))
61291+ BUG_ON(!trylock_page(page));
61292+#endif
61293+
61294 flush_icache_page(vma, page);
61295 entry = mk_pte(page, vma->vm_page_prot);
61296 if (flags & FAULT_FLAG_WRITE)
61297@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61298
61299 /* no need to invalidate: a not-present page won't be cached */
61300 update_mmu_cache(vma, address, page_table);
61301+
61302+#ifdef CONFIG_PAX_SEGMEXEC
61303+ if (anon)
61304+ pax_mirror_anon_pte(vma, address, page, ptl);
61305+ else
61306+ pax_mirror_file_pte(vma, address, page, ptl);
61307+#endif
61308+
61309 } else {
61310 if (charged)
61311 mem_cgroup_uncharge_page(page);
61312@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61313 if (flags & FAULT_FLAG_WRITE)
61314 flush_tlb_fix_spurious_fault(vma, address);
61315 }
61316+
61317+#ifdef CONFIG_PAX_SEGMEXEC
61318+ pax_mirror_pte(vma, address, pte, pmd, ptl);
61319+ return 0;
61320+#endif
61321+
61322 unlock:
61323 pte_unmap_unlock(pte, ptl);
61324 return 0;
61325@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61326 pmd_t *pmd;
61327 pte_t *pte;
61328
61329+#ifdef CONFIG_PAX_SEGMEXEC
61330+ struct vm_area_struct *vma_m;
61331+#endif
61332+
61333 __set_current_state(TASK_RUNNING);
61334
61335 count_vm_event(PGFAULT);
61336@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61337 if (unlikely(is_vm_hugetlb_page(vma)))
61338 return hugetlb_fault(mm, vma, address, flags);
61339
61340+#ifdef CONFIG_PAX_SEGMEXEC
61341+ vma_m = pax_find_mirror_vma(vma);
61342+ if (vma_m) {
61343+ unsigned long address_m;
61344+ pgd_t *pgd_m;
61345+ pud_t *pud_m;
61346+ pmd_t *pmd_m;
61347+
61348+ if (vma->vm_start > vma_m->vm_start) {
61349+ address_m = address;
61350+ address -= SEGMEXEC_TASK_SIZE;
61351+ vma = vma_m;
61352+ } else
61353+ address_m = address + SEGMEXEC_TASK_SIZE;
61354+
61355+ pgd_m = pgd_offset(mm, address_m);
61356+ pud_m = pud_alloc(mm, pgd_m, address_m);
61357+ if (!pud_m)
61358+ return VM_FAULT_OOM;
61359+ pmd_m = pmd_alloc(mm, pud_m, address_m);
61360+ if (!pmd_m)
61361+ return VM_FAULT_OOM;
61362+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61363+ return VM_FAULT_OOM;
61364+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61365+ }
61366+#endif
61367+
61368 pgd = pgd_offset(mm, address);
61369 pud = pud_alloc(mm, pgd, address);
61370 if (!pud)
61371@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61372 * run pte_offset_map on the pmd, if an huge pmd could
61373 * materialize from under us from a different thread.
61374 */
61375- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61376+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61377 return VM_FAULT_OOM;
61378 /* if an huge pmd materialized from under us just retry later */
61379 if (unlikely(pmd_trans_huge(*pmd)))
61380@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61381 gate_vma.vm_start = FIXADDR_USER_START;
61382 gate_vma.vm_end = FIXADDR_USER_END;
61383 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61384- gate_vma.vm_page_prot = __P101;
61385+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61386 /*
61387 * Make sure the vDSO gets into every core dump.
61388 * Dumping its contents makes post-mortem fully interpretable later
61389diff -urNp linux-3.0.3/mm/memory-failure.c linux-3.0.3/mm/memory-failure.c
61390--- linux-3.0.3/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61391+++ linux-3.0.3/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61392@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61393
61394 int sysctl_memory_failure_recovery __read_mostly = 1;
61395
61396-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61397+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61398
61399 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61400
61401@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61402 }
61403
61404 nr_pages = 1 << compound_trans_order(hpage);
61405- atomic_long_add(nr_pages, &mce_bad_pages);
61406+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61407
61408 /*
61409 * We need/can do nothing about count=0 pages.
61410@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61411 if (!PageHWPoison(hpage)
61412 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61413 || (p != hpage && TestSetPageHWPoison(hpage))) {
61414- atomic_long_sub(nr_pages, &mce_bad_pages);
61415+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61416 return 0;
61417 }
61418 set_page_hwpoison_huge_page(hpage);
61419@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61420 }
61421 if (hwpoison_filter(p)) {
61422 if (TestClearPageHWPoison(p))
61423- atomic_long_sub(nr_pages, &mce_bad_pages);
61424+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61425 unlock_page(hpage);
61426 put_page(hpage);
61427 return 0;
61428@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61429 return 0;
61430 }
61431 if (TestClearPageHWPoison(p))
61432- atomic_long_sub(nr_pages, &mce_bad_pages);
61433+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61434 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61435 return 0;
61436 }
61437@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61438 */
61439 if (TestClearPageHWPoison(page)) {
61440 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61441- atomic_long_sub(nr_pages, &mce_bad_pages);
61442+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61443 freeit = 1;
61444 if (PageHuge(page))
61445 clear_page_hwpoison_huge_page(page);
61446@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61447 }
61448 done:
61449 if (!PageHWPoison(hpage))
61450- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61451+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61452 set_page_hwpoison_huge_page(hpage);
61453 dequeue_hwpoisoned_huge_page(hpage);
61454 /* keep elevated page count for bad page */
61455@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61456 return ret;
61457
61458 done:
61459- atomic_long_add(1, &mce_bad_pages);
61460+ atomic_long_add_unchecked(1, &mce_bad_pages);
61461 SetPageHWPoison(page);
61462 /* keep elevated page count for bad page */
61463 return ret;
61464diff -urNp linux-3.0.3/mm/mempolicy.c linux-3.0.3/mm/mempolicy.c
61465--- linux-3.0.3/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61466+++ linux-3.0.3/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61467@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61468 unsigned long vmstart;
61469 unsigned long vmend;
61470
61471+#ifdef CONFIG_PAX_SEGMEXEC
61472+ struct vm_area_struct *vma_m;
61473+#endif
61474+
61475 vma = find_vma_prev(mm, start, &prev);
61476 if (!vma || vma->vm_start > start)
61477 return -EFAULT;
61478@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61479 err = policy_vma(vma, new_pol);
61480 if (err)
61481 goto out;
61482+
61483+#ifdef CONFIG_PAX_SEGMEXEC
61484+ vma_m = pax_find_mirror_vma(vma);
61485+ if (vma_m) {
61486+ err = policy_vma(vma_m, new_pol);
61487+ if (err)
61488+ goto out;
61489+ }
61490+#endif
61491+
61492 }
61493
61494 out:
61495@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61496
61497 if (end < start)
61498 return -EINVAL;
61499+
61500+#ifdef CONFIG_PAX_SEGMEXEC
61501+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61502+ if (end > SEGMEXEC_TASK_SIZE)
61503+ return -EINVAL;
61504+ } else
61505+#endif
61506+
61507+ if (end > TASK_SIZE)
61508+ return -EINVAL;
61509+
61510 if (end == start)
61511 return 0;
61512
61513@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61514 if (!mm)
61515 goto out;
61516
61517+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61518+ if (mm != current->mm &&
61519+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61520+ err = -EPERM;
61521+ goto out;
61522+ }
61523+#endif
61524+
61525 /*
61526 * Check if this process has the right to modify the specified
61527 * process. The right exists if the process has administrative
61528@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61529 rcu_read_lock();
61530 tcred = __task_cred(task);
61531 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61532- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61533- !capable(CAP_SYS_NICE)) {
61534+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61535 rcu_read_unlock();
61536 err = -EPERM;
61537 goto out;
61538diff -urNp linux-3.0.3/mm/migrate.c linux-3.0.3/mm/migrate.c
61539--- linux-3.0.3/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61540+++ linux-3.0.3/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61541@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61542 unsigned long chunk_start;
61543 int err;
61544
61545+ pax_track_stack();
61546+
61547 task_nodes = cpuset_mems_allowed(task);
61548
61549 err = -ENOMEM;
61550@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61551 if (!mm)
61552 return -EINVAL;
61553
61554+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61555+ if (mm != current->mm &&
61556+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61557+ err = -EPERM;
61558+ goto out;
61559+ }
61560+#endif
61561+
61562 /*
61563 * Check if this process has the right to modify the specified
61564 * process. The right exists if the process has administrative
61565@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61566 rcu_read_lock();
61567 tcred = __task_cred(task);
61568 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61569- cred->uid != tcred->suid && cred->uid != tcred->uid &&
61570- !capable(CAP_SYS_NICE)) {
61571+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61572 rcu_read_unlock();
61573 err = -EPERM;
61574 goto out;
61575diff -urNp linux-3.0.3/mm/mlock.c linux-3.0.3/mm/mlock.c
61576--- linux-3.0.3/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61577+++ linux-3.0.3/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61578@@ -13,6 +13,7 @@
61579 #include <linux/pagemap.h>
61580 #include <linux/mempolicy.h>
61581 #include <linux/syscalls.h>
61582+#include <linux/security.h>
61583 #include <linux/sched.h>
61584 #include <linux/module.h>
61585 #include <linux/rmap.h>
61586@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61587 return -EINVAL;
61588 if (end == start)
61589 return 0;
61590+ if (end > TASK_SIZE)
61591+ return -EINVAL;
61592+
61593 vma = find_vma_prev(current->mm, start, &prev);
61594 if (!vma || vma->vm_start > start)
61595 return -ENOMEM;
61596@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61597 for (nstart = start ; ; ) {
61598 vm_flags_t newflags;
61599
61600+#ifdef CONFIG_PAX_SEGMEXEC
61601+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61602+ break;
61603+#endif
61604+
61605 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61606
61607 newflags = vma->vm_flags | VM_LOCKED;
61608@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61609 lock_limit >>= PAGE_SHIFT;
61610
61611 /* check against resource limits */
61612+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61613 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61614 error = do_mlock(start, len, 1);
61615 up_write(&current->mm->mmap_sem);
61616@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61617 static int do_mlockall(int flags)
61618 {
61619 struct vm_area_struct * vma, * prev = NULL;
61620- unsigned int def_flags = 0;
61621
61622 if (flags & MCL_FUTURE)
61623- def_flags = VM_LOCKED;
61624- current->mm->def_flags = def_flags;
61625+ current->mm->def_flags |= VM_LOCKED;
61626+ else
61627+ current->mm->def_flags &= ~VM_LOCKED;
61628 if (flags == MCL_FUTURE)
61629 goto out;
61630
61631 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61632 vm_flags_t newflags;
61633
61634+#ifdef CONFIG_PAX_SEGMEXEC
61635+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61636+ break;
61637+#endif
61638+
61639+ BUG_ON(vma->vm_end > TASK_SIZE);
61640 newflags = vma->vm_flags | VM_LOCKED;
61641 if (!(flags & MCL_CURRENT))
61642 newflags &= ~VM_LOCKED;
61643@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61644 lock_limit >>= PAGE_SHIFT;
61645
61646 ret = -ENOMEM;
61647+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61648 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61649 capable(CAP_IPC_LOCK))
61650 ret = do_mlockall(flags);
61651diff -urNp linux-3.0.3/mm/mmap.c linux-3.0.3/mm/mmap.c
61652--- linux-3.0.3/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61653+++ linux-3.0.3/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61654@@ -46,6 +46,16 @@
61655 #define arch_rebalance_pgtables(addr, len) (addr)
61656 #endif
61657
61658+static inline void verify_mm_writelocked(struct mm_struct *mm)
61659+{
61660+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61661+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61662+ up_read(&mm->mmap_sem);
61663+ BUG();
61664+ }
61665+#endif
61666+}
61667+
61668 static void unmap_region(struct mm_struct *mm,
61669 struct vm_area_struct *vma, struct vm_area_struct *prev,
61670 unsigned long start, unsigned long end);
61671@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61672 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61673 *
61674 */
61675-pgprot_t protection_map[16] = {
61676+pgprot_t protection_map[16] __read_only = {
61677 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61678 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61679 };
61680
61681-pgprot_t vm_get_page_prot(unsigned long vm_flags)
61682+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61683 {
61684- return __pgprot(pgprot_val(protection_map[vm_flags &
61685+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61686 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61687 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61688+
61689+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61690+ if (!(__supported_pte_mask & _PAGE_NX) &&
61691+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61692+ (vm_flags & (VM_READ | VM_WRITE)))
61693+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61694+#endif
61695+
61696+ return prot;
61697 }
61698 EXPORT_SYMBOL(vm_get_page_prot);
61699
61700 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61701 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61702 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61703+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61704 /*
61705 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61706 * other variables. It can be updated by several CPUs frequently.
61707@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61708 struct vm_area_struct *next = vma->vm_next;
61709
61710 might_sleep();
61711+ BUG_ON(vma->vm_mirror);
61712 if (vma->vm_ops && vma->vm_ops->close)
61713 vma->vm_ops->close(vma);
61714 if (vma->vm_file) {
61715@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61716 * not page aligned -Ram Gupta
61717 */
61718 rlim = rlimit(RLIMIT_DATA);
61719+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61720 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61721 (mm->end_data - mm->start_data) > rlim)
61722 goto out;
61723@@ -697,6 +719,12 @@ static int
61724 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61725 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61726 {
61727+
61728+#ifdef CONFIG_PAX_SEGMEXEC
61729+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61730+ return 0;
61731+#endif
61732+
61733 if (is_mergeable_vma(vma, file, vm_flags) &&
61734 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61735 if (vma->vm_pgoff == vm_pgoff)
61736@@ -716,6 +744,12 @@ static int
61737 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61738 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61739 {
61740+
61741+#ifdef CONFIG_PAX_SEGMEXEC
61742+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61743+ return 0;
61744+#endif
61745+
61746 if (is_mergeable_vma(vma, file, vm_flags) &&
61747 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61748 pgoff_t vm_pglen;
61749@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61750 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61751 struct vm_area_struct *prev, unsigned long addr,
61752 unsigned long end, unsigned long vm_flags,
61753- struct anon_vma *anon_vma, struct file *file,
61754+ struct anon_vma *anon_vma, struct file *file,
61755 pgoff_t pgoff, struct mempolicy *policy)
61756 {
61757 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61758 struct vm_area_struct *area, *next;
61759 int err;
61760
61761+#ifdef CONFIG_PAX_SEGMEXEC
61762+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61763+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61764+
61765+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61766+#endif
61767+
61768 /*
61769 * We later require that vma->vm_flags == vm_flags,
61770 * so this tests vma->vm_flags & VM_SPECIAL, too.
61771@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61772 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61773 next = next->vm_next;
61774
61775+#ifdef CONFIG_PAX_SEGMEXEC
61776+ if (prev)
61777+ prev_m = pax_find_mirror_vma(prev);
61778+ if (area)
61779+ area_m = pax_find_mirror_vma(area);
61780+ if (next)
61781+ next_m = pax_find_mirror_vma(next);
61782+#endif
61783+
61784 /*
61785 * Can it merge with the predecessor?
61786 */
61787@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61788 /* cases 1, 6 */
61789 err = vma_adjust(prev, prev->vm_start,
61790 next->vm_end, prev->vm_pgoff, NULL);
61791- } else /* cases 2, 5, 7 */
61792+
61793+#ifdef CONFIG_PAX_SEGMEXEC
61794+ if (!err && prev_m)
61795+ err = vma_adjust(prev_m, prev_m->vm_start,
61796+ next_m->vm_end, prev_m->vm_pgoff, NULL);
61797+#endif
61798+
61799+ } else { /* cases 2, 5, 7 */
61800 err = vma_adjust(prev, prev->vm_start,
61801 end, prev->vm_pgoff, NULL);
61802+
61803+#ifdef CONFIG_PAX_SEGMEXEC
61804+ if (!err && prev_m)
61805+ err = vma_adjust(prev_m, prev_m->vm_start,
61806+ end_m, prev_m->vm_pgoff, NULL);
61807+#endif
61808+
61809+ }
61810 if (err)
61811 return NULL;
61812 khugepaged_enter_vma_merge(prev);
61813@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61814 mpol_equal(policy, vma_policy(next)) &&
61815 can_vma_merge_before(next, vm_flags,
61816 anon_vma, file, pgoff+pglen)) {
61817- if (prev && addr < prev->vm_end) /* case 4 */
61818+ if (prev && addr < prev->vm_end) { /* case 4 */
61819 err = vma_adjust(prev, prev->vm_start,
61820 addr, prev->vm_pgoff, NULL);
61821- else /* cases 3, 8 */
61822+
61823+#ifdef CONFIG_PAX_SEGMEXEC
61824+ if (!err && prev_m)
61825+ err = vma_adjust(prev_m, prev_m->vm_start,
61826+ addr_m, prev_m->vm_pgoff, NULL);
61827+#endif
61828+
61829+ } else { /* cases 3, 8 */
61830 err = vma_adjust(area, addr, next->vm_end,
61831 next->vm_pgoff - pglen, NULL);
61832+
61833+#ifdef CONFIG_PAX_SEGMEXEC
61834+ if (!err && area_m)
61835+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
61836+ next_m->vm_pgoff - pglen, NULL);
61837+#endif
61838+
61839+ }
61840 if (err)
61841 return NULL;
61842 khugepaged_enter_vma_merge(area);
61843@@ -929,14 +1009,11 @@ none:
61844 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61845 struct file *file, long pages)
61846 {
61847- const unsigned long stack_flags
61848- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61849-
61850 if (file) {
61851 mm->shared_vm += pages;
61852 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61853 mm->exec_vm += pages;
61854- } else if (flags & stack_flags)
61855+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61856 mm->stack_vm += pages;
61857 if (flags & (VM_RESERVED|VM_IO))
61858 mm->reserved_vm += pages;
61859@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61860 * (the exception is when the underlying filesystem is noexec
61861 * mounted, in which case we dont add PROT_EXEC.)
61862 */
61863- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61864+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61865 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61866 prot |= PROT_EXEC;
61867
61868@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61869 /* Obtain the address to map to. we verify (or select) it and ensure
61870 * that it represents a valid section of the address space.
61871 */
61872- addr = get_unmapped_area(file, addr, len, pgoff, flags);
61873+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61874 if (addr & ~PAGE_MASK)
61875 return addr;
61876
61877@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61878 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61879 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61880
61881+#ifdef CONFIG_PAX_MPROTECT
61882+ if (mm->pax_flags & MF_PAX_MPROTECT) {
61883+#ifndef CONFIG_PAX_MPROTECT_COMPAT
61884+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61885+ gr_log_rwxmmap(file);
61886+
61887+#ifdef CONFIG_PAX_EMUPLT
61888+ vm_flags &= ~VM_EXEC;
61889+#else
61890+ return -EPERM;
61891+#endif
61892+
61893+ }
61894+
61895+ if (!(vm_flags & VM_EXEC))
61896+ vm_flags &= ~VM_MAYEXEC;
61897+#else
61898+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61899+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61900+#endif
61901+ else
61902+ vm_flags &= ~VM_MAYWRITE;
61903+ }
61904+#endif
61905+
61906+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61907+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61908+ vm_flags &= ~VM_PAGEEXEC;
61909+#endif
61910+
61911 if (flags & MAP_LOCKED)
61912 if (!can_do_mlock())
61913 return -EPERM;
61914@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61915 locked += mm->locked_vm;
61916 lock_limit = rlimit(RLIMIT_MEMLOCK);
61917 lock_limit >>= PAGE_SHIFT;
61918+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61919 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61920 return -EAGAIN;
61921 }
61922@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61923 if (error)
61924 return error;
61925
61926+ if (!gr_acl_handle_mmap(file, prot))
61927+ return -EACCES;
61928+
61929 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61930 }
61931 EXPORT_SYMBOL(do_mmap_pgoff);
61932@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61933 vm_flags_t vm_flags = vma->vm_flags;
61934
61935 /* If it was private or non-writable, the write bit is already clear */
61936- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
61937+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
61938 return 0;
61939
61940 /* The backer wishes to know when pages are first written to? */
61941@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
61942 unsigned long charged = 0;
61943 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
61944
61945+#ifdef CONFIG_PAX_SEGMEXEC
61946+ struct vm_area_struct *vma_m = NULL;
61947+#endif
61948+
61949+ /*
61950+ * mm->mmap_sem is required to protect against another thread
61951+ * changing the mappings in case we sleep.
61952+ */
61953+ verify_mm_writelocked(mm);
61954+
61955 /* Clear old maps */
61956 error = -ENOMEM;
61957-munmap_back:
61958 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61959 if (vma && vma->vm_start < addr + len) {
61960 if (do_munmap(mm, addr, len))
61961 return -ENOMEM;
61962- goto munmap_back;
61963+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61964+ BUG_ON(vma && vma->vm_start < addr + len);
61965 }
61966
61967 /* Check against address space limit. */
61968@@ -1266,6 +1387,16 @@ munmap_back:
61969 goto unacct_error;
61970 }
61971
61972+#ifdef CONFIG_PAX_SEGMEXEC
61973+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
61974+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
61975+ if (!vma_m) {
61976+ error = -ENOMEM;
61977+ goto free_vma;
61978+ }
61979+ }
61980+#endif
61981+
61982 vma->vm_mm = mm;
61983 vma->vm_start = addr;
61984 vma->vm_end = addr + len;
61985@@ -1289,6 +1420,19 @@ munmap_back:
61986 error = file->f_op->mmap(file, vma);
61987 if (error)
61988 goto unmap_and_free_vma;
61989+
61990+#ifdef CONFIG_PAX_SEGMEXEC
61991+ if (vma_m && (vm_flags & VM_EXECUTABLE))
61992+ added_exe_file_vma(mm);
61993+#endif
61994+
61995+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61996+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
61997+ vma->vm_flags |= VM_PAGEEXEC;
61998+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61999+ }
62000+#endif
62001+
62002 if (vm_flags & VM_EXECUTABLE)
62003 added_exe_file_vma(mm);
62004
62005@@ -1324,6 +1468,11 @@ munmap_back:
62006 vma_link(mm, vma, prev, rb_link, rb_parent);
62007 file = vma->vm_file;
62008
62009+#ifdef CONFIG_PAX_SEGMEXEC
62010+ if (vma_m)
62011+ BUG_ON(pax_mirror_vma(vma_m, vma));
62012+#endif
62013+
62014 /* Once vma denies write, undo our temporary denial count */
62015 if (correct_wcount)
62016 atomic_inc(&inode->i_writecount);
62017@@ -1332,6 +1481,7 @@ out:
62018
62019 mm->total_vm += len >> PAGE_SHIFT;
62020 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62021+ track_exec_limit(mm, addr, addr + len, vm_flags);
62022 if (vm_flags & VM_LOCKED) {
62023 if (!mlock_vma_pages_range(vma, addr, addr + len))
62024 mm->locked_vm += (len >> PAGE_SHIFT);
62025@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
62026 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62027 charged = 0;
62028 free_vma:
62029+
62030+#ifdef CONFIG_PAX_SEGMEXEC
62031+ if (vma_m)
62032+ kmem_cache_free(vm_area_cachep, vma_m);
62033+#endif
62034+
62035 kmem_cache_free(vm_area_cachep, vma);
62036 unacct_error:
62037 if (charged)
62038@@ -1356,6 +1512,44 @@ unacct_error:
62039 return error;
62040 }
62041
62042+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62043+{
62044+ if (!vma) {
62045+#ifdef CONFIG_STACK_GROWSUP
62046+ if (addr > sysctl_heap_stack_gap)
62047+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62048+ else
62049+ vma = find_vma(current->mm, 0);
62050+ if (vma && (vma->vm_flags & VM_GROWSUP))
62051+ return false;
62052+#endif
62053+ return true;
62054+ }
62055+
62056+ if (addr + len > vma->vm_start)
62057+ return false;
62058+
62059+ if (vma->vm_flags & VM_GROWSDOWN)
62060+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62061+#ifdef CONFIG_STACK_GROWSUP
62062+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62063+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62064+#endif
62065+
62066+ return true;
62067+}
62068+
62069+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62070+{
62071+ if (vma->vm_start < len)
62072+ return -ENOMEM;
62073+ if (!(vma->vm_flags & VM_GROWSDOWN))
62074+ return vma->vm_start - len;
62075+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
62076+ return vma->vm_start - len - sysctl_heap_stack_gap;
62077+ return -ENOMEM;
62078+}
62079+
62080 /* Get an address range which is currently unmapped.
62081 * For shmat() with addr=0.
62082 *
62083@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
62084 if (flags & MAP_FIXED)
62085 return addr;
62086
62087+#ifdef CONFIG_PAX_RANDMMAP
62088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62089+#endif
62090+
62091 if (addr) {
62092 addr = PAGE_ALIGN(addr);
62093- vma = find_vma(mm, addr);
62094- if (TASK_SIZE - len >= addr &&
62095- (!vma || addr + len <= vma->vm_start))
62096- return addr;
62097+ if (TASK_SIZE - len >= addr) {
62098+ vma = find_vma(mm, addr);
62099+ if (check_heap_stack_gap(vma, addr, len))
62100+ return addr;
62101+ }
62102 }
62103 if (len > mm->cached_hole_size) {
62104- start_addr = addr = mm->free_area_cache;
62105+ start_addr = addr = mm->free_area_cache;
62106 } else {
62107- start_addr = addr = TASK_UNMAPPED_BASE;
62108- mm->cached_hole_size = 0;
62109+ start_addr = addr = mm->mmap_base;
62110+ mm->cached_hole_size = 0;
62111 }
62112
62113 full_search:
62114@@ -1404,34 +1603,40 @@ full_search:
62115 * Start a new search - just in case we missed
62116 * some holes.
62117 */
62118- if (start_addr != TASK_UNMAPPED_BASE) {
62119- addr = TASK_UNMAPPED_BASE;
62120- start_addr = addr;
62121+ if (start_addr != mm->mmap_base) {
62122+ start_addr = addr = mm->mmap_base;
62123 mm->cached_hole_size = 0;
62124 goto full_search;
62125 }
62126 return -ENOMEM;
62127 }
62128- if (!vma || addr + len <= vma->vm_start) {
62129- /*
62130- * Remember the place where we stopped the search:
62131- */
62132- mm->free_area_cache = addr + len;
62133- return addr;
62134- }
62135+ if (check_heap_stack_gap(vma, addr, len))
62136+ break;
62137 if (addr + mm->cached_hole_size < vma->vm_start)
62138 mm->cached_hole_size = vma->vm_start - addr;
62139 addr = vma->vm_end;
62140 }
62141+
62142+ /*
62143+ * Remember the place where we stopped the search:
62144+ */
62145+ mm->free_area_cache = addr + len;
62146+ return addr;
62147 }
62148 #endif
62149
62150 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62151 {
62152+
62153+#ifdef CONFIG_PAX_SEGMEXEC
62154+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62155+ return;
62156+#endif
62157+
62158 /*
62159 * Is this a new hole at the lowest possible address?
62160 */
62161- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62162+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62163 mm->free_area_cache = addr;
62164 mm->cached_hole_size = ~0UL;
62165 }
62166@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
62167 {
62168 struct vm_area_struct *vma;
62169 struct mm_struct *mm = current->mm;
62170- unsigned long addr = addr0;
62171+ unsigned long base = mm->mmap_base, addr = addr0;
62172
62173 /* requested length too big for entire address space */
62174 if (len > TASK_SIZE)
62175@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
62176 if (flags & MAP_FIXED)
62177 return addr;
62178
62179+#ifdef CONFIG_PAX_RANDMMAP
62180+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62181+#endif
62182+
62183 /* requesting a specific address */
62184 if (addr) {
62185 addr = PAGE_ALIGN(addr);
62186- vma = find_vma(mm, addr);
62187- if (TASK_SIZE - len >= addr &&
62188- (!vma || addr + len <= vma->vm_start))
62189- return addr;
62190+ if (TASK_SIZE - len >= addr) {
62191+ vma = find_vma(mm, addr);
62192+ if (check_heap_stack_gap(vma, addr, len))
62193+ return addr;
62194+ }
62195 }
62196
62197 /* check if free_area_cache is useful for us */
62198@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
62199 /* make sure it can fit in the remaining address space */
62200 if (addr > len) {
62201 vma = find_vma(mm, addr-len);
62202- if (!vma || addr <= vma->vm_start)
62203+ if (check_heap_stack_gap(vma, addr - len, len))
62204 /* remember the address as a hint for next time */
62205 return (mm->free_area_cache = addr-len);
62206 }
62207@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
62208 * return with success:
62209 */
62210 vma = find_vma(mm, addr);
62211- if (!vma || addr+len <= vma->vm_start)
62212+ if (check_heap_stack_gap(vma, addr, len))
62213 /* remember the address as a hint for next time */
62214 return (mm->free_area_cache = addr);
62215
62216@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
62217 mm->cached_hole_size = vma->vm_start - addr;
62218
62219 /* try just below the current vma->vm_start */
62220- addr = vma->vm_start-len;
62221- } while (len < vma->vm_start);
62222+ addr = skip_heap_stack_gap(vma, len);
62223+ } while (!IS_ERR_VALUE(addr));
62224
62225 bottomup:
62226 /*
62227@@ -1515,13 +1725,21 @@ bottomup:
62228 * can happen with large stack limits and large mmap()
62229 * allocations.
62230 */
62231+ mm->mmap_base = TASK_UNMAPPED_BASE;
62232+
62233+#ifdef CONFIG_PAX_RANDMMAP
62234+ if (mm->pax_flags & MF_PAX_RANDMMAP)
62235+ mm->mmap_base += mm->delta_mmap;
62236+#endif
62237+
62238+ mm->free_area_cache = mm->mmap_base;
62239 mm->cached_hole_size = ~0UL;
62240- mm->free_area_cache = TASK_UNMAPPED_BASE;
62241 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62242 /*
62243 * Restore the topdown base:
62244 */
62245- mm->free_area_cache = mm->mmap_base;
62246+ mm->mmap_base = base;
62247+ mm->free_area_cache = base;
62248 mm->cached_hole_size = ~0UL;
62249
62250 return addr;
62251@@ -1530,6 +1748,12 @@ bottomup:
62252
62253 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62254 {
62255+
62256+#ifdef CONFIG_PAX_SEGMEXEC
62257+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62258+ return;
62259+#endif
62260+
62261 /*
62262 * Is this a new hole at the highest possible address?
62263 */
62264@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
62265 mm->free_area_cache = addr;
62266
62267 /* dont allow allocations above current base */
62268- if (mm->free_area_cache > mm->mmap_base)
62269+ if (mm->free_area_cache > mm->mmap_base) {
62270 mm->free_area_cache = mm->mmap_base;
62271+ mm->cached_hole_size = ~0UL;
62272+ }
62273 }
62274
62275 unsigned long
62276@@ -1646,6 +1872,28 @@ out:
62277 return prev ? prev->vm_next : vma;
62278 }
62279
62280+#ifdef CONFIG_PAX_SEGMEXEC
62281+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62282+{
62283+ struct vm_area_struct *vma_m;
62284+
62285+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62286+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62287+ BUG_ON(vma->vm_mirror);
62288+ return NULL;
62289+ }
62290+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62291+ vma_m = vma->vm_mirror;
62292+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62293+ BUG_ON(vma->vm_file != vma_m->vm_file);
62294+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62295+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62296+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62297+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62298+ return vma_m;
62299+}
62300+#endif
62301+
62302 /*
62303 * Verify that the stack growth is acceptable and
62304 * update accounting. This is shared with both the
62305@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62306 return -ENOMEM;
62307
62308 /* Stack limit test */
62309+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
62310 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62311 return -ENOMEM;
62312
62313@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62314 locked = mm->locked_vm + grow;
62315 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62316 limit >>= PAGE_SHIFT;
62317+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62318 if (locked > limit && !capable(CAP_IPC_LOCK))
62319 return -ENOMEM;
62320 }
62321@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62322 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62323 * vma is the last one with address > vma->vm_end. Have to extend vma.
62324 */
62325+#ifndef CONFIG_IA64
62326+static
62327+#endif
62328 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62329 {
62330 int error;
62331+ bool locknext;
62332
62333 if (!(vma->vm_flags & VM_GROWSUP))
62334 return -EFAULT;
62335
62336+ /* Also guard against wrapping around to address 0. */
62337+ if (address < PAGE_ALIGN(address+1))
62338+ address = PAGE_ALIGN(address+1);
62339+ else
62340+ return -ENOMEM;
62341+
62342 /*
62343 * We must make sure the anon_vma is allocated
62344 * so that the anon_vma locking is not a noop.
62345 */
62346 if (unlikely(anon_vma_prepare(vma)))
62347 return -ENOMEM;
62348+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62349+ if (locknext && anon_vma_prepare(vma->vm_next))
62350+ return -ENOMEM;
62351 vma_lock_anon_vma(vma);
62352+ if (locknext)
62353+ vma_lock_anon_vma(vma->vm_next);
62354
62355 /*
62356 * vma->vm_start/vm_end cannot change under us because the caller
62357 * is required to hold the mmap_sem in read mode. We need the
62358- * anon_vma lock to serialize against concurrent expand_stacks.
62359- * Also guard against wrapping around to address 0.
62360+ * anon_vma locks to serialize against concurrent expand_stacks
62361+ * and expand_upwards.
62362 */
62363- if (address < PAGE_ALIGN(address+4))
62364- address = PAGE_ALIGN(address+4);
62365- else {
62366- vma_unlock_anon_vma(vma);
62367- return -ENOMEM;
62368- }
62369 error = 0;
62370
62371 /* Somebody else might have raced and expanded it already */
62372- if (address > vma->vm_end) {
62373+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62374+ error = -ENOMEM;
62375+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62376 unsigned long size, grow;
62377
62378 size = address - vma->vm_start;
62379@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62380 }
62381 }
62382 }
62383+ if (locknext)
62384+ vma_unlock_anon_vma(vma->vm_next);
62385 vma_unlock_anon_vma(vma);
62386 khugepaged_enter_vma_merge(vma);
62387 return error;
62388@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62389 unsigned long address)
62390 {
62391 int error;
62392+ bool lockprev = false;
62393+ struct vm_area_struct *prev;
62394
62395 /*
62396 * We must make sure the anon_vma is allocated
62397@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62398 if (error)
62399 return error;
62400
62401+ prev = vma->vm_prev;
62402+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62403+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62404+#endif
62405+ if (lockprev && anon_vma_prepare(prev))
62406+ return -ENOMEM;
62407+ if (lockprev)
62408+ vma_lock_anon_vma(prev);
62409+
62410 vma_lock_anon_vma(vma);
62411
62412 /*
62413@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62414 */
62415
62416 /* Somebody else might have raced and expanded it already */
62417- if (address < vma->vm_start) {
62418+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62419+ error = -ENOMEM;
62420+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62421 unsigned long size, grow;
62422
62423+#ifdef CONFIG_PAX_SEGMEXEC
62424+ struct vm_area_struct *vma_m;
62425+
62426+ vma_m = pax_find_mirror_vma(vma);
62427+#endif
62428+
62429 size = vma->vm_end - address;
62430 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62431
62432@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62433 if (!error) {
62434 vma->vm_start = address;
62435 vma->vm_pgoff -= grow;
62436+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62437+
62438+#ifdef CONFIG_PAX_SEGMEXEC
62439+ if (vma_m) {
62440+ vma_m->vm_start -= grow << PAGE_SHIFT;
62441+ vma_m->vm_pgoff -= grow;
62442+ }
62443+#endif
62444+
62445 perf_event_mmap(vma);
62446 }
62447 }
62448 }
62449 vma_unlock_anon_vma(vma);
62450+ if (lockprev)
62451+ vma_unlock_anon_vma(prev);
62452 khugepaged_enter_vma_merge(vma);
62453 return error;
62454 }
62455@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62456 do {
62457 long nrpages = vma_pages(vma);
62458
62459+#ifdef CONFIG_PAX_SEGMEXEC
62460+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62461+ vma = remove_vma(vma);
62462+ continue;
62463+ }
62464+#endif
62465+
62466 mm->total_vm -= nrpages;
62467 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62468 vma = remove_vma(vma);
62469@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62470 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62471 vma->vm_prev = NULL;
62472 do {
62473+
62474+#ifdef CONFIG_PAX_SEGMEXEC
62475+ if (vma->vm_mirror) {
62476+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62477+ vma->vm_mirror->vm_mirror = NULL;
62478+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
62479+ vma->vm_mirror = NULL;
62480+ }
62481+#endif
62482+
62483 rb_erase(&vma->vm_rb, &mm->mm_rb);
62484 mm->map_count--;
62485 tail_vma = vma;
62486@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62487 struct vm_area_struct *new;
62488 int err = -ENOMEM;
62489
62490+#ifdef CONFIG_PAX_SEGMEXEC
62491+ struct vm_area_struct *vma_m, *new_m = NULL;
62492+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62493+#endif
62494+
62495 if (is_vm_hugetlb_page(vma) && (addr &
62496 ~(huge_page_mask(hstate_vma(vma)))))
62497 return -EINVAL;
62498
62499+#ifdef CONFIG_PAX_SEGMEXEC
62500+ vma_m = pax_find_mirror_vma(vma);
62501+#endif
62502+
62503 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62504 if (!new)
62505 goto out_err;
62506
62507+#ifdef CONFIG_PAX_SEGMEXEC
62508+ if (vma_m) {
62509+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62510+ if (!new_m) {
62511+ kmem_cache_free(vm_area_cachep, new);
62512+ goto out_err;
62513+ }
62514+ }
62515+#endif
62516+
62517 /* most fields are the same, copy all, and then fixup */
62518 *new = *vma;
62519
62520@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62521 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62522 }
62523
62524+#ifdef CONFIG_PAX_SEGMEXEC
62525+ if (vma_m) {
62526+ *new_m = *vma_m;
62527+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
62528+ new_m->vm_mirror = new;
62529+ new->vm_mirror = new_m;
62530+
62531+ if (new_below)
62532+ new_m->vm_end = addr_m;
62533+ else {
62534+ new_m->vm_start = addr_m;
62535+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62536+ }
62537+ }
62538+#endif
62539+
62540 pol = mpol_dup(vma_policy(vma));
62541 if (IS_ERR(pol)) {
62542 err = PTR_ERR(pol);
62543@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62544 else
62545 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62546
62547+#ifdef CONFIG_PAX_SEGMEXEC
62548+ if (!err && vma_m) {
62549+ if (anon_vma_clone(new_m, vma_m))
62550+ goto out_free_mpol;
62551+
62552+ mpol_get(pol);
62553+ vma_set_policy(new_m, pol);
62554+
62555+ if (new_m->vm_file) {
62556+ get_file(new_m->vm_file);
62557+ if (vma_m->vm_flags & VM_EXECUTABLE)
62558+ added_exe_file_vma(mm);
62559+ }
62560+
62561+ if (new_m->vm_ops && new_m->vm_ops->open)
62562+ new_m->vm_ops->open(new_m);
62563+
62564+ if (new_below)
62565+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62566+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62567+ else
62568+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62569+
62570+ if (err) {
62571+ if (new_m->vm_ops && new_m->vm_ops->close)
62572+ new_m->vm_ops->close(new_m);
62573+ if (new_m->vm_file) {
62574+ if (vma_m->vm_flags & VM_EXECUTABLE)
62575+ removed_exe_file_vma(mm);
62576+ fput(new_m->vm_file);
62577+ }
62578+ mpol_put(pol);
62579+ }
62580+ }
62581+#endif
62582+
62583 /* Success. */
62584 if (!err)
62585 return 0;
62586@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62587 removed_exe_file_vma(mm);
62588 fput(new->vm_file);
62589 }
62590- unlink_anon_vmas(new);
62591 out_free_mpol:
62592 mpol_put(pol);
62593 out_free_vma:
62594+
62595+#ifdef CONFIG_PAX_SEGMEXEC
62596+ if (new_m) {
62597+ unlink_anon_vmas(new_m);
62598+ kmem_cache_free(vm_area_cachep, new_m);
62599+ }
62600+#endif
62601+
62602+ unlink_anon_vmas(new);
62603 kmem_cache_free(vm_area_cachep, new);
62604 out_err:
62605 return err;
62606@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62607 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62608 unsigned long addr, int new_below)
62609 {
62610+
62611+#ifdef CONFIG_PAX_SEGMEXEC
62612+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62613+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62614+ if (mm->map_count >= sysctl_max_map_count-1)
62615+ return -ENOMEM;
62616+ } else
62617+#endif
62618+
62619 if (mm->map_count >= sysctl_max_map_count)
62620 return -ENOMEM;
62621
62622@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62623 * work. This now handles partial unmappings.
62624 * Jeremy Fitzhardinge <jeremy@goop.org>
62625 */
62626+#ifdef CONFIG_PAX_SEGMEXEC
62627 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62628 {
62629+ int ret = __do_munmap(mm, start, len);
62630+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62631+ return ret;
62632+
62633+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62634+}
62635+
62636+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62637+#else
62638+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62639+#endif
62640+{
62641 unsigned long end;
62642 struct vm_area_struct *vma, *prev, *last;
62643
62644+ /*
62645+ * mm->mmap_sem is required to protect against another thread
62646+ * changing the mappings in case we sleep.
62647+ */
62648+ verify_mm_writelocked(mm);
62649+
62650 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62651 return -EINVAL;
62652
62653@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62654 /* Fix up all other VM information */
62655 remove_vma_list(mm, vma);
62656
62657+ track_exec_limit(mm, start, end, 0UL);
62658+
62659 return 0;
62660 }
62661
62662@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62663
62664 profile_munmap(addr);
62665
62666+#ifdef CONFIG_PAX_SEGMEXEC
62667+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62668+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62669+ return -EINVAL;
62670+#endif
62671+
62672 down_write(&mm->mmap_sem);
62673 ret = do_munmap(mm, addr, len);
62674 up_write(&mm->mmap_sem);
62675 return ret;
62676 }
62677
62678-static inline void verify_mm_writelocked(struct mm_struct *mm)
62679-{
62680-#ifdef CONFIG_DEBUG_VM
62681- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62682- WARN_ON(1);
62683- up_read(&mm->mmap_sem);
62684- }
62685-#endif
62686-}
62687-
62688 /*
62689 * this is really a simplified "do_mmap". it only handles
62690 * anonymous maps. eventually we may be able to do some
62691@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62692 struct rb_node ** rb_link, * rb_parent;
62693 pgoff_t pgoff = addr >> PAGE_SHIFT;
62694 int error;
62695+ unsigned long charged;
62696
62697 len = PAGE_ALIGN(len);
62698 if (!len)
62699@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62700
62701 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62702
62703+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62704+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62705+ flags &= ~VM_EXEC;
62706+
62707+#ifdef CONFIG_PAX_MPROTECT
62708+ if (mm->pax_flags & MF_PAX_MPROTECT)
62709+ flags &= ~VM_MAYEXEC;
62710+#endif
62711+
62712+ }
62713+#endif
62714+
62715 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62716 if (error & ~PAGE_MASK)
62717 return error;
62718
62719+ charged = len >> PAGE_SHIFT;
62720+
62721 /*
62722 * mlock MCL_FUTURE?
62723 */
62724 if (mm->def_flags & VM_LOCKED) {
62725 unsigned long locked, lock_limit;
62726- locked = len >> PAGE_SHIFT;
62727+ locked = charged;
62728 locked += mm->locked_vm;
62729 lock_limit = rlimit(RLIMIT_MEMLOCK);
62730 lock_limit >>= PAGE_SHIFT;
62731@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62732 /*
62733 * Clear old maps. this also does some error checking for us
62734 */
62735- munmap_back:
62736 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62737 if (vma && vma->vm_start < addr + len) {
62738 if (do_munmap(mm, addr, len))
62739 return -ENOMEM;
62740- goto munmap_back;
62741+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62742+ BUG_ON(vma && vma->vm_start < addr + len);
62743 }
62744
62745 /* Check against address space limits *after* clearing old maps... */
62746- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62747+ if (!may_expand_vm(mm, charged))
62748 return -ENOMEM;
62749
62750 if (mm->map_count > sysctl_max_map_count)
62751 return -ENOMEM;
62752
62753- if (security_vm_enough_memory(len >> PAGE_SHIFT))
62754+ if (security_vm_enough_memory(charged))
62755 return -ENOMEM;
62756
62757 /* Can we just expand an old private anonymous mapping? */
62758@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62759 */
62760 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62761 if (!vma) {
62762- vm_unacct_memory(len >> PAGE_SHIFT);
62763+ vm_unacct_memory(charged);
62764 return -ENOMEM;
62765 }
62766
62767@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62768 vma_link(mm, vma, prev, rb_link, rb_parent);
62769 out:
62770 perf_event_mmap(vma);
62771- mm->total_vm += len >> PAGE_SHIFT;
62772+ mm->total_vm += charged;
62773 if (flags & VM_LOCKED) {
62774 if (!mlock_vma_pages_range(vma, addr, addr + len))
62775- mm->locked_vm += (len >> PAGE_SHIFT);
62776+ mm->locked_vm += charged;
62777 }
62778+ track_exec_limit(mm, addr, addr + len, flags);
62779 return addr;
62780 }
62781
62782@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62783 * Walk the list again, actually closing and freeing it,
62784 * with preemption enabled, without holding any MM locks.
62785 */
62786- while (vma)
62787+ while (vma) {
62788+ vma->vm_mirror = NULL;
62789 vma = remove_vma(vma);
62790+ }
62791
62792 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62793 }
62794@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62795 struct vm_area_struct * __vma, * prev;
62796 struct rb_node ** rb_link, * rb_parent;
62797
62798+#ifdef CONFIG_PAX_SEGMEXEC
62799+ struct vm_area_struct *vma_m = NULL;
62800+#endif
62801+
62802+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62803+ return -EPERM;
62804+
62805 /*
62806 * The vm_pgoff of a purely anonymous vma should be irrelevant
62807 * until its first write fault, when page's anon_vma and index
62808@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62809 if ((vma->vm_flags & VM_ACCOUNT) &&
62810 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62811 return -ENOMEM;
62812+
62813+#ifdef CONFIG_PAX_SEGMEXEC
62814+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62815+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62816+ if (!vma_m)
62817+ return -ENOMEM;
62818+ }
62819+#endif
62820+
62821 vma_link(mm, vma, prev, rb_link, rb_parent);
62822+
62823+#ifdef CONFIG_PAX_SEGMEXEC
62824+ if (vma_m)
62825+ BUG_ON(pax_mirror_vma(vma_m, vma));
62826+#endif
62827+
62828 return 0;
62829 }
62830
62831@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62832 struct rb_node **rb_link, *rb_parent;
62833 struct mempolicy *pol;
62834
62835+ BUG_ON(vma->vm_mirror);
62836+
62837 /*
62838 * If anonymous vma has not yet been faulted, update new pgoff
62839 * to match new location, to increase its chance of merging.
62840@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62841 return NULL;
62842 }
62843
62844+#ifdef CONFIG_PAX_SEGMEXEC
62845+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62846+{
62847+ struct vm_area_struct *prev_m;
62848+ struct rb_node **rb_link_m, *rb_parent_m;
62849+ struct mempolicy *pol_m;
62850+
62851+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62852+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62853+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62854+ *vma_m = *vma;
62855+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62856+ if (anon_vma_clone(vma_m, vma))
62857+ return -ENOMEM;
62858+ pol_m = vma_policy(vma_m);
62859+ mpol_get(pol_m);
62860+ vma_set_policy(vma_m, pol_m);
62861+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62862+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62863+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62864+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62865+ if (vma_m->vm_file)
62866+ get_file(vma_m->vm_file);
62867+ if (vma_m->vm_ops && vma_m->vm_ops->open)
62868+ vma_m->vm_ops->open(vma_m);
62869+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62870+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62871+ vma_m->vm_mirror = vma;
62872+ vma->vm_mirror = vma_m;
62873+ return 0;
62874+}
62875+#endif
62876+
62877 /*
62878 * Return true if the calling process may expand its vm space by the passed
62879 * number of pages
62880@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62881 unsigned long lim;
62882
62883 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62884-
62885+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62886 if (cur + npages > lim)
62887 return 0;
62888 return 1;
62889@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62890 vma->vm_start = addr;
62891 vma->vm_end = addr + len;
62892
62893+#ifdef CONFIG_PAX_MPROTECT
62894+ if (mm->pax_flags & MF_PAX_MPROTECT) {
62895+#ifndef CONFIG_PAX_MPROTECT_COMPAT
62896+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62897+ return -EPERM;
62898+ if (!(vm_flags & VM_EXEC))
62899+ vm_flags &= ~VM_MAYEXEC;
62900+#else
62901+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62902+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62903+#endif
62904+ else
62905+ vm_flags &= ~VM_MAYWRITE;
62906+ }
62907+#endif
62908+
62909 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62910 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62911
62912diff -urNp linux-3.0.3/mm/mprotect.c linux-3.0.3/mm/mprotect.c
62913--- linux-3.0.3/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62914+++ linux-3.0.3/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62915@@ -23,10 +23,16 @@
62916 #include <linux/mmu_notifier.h>
62917 #include <linux/migrate.h>
62918 #include <linux/perf_event.h>
62919+
62920+#ifdef CONFIG_PAX_MPROTECT
62921+#include <linux/elf.h>
62922+#endif
62923+
62924 #include <asm/uaccess.h>
62925 #include <asm/pgtable.h>
62926 #include <asm/cacheflush.h>
62927 #include <asm/tlbflush.h>
62928+#include <asm/mmu_context.h>
62929
62930 #ifndef pgprot_modify
62931 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62932@@ -141,6 +147,48 @@ static void change_protection(struct vm_
62933 flush_tlb_range(vma, start, end);
62934 }
62935
62936+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62937+/* called while holding the mmap semaphor for writing except stack expansion */
62938+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
62939+{
62940+ unsigned long oldlimit, newlimit = 0UL;
62941+
62942+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
62943+ return;
62944+
62945+ spin_lock(&mm->page_table_lock);
62946+ oldlimit = mm->context.user_cs_limit;
62947+ if ((prot & VM_EXEC) && oldlimit < end)
62948+ /* USER_CS limit moved up */
62949+ newlimit = end;
62950+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
62951+ /* USER_CS limit moved down */
62952+ newlimit = start;
62953+
62954+ if (newlimit) {
62955+ mm->context.user_cs_limit = newlimit;
62956+
62957+#ifdef CONFIG_SMP
62958+ wmb();
62959+ cpus_clear(mm->context.cpu_user_cs_mask);
62960+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
62961+#endif
62962+
62963+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
62964+ }
62965+ spin_unlock(&mm->page_table_lock);
62966+ if (newlimit == end) {
62967+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
62968+
62969+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
62970+ if (is_vm_hugetlb_page(vma))
62971+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
62972+ else
62973+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
62974+ }
62975+}
62976+#endif
62977+
62978 int
62979 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
62980 unsigned long start, unsigned long end, unsigned long newflags)
62981@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
62982 int error;
62983 int dirty_accountable = 0;
62984
62985+#ifdef CONFIG_PAX_SEGMEXEC
62986+ struct vm_area_struct *vma_m = NULL;
62987+ unsigned long start_m, end_m;
62988+
62989+ start_m = start + SEGMEXEC_TASK_SIZE;
62990+ end_m = end + SEGMEXEC_TASK_SIZE;
62991+#endif
62992+
62993 if (newflags == oldflags) {
62994 *pprev = vma;
62995 return 0;
62996 }
62997
62998+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
62999+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63000+
63001+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63002+ return -ENOMEM;
63003+
63004+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63005+ return -ENOMEM;
63006+ }
63007+
63008 /*
63009 * If we make a private mapping writable we increase our commit;
63010 * but (without finer accounting) cannot reduce our commit if we
63011@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63012 }
63013 }
63014
63015+#ifdef CONFIG_PAX_SEGMEXEC
63016+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63017+ if (start != vma->vm_start) {
63018+ error = split_vma(mm, vma, start, 1);
63019+ if (error)
63020+ goto fail;
63021+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63022+ *pprev = (*pprev)->vm_next;
63023+ }
63024+
63025+ if (end != vma->vm_end) {
63026+ error = split_vma(mm, vma, end, 0);
63027+ if (error)
63028+ goto fail;
63029+ }
63030+
63031+ if (pax_find_mirror_vma(vma)) {
63032+ error = __do_munmap(mm, start_m, end_m - start_m);
63033+ if (error)
63034+ goto fail;
63035+ } else {
63036+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63037+ if (!vma_m) {
63038+ error = -ENOMEM;
63039+ goto fail;
63040+ }
63041+ vma->vm_flags = newflags;
63042+ error = pax_mirror_vma(vma_m, vma);
63043+ if (error) {
63044+ vma->vm_flags = oldflags;
63045+ goto fail;
63046+ }
63047+ }
63048+ }
63049+#endif
63050+
63051 /*
63052 * First try to merge with previous and/or next vma.
63053 */
63054@@ -204,9 +306,21 @@ success:
63055 * vm_flags and vm_page_prot are protected by the mmap_sem
63056 * held in write mode.
63057 */
63058+
63059+#ifdef CONFIG_PAX_SEGMEXEC
63060+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63061+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63062+#endif
63063+
63064 vma->vm_flags = newflags;
63065+
63066+#ifdef CONFIG_PAX_MPROTECT
63067+ if (mm->binfmt && mm->binfmt->handle_mprotect)
63068+ mm->binfmt->handle_mprotect(vma, newflags);
63069+#endif
63070+
63071 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63072- vm_get_page_prot(newflags));
63073+ vm_get_page_prot(vma->vm_flags));
63074
63075 if (vma_wants_writenotify(vma)) {
63076 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63077@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63078 end = start + len;
63079 if (end <= start)
63080 return -ENOMEM;
63081+
63082+#ifdef CONFIG_PAX_SEGMEXEC
63083+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63084+ if (end > SEGMEXEC_TASK_SIZE)
63085+ return -EINVAL;
63086+ } else
63087+#endif
63088+
63089+ if (end > TASK_SIZE)
63090+ return -EINVAL;
63091+
63092 if (!arch_validate_prot(prot))
63093 return -EINVAL;
63094
63095@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63096 /*
63097 * Does the application expect PROT_READ to imply PROT_EXEC:
63098 */
63099- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63100+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63101 prot |= PROT_EXEC;
63102
63103 vm_flags = calc_vm_prot_bits(prot);
63104@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63105 if (start > vma->vm_start)
63106 prev = vma;
63107
63108+#ifdef CONFIG_PAX_MPROTECT
63109+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63110+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
63111+#endif
63112+
63113 for (nstart = start ; ; ) {
63114 unsigned long newflags;
63115
63116@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63117
63118 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63119 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63120+ if (prot & (PROT_WRITE | PROT_EXEC))
63121+ gr_log_rwxmprotect(vma->vm_file);
63122+
63123+ error = -EACCES;
63124+ goto out;
63125+ }
63126+
63127+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63128 error = -EACCES;
63129 goto out;
63130 }
63131@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63132 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63133 if (error)
63134 goto out;
63135+
63136+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
63137+
63138 nstart = tmp;
63139
63140 if (nstart < prev->vm_end)
63141diff -urNp linux-3.0.3/mm/mremap.c linux-3.0.3/mm/mremap.c
63142--- linux-3.0.3/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
63143+++ linux-3.0.3/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
63144@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
63145 continue;
63146 pte = ptep_clear_flush(vma, old_addr, old_pte);
63147 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63148+
63149+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63150+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63151+ pte = pte_exprotect(pte);
63152+#endif
63153+
63154 set_pte_at(mm, new_addr, new_pte, pte);
63155 }
63156
63157@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
63158 if (is_vm_hugetlb_page(vma))
63159 goto Einval;
63160
63161+#ifdef CONFIG_PAX_SEGMEXEC
63162+ if (pax_find_mirror_vma(vma))
63163+ goto Einval;
63164+#endif
63165+
63166 /* We can't remap across vm area boundaries */
63167 if (old_len > vma->vm_end - addr)
63168 goto Efault;
63169@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
63170 unsigned long ret = -EINVAL;
63171 unsigned long charged = 0;
63172 unsigned long map_flags;
63173+ unsigned long pax_task_size = TASK_SIZE;
63174
63175 if (new_addr & ~PAGE_MASK)
63176 goto out;
63177
63178- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63179+#ifdef CONFIG_PAX_SEGMEXEC
63180+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63181+ pax_task_size = SEGMEXEC_TASK_SIZE;
63182+#endif
63183+
63184+ pax_task_size -= PAGE_SIZE;
63185+
63186+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63187 goto out;
63188
63189 /* Check if the location we're moving into overlaps the
63190 * old location at all, and fail if it does.
63191 */
63192- if ((new_addr <= addr) && (new_addr+new_len) > addr)
63193- goto out;
63194-
63195- if ((addr <= new_addr) && (addr+old_len) > new_addr)
63196+ if (addr + old_len > new_addr && new_addr + new_len > addr)
63197 goto out;
63198
63199 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63200@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
63201 struct vm_area_struct *vma;
63202 unsigned long ret = -EINVAL;
63203 unsigned long charged = 0;
63204+ unsigned long pax_task_size = TASK_SIZE;
63205
63206 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63207 goto out;
63208@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
63209 if (!new_len)
63210 goto out;
63211
63212+#ifdef CONFIG_PAX_SEGMEXEC
63213+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
63214+ pax_task_size = SEGMEXEC_TASK_SIZE;
63215+#endif
63216+
63217+ pax_task_size -= PAGE_SIZE;
63218+
63219+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63220+ old_len > pax_task_size || addr > pax_task_size-old_len)
63221+ goto out;
63222+
63223 if (flags & MREMAP_FIXED) {
63224 if (flags & MREMAP_MAYMOVE)
63225 ret = mremap_to(addr, old_len, new_addr, new_len);
63226@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
63227 addr + new_len);
63228 }
63229 ret = addr;
63230+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63231 goto out;
63232 }
63233 }
63234@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
63235 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63236 if (ret)
63237 goto out;
63238+
63239+ map_flags = vma->vm_flags;
63240 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63241+ if (!(ret & ~PAGE_MASK)) {
63242+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63243+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63244+ }
63245 }
63246 out:
63247 if (ret & ~PAGE_MASK)
63248diff -urNp linux-3.0.3/mm/nobootmem.c linux-3.0.3/mm/nobootmem.c
63249--- linux-3.0.3/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
63250+++ linux-3.0.3/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
63251@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63252 unsigned long __init free_all_memory_core_early(int nodeid)
63253 {
63254 int i;
63255- u64 start, end;
63256+ u64 start, end, startrange, endrange;
63257 unsigned long count = 0;
63258- struct range *range = NULL;
63259+ struct range *range = NULL, rangerange = { 0, 0 };
63260 int nr_range;
63261
63262 nr_range = get_free_all_memory_range(&range, nodeid);
63263+ startrange = __pa(range) >> PAGE_SHIFT;
63264+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63265
63266 for (i = 0; i < nr_range; i++) {
63267 start = range[i].start;
63268 end = range[i].end;
63269+ if (start <= endrange && startrange < end) {
63270+ BUG_ON(rangerange.start | rangerange.end);
63271+ rangerange = range[i];
63272+ continue;
63273+ }
63274 count += end - start;
63275 __free_pages_memory(start, end);
63276 }
63277+ start = rangerange.start;
63278+ end = rangerange.end;
63279+ count += end - start;
63280+ __free_pages_memory(start, end);
63281
63282 return count;
63283 }
63284diff -urNp linux-3.0.3/mm/nommu.c linux-3.0.3/mm/nommu.c
63285--- linux-3.0.3/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63286+++ linux-3.0.3/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63287@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63288 int sysctl_overcommit_ratio = 50; /* default is 50% */
63289 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63290 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63291-int heap_stack_gap = 0;
63292
63293 atomic_long_t mmap_pages_allocated;
63294
63295@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63296 EXPORT_SYMBOL(find_vma);
63297
63298 /*
63299- * find a VMA
63300- * - we don't extend stack VMAs under NOMMU conditions
63301- */
63302-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63303-{
63304- return find_vma(mm, addr);
63305-}
63306-
63307-/*
63308 * expand a stack to a given address
63309 * - not supported under NOMMU conditions
63310 */
63311@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63312
63313 /* most fields are the same, copy all, and then fixup */
63314 *new = *vma;
63315+ INIT_LIST_HEAD(&new->anon_vma_chain);
63316 *region = *vma->vm_region;
63317 new->vm_region = region;
63318
63319diff -urNp linux-3.0.3/mm/page_alloc.c linux-3.0.3/mm/page_alloc.c
63320--- linux-3.0.3/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63321+++ linux-3.0.3/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63322@@ -340,7 +340,7 @@ out:
63323 * This usage means that zero-order pages may not be compound.
63324 */
63325
63326-static void free_compound_page(struct page *page)
63327+void free_compound_page(struct page *page)
63328 {
63329 __free_pages_ok(page, compound_order(page));
63330 }
63331@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63332 int i;
63333 int bad = 0;
63334
63335+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63336+ unsigned long index = 1UL << order;
63337+#endif
63338+
63339 trace_mm_page_free_direct(page, order);
63340 kmemcheck_free_shadow(page, order);
63341
63342@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63343 debug_check_no_obj_freed(page_address(page),
63344 PAGE_SIZE << order);
63345 }
63346+
63347+#ifdef CONFIG_PAX_MEMORY_SANITIZE
63348+ for (; index; --index)
63349+ sanitize_highpage(page + index - 1);
63350+#endif
63351+
63352 arch_free_page(page, order);
63353 kernel_map_pages(page, 1 << order, 0);
63354
63355@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63356 arch_alloc_page(page, order);
63357 kernel_map_pages(page, 1 << order, 1);
63358
63359+#ifndef CONFIG_PAX_MEMORY_SANITIZE
63360 if (gfp_flags & __GFP_ZERO)
63361 prep_zero_page(page, order, gfp_flags);
63362+#endif
63363
63364 if (order && (gfp_flags & __GFP_COMP))
63365 prep_compound_page(page, order);
63366@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63367 int cpu;
63368 struct zone *zone;
63369
63370+ pax_track_stack();
63371+
63372 for_each_populated_zone(zone) {
63373 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63374 continue;
63375diff -urNp linux-3.0.3/mm/percpu.c linux-3.0.3/mm/percpu.c
63376--- linux-3.0.3/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63377+++ linux-3.0.3/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63378@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63379 static unsigned int pcpu_last_unit_cpu __read_mostly;
63380
63381 /* the address of the first chunk which starts with the kernel static area */
63382-void *pcpu_base_addr __read_mostly;
63383+void *pcpu_base_addr __read_only;
63384 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63385
63386 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63387diff -urNp linux-3.0.3/mm/rmap.c linux-3.0.3/mm/rmap.c
63388--- linux-3.0.3/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63389+++ linux-3.0.3/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63390@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63391 struct anon_vma *anon_vma = vma->anon_vma;
63392 struct anon_vma_chain *avc;
63393
63394+#ifdef CONFIG_PAX_SEGMEXEC
63395+ struct anon_vma_chain *avc_m = NULL;
63396+#endif
63397+
63398 might_sleep();
63399 if (unlikely(!anon_vma)) {
63400 struct mm_struct *mm = vma->vm_mm;
63401@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63402 if (!avc)
63403 goto out_enomem;
63404
63405+#ifdef CONFIG_PAX_SEGMEXEC
63406+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63407+ if (!avc_m)
63408+ goto out_enomem_free_avc;
63409+#endif
63410+
63411 anon_vma = find_mergeable_anon_vma(vma);
63412 allocated = NULL;
63413 if (!anon_vma) {
63414@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63415 /* page_table_lock to protect against threads */
63416 spin_lock(&mm->page_table_lock);
63417 if (likely(!vma->anon_vma)) {
63418+
63419+#ifdef CONFIG_PAX_SEGMEXEC
63420+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63421+
63422+ if (vma_m) {
63423+ BUG_ON(vma_m->anon_vma);
63424+ vma_m->anon_vma = anon_vma;
63425+ avc_m->anon_vma = anon_vma;
63426+ avc_m->vma = vma;
63427+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63428+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
63429+ avc_m = NULL;
63430+ }
63431+#endif
63432+
63433 vma->anon_vma = anon_vma;
63434 avc->anon_vma = anon_vma;
63435 avc->vma = vma;
63436@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63437
63438 if (unlikely(allocated))
63439 put_anon_vma(allocated);
63440+
63441+#ifdef CONFIG_PAX_SEGMEXEC
63442+ if (unlikely(avc_m))
63443+ anon_vma_chain_free(avc_m);
63444+#endif
63445+
63446 if (unlikely(avc))
63447 anon_vma_chain_free(avc);
63448 }
63449 return 0;
63450
63451 out_enomem_free_avc:
63452+
63453+#ifdef CONFIG_PAX_SEGMEXEC
63454+ if (avc_m)
63455+ anon_vma_chain_free(avc_m);
63456+#endif
63457+
63458 anon_vma_chain_free(avc);
63459 out_enomem:
63460 return -ENOMEM;
63461@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63462 * Attach the anon_vmas from src to dst.
63463 * Returns 0 on success, -ENOMEM on failure.
63464 */
63465-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63466+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63467 {
63468 struct anon_vma_chain *avc, *pavc;
63469 struct anon_vma *root = NULL;
63470@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63471 * the corresponding VMA in the parent process is attached to.
63472 * Returns 0 on success, non-zero on failure.
63473 */
63474-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63475+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63476 {
63477 struct anon_vma_chain *avc;
63478 struct anon_vma *anon_vma;
63479diff -urNp linux-3.0.3/mm/shmem.c linux-3.0.3/mm/shmem.c
63480--- linux-3.0.3/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63481+++ linux-3.0.3/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63482@@ -31,7 +31,7 @@
63483 #include <linux/percpu_counter.h>
63484 #include <linux/swap.h>
63485
63486-static struct vfsmount *shm_mnt;
63487+struct vfsmount *shm_mnt;
63488
63489 #ifdef CONFIG_SHMEM
63490 /*
63491@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63492 goto unlock;
63493 }
63494 entry = shmem_swp_entry(info, index, NULL);
63495+ if (!entry)
63496+ goto unlock;
63497 if (entry->val) {
63498 /*
63499 * The more uptodate page coming down from a stacked
63500@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63501 struct vm_area_struct pvma;
63502 struct page *page;
63503
63504+ pax_track_stack();
63505+
63506 spol = mpol_cond_copy(&mpol,
63507 mpol_shared_policy_lookup(&info->policy, idx));
63508
63509@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63510 int err = -ENOMEM;
63511
63512 /* Round up to L1_CACHE_BYTES to resist false sharing */
63513- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63514- L1_CACHE_BYTES), GFP_KERNEL);
63515+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63516 if (!sbinfo)
63517 return -ENOMEM;
63518
63519diff -urNp linux-3.0.3/mm/slab.c linux-3.0.3/mm/slab.c
63520--- linux-3.0.3/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63521+++ linux-3.0.3/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63522@@ -151,7 +151,7 @@
63523
63524 /* Legal flag mask for kmem_cache_create(). */
63525 #if DEBUG
63526-# define CREATE_MASK (SLAB_RED_ZONE | \
63527+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63528 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63529 SLAB_CACHE_DMA | \
63530 SLAB_STORE_USER | \
63531@@ -159,7 +159,7 @@
63532 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63533 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63534 #else
63535-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63536+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63537 SLAB_CACHE_DMA | \
63538 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63539 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63540@@ -288,7 +288,7 @@ struct kmem_list3 {
63541 * Need this for bootstrapping a per node allocator.
63542 */
63543 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63544-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63545+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63546 #define CACHE_CACHE 0
63547 #define SIZE_AC MAX_NUMNODES
63548 #define SIZE_L3 (2 * MAX_NUMNODES)
63549@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63550 if ((x)->max_freeable < i) \
63551 (x)->max_freeable = i; \
63552 } while (0)
63553-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63554-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63555-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63556-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63557+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63558+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63559+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63560+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63561 #else
63562 #define STATS_INC_ACTIVE(x) do { } while (0)
63563 #define STATS_DEC_ACTIVE(x) do { } while (0)
63564@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63565 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63566 */
63567 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63568- const struct slab *slab, void *obj)
63569+ const struct slab *slab, const void *obj)
63570 {
63571 u32 offset = (obj - slab->s_mem);
63572 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63573@@ -564,7 +564,7 @@ struct cache_names {
63574 static struct cache_names __initdata cache_names[] = {
63575 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63576 #include <linux/kmalloc_sizes.h>
63577- {NULL,}
63578+ {NULL}
63579 #undef CACHE
63580 };
63581
63582@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63583 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63584 sizes[INDEX_AC].cs_size,
63585 ARCH_KMALLOC_MINALIGN,
63586- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63587+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63588 NULL);
63589
63590 if (INDEX_AC != INDEX_L3) {
63591@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63592 kmem_cache_create(names[INDEX_L3].name,
63593 sizes[INDEX_L3].cs_size,
63594 ARCH_KMALLOC_MINALIGN,
63595- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63596+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63597 NULL);
63598 }
63599
63600@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63601 sizes->cs_cachep = kmem_cache_create(names->name,
63602 sizes->cs_size,
63603 ARCH_KMALLOC_MINALIGN,
63604- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63605+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63606 NULL);
63607 }
63608 #ifdef CONFIG_ZONE_DMA
63609@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63610 }
63611 /* cpu stats */
63612 {
63613- unsigned long allochit = atomic_read(&cachep->allochit);
63614- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63615- unsigned long freehit = atomic_read(&cachep->freehit);
63616- unsigned long freemiss = atomic_read(&cachep->freemiss);
63617+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63618+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63619+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63620+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63621
63622 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63623 allochit, allocmiss, freehit, freemiss);
63624@@ -4532,15 +4532,66 @@ static const struct file_operations proc
63625
63626 static int __init slab_proc_init(void)
63627 {
63628- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63629+ mode_t gr_mode = S_IRUGO;
63630+
63631+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63632+ gr_mode = S_IRUSR;
63633+#endif
63634+
63635+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63636 #ifdef CONFIG_DEBUG_SLAB_LEAK
63637- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63638+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63639 #endif
63640 return 0;
63641 }
63642 module_init(slab_proc_init);
63643 #endif
63644
63645+void check_object_size(const void *ptr, unsigned long n, bool to)
63646+{
63647+
63648+#ifdef CONFIG_PAX_USERCOPY
63649+ struct page *page;
63650+ struct kmem_cache *cachep = NULL;
63651+ struct slab *slabp;
63652+ unsigned int objnr;
63653+ unsigned long offset;
63654+
63655+ if (!n)
63656+ return;
63657+
63658+ if (ZERO_OR_NULL_PTR(ptr))
63659+ goto report;
63660+
63661+ if (!virt_addr_valid(ptr))
63662+ return;
63663+
63664+ page = virt_to_head_page(ptr);
63665+
63666+ if (!PageSlab(page)) {
63667+ if (object_is_on_stack(ptr, n) == -1)
63668+ goto report;
63669+ return;
63670+ }
63671+
63672+ cachep = page_get_cache(page);
63673+ if (!(cachep->flags & SLAB_USERCOPY))
63674+ goto report;
63675+
63676+ slabp = page_get_slab(page);
63677+ objnr = obj_to_index(cachep, slabp, ptr);
63678+ BUG_ON(objnr >= cachep->num);
63679+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63680+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63681+ return;
63682+
63683+report:
63684+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63685+#endif
63686+
63687+}
63688+EXPORT_SYMBOL(check_object_size);
63689+
63690 /**
63691 * ksize - get the actual amount of memory allocated for a given object
63692 * @objp: Pointer to the object
63693diff -urNp linux-3.0.3/mm/slob.c linux-3.0.3/mm/slob.c
63694--- linux-3.0.3/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63695+++ linux-3.0.3/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63696@@ -29,7 +29,7 @@
63697 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63698 * alloc_pages() directly, allocating compound pages so the page order
63699 * does not have to be separately tracked, and also stores the exact
63700- * allocation size in page->private so that it can be used to accurately
63701+ * allocation size in slob_page->size so that it can be used to accurately
63702 * provide ksize(). These objects are detected in kfree() because slob_page()
63703 * is false for them.
63704 *
63705@@ -58,6 +58,7 @@
63706 */
63707
63708 #include <linux/kernel.h>
63709+#include <linux/sched.h>
63710 #include <linux/slab.h>
63711 #include <linux/mm.h>
63712 #include <linux/swap.h> /* struct reclaim_state */
63713@@ -102,7 +103,8 @@ struct slob_page {
63714 unsigned long flags; /* mandatory */
63715 atomic_t _count; /* mandatory */
63716 slobidx_t units; /* free units left in page */
63717- unsigned long pad[2];
63718+ unsigned long pad[1];
63719+ unsigned long size; /* size when >=PAGE_SIZE */
63720 slob_t *free; /* first free slob_t in page */
63721 struct list_head list; /* linked list of free pages */
63722 };
63723@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63724 */
63725 static inline int is_slob_page(struct slob_page *sp)
63726 {
63727- return PageSlab((struct page *)sp);
63728+ return PageSlab((struct page *)sp) && !sp->size;
63729 }
63730
63731 static inline void set_slob_page(struct slob_page *sp)
63732@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63733
63734 static inline struct slob_page *slob_page(const void *addr)
63735 {
63736- return (struct slob_page *)virt_to_page(addr);
63737+ return (struct slob_page *)virt_to_head_page(addr);
63738 }
63739
63740 /*
63741@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63742 /*
63743 * Return the size of a slob block.
63744 */
63745-static slobidx_t slob_units(slob_t *s)
63746+static slobidx_t slob_units(const slob_t *s)
63747 {
63748 if (s->units > 0)
63749 return s->units;
63750@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63751 /*
63752 * Return the next free slob block pointer after this one.
63753 */
63754-static slob_t *slob_next(slob_t *s)
63755+static slob_t *slob_next(const slob_t *s)
63756 {
63757 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63758 slobidx_t next;
63759@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63760 /*
63761 * Returns true if s is the last free block in its page.
63762 */
63763-static int slob_last(slob_t *s)
63764+static int slob_last(const slob_t *s)
63765 {
63766 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63767 }
63768@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63769 if (!page)
63770 return NULL;
63771
63772+ set_slob_page(page);
63773 return page_address(page);
63774 }
63775
63776@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63777 if (!b)
63778 return NULL;
63779 sp = slob_page(b);
63780- set_slob_page(sp);
63781
63782 spin_lock_irqsave(&slob_lock, flags);
63783 sp->units = SLOB_UNITS(PAGE_SIZE);
63784 sp->free = b;
63785+ sp->size = 0;
63786 INIT_LIST_HEAD(&sp->list);
63787 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63788 set_slob_page_free(sp, slob_list);
63789@@ -476,10 +479,9 @@ out:
63790 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63791 */
63792
63793-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63794+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63795 {
63796- unsigned int *m;
63797- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63798+ slob_t *m;
63799 void *ret;
63800
63801 lockdep_trace_alloc(gfp);
63802@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63803
63804 if (!m)
63805 return NULL;
63806- *m = size;
63807+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63808+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63809+ m[0].units = size;
63810+ m[1].units = align;
63811 ret = (void *)m + align;
63812
63813 trace_kmalloc_node(_RET_IP_, ret,
63814@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63815 gfp |= __GFP_COMP;
63816 ret = slob_new_pages(gfp, order, node);
63817 if (ret) {
63818- struct page *page;
63819- page = virt_to_page(ret);
63820- page->private = size;
63821+ struct slob_page *sp;
63822+ sp = slob_page(ret);
63823+ sp->size = size;
63824 }
63825
63826 trace_kmalloc_node(_RET_IP_, ret,
63827 size, PAGE_SIZE << order, gfp, node);
63828 }
63829
63830- kmemleak_alloc(ret, size, 1, gfp);
63831+ return ret;
63832+}
63833+
63834+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63835+{
63836+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63837+ void *ret = __kmalloc_node_align(size, gfp, node, align);
63838+
63839+ if (!ZERO_OR_NULL_PTR(ret))
63840+ kmemleak_alloc(ret, size, 1, gfp);
63841 return ret;
63842 }
63843 EXPORT_SYMBOL(__kmalloc_node);
63844@@ -531,13 +545,88 @@ void kfree(const void *block)
63845 sp = slob_page(block);
63846 if (is_slob_page(sp)) {
63847 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63848- unsigned int *m = (unsigned int *)(block - align);
63849- slob_free(m, *m + align);
63850- } else
63851+ slob_t *m = (slob_t *)(block - align);
63852+ slob_free(m, m[0].units + align);
63853+ } else {
63854+ clear_slob_page(sp);
63855+ free_slob_page(sp);
63856+ sp->size = 0;
63857 put_page(&sp->page);
63858+ }
63859 }
63860 EXPORT_SYMBOL(kfree);
63861
63862+void check_object_size(const void *ptr, unsigned long n, bool to)
63863+{
63864+
63865+#ifdef CONFIG_PAX_USERCOPY
63866+ struct slob_page *sp;
63867+ const slob_t *free;
63868+ const void *base;
63869+ unsigned long flags;
63870+
63871+ if (!n)
63872+ return;
63873+
63874+ if (ZERO_OR_NULL_PTR(ptr))
63875+ goto report;
63876+
63877+ if (!virt_addr_valid(ptr))
63878+ return;
63879+
63880+ sp = slob_page(ptr);
63881+ if (!PageSlab((struct page*)sp)) {
63882+ if (object_is_on_stack(ptr, n) == -1)
63883+ goto report;
63884+ return;
63885+ }
63886+
63887+ if (sp->size) {
63888+ base = page_address(&sp->page);
63889+ if (base <= ptr && n <= sp->size - (ptr - base))
63890+ return;
63891+ goto report;
63892+ }
63893+
63894+ /* some tricky double walking to find the chunk */
63895+ spin_lock_irqsave(&slob_lock, flags);
63896+ base = (void *)((unsigned long)ptr & PAGE_MASK);
63897+ free = sp->free;
63898+
63899+ while (!slob_last(free) && (void *)free <= ptr) {
63900+ base = free + slob_units(free);
63901+ free = slob_next(free);
63902+ }
63903+
63904+ while (base < (void *)free) {
63905+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63906+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
63907+ int offset;
63908+
63909+ if (ptr < base + align)
63910+ break;
63911+
63912+ offset = ptr - base - align;
63913+ if (offset >= m) {
63914+ base += size;
63915+ continue;
63916+ }
63917+
63918+ if (n > m - offset)
63919+ break;
63920+
63921+ spin_unlock_irqrestore(&slob_lock, flags);
63922+ return;
63923+ }
63924+
63925+ spin_unlock_irqrestore(&slob_lock, flags);
63926+report:
63927+ pax_report_usercopy(ptr, n, to, NULL);
63928+#endif
63929+
63930+}
63931+EXPORT_SYMBOL(check_object_size);
63932+
63933 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63934 size_t ksize(const void *block)
63935 {
63936@@ -550,10 +639,10 @@ size_t ksize(const void *block)
63937 sp = slob_page(block);
63938 if (is_slob_page(sp)) {
63939 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63940- unsigned int *m = (unsigned int *)(block - align);
63941- return SLOB_UNITS(*m) * SLOB_UNIT;
63942+ slob_t *m = (slob_t *)(block - align);
63943+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
63944 } else
63945- return sp->page.private;
63946+ return sp->size;
63947 }
63948 EXPORT_SYMBOL(ksize);
63949
63950@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
63951 {
63952 struct kmem_cache *c;
63953
63954+#ifdef CONFIG_PAX_USERCOPY
63955+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
63956+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
63957+#else
63958 c = slob_alloc(sizeof(struct kmem_cache),
63959 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
63960+#endif
63961
63962 if (c) {
63963 c->name = name;
63964@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
63965 {
63966 void *b;
63967
63968+#ifdef CONFIG_PAX_USERCOPY
63969+ b = __kmalloc_node_align(c->size, flags, node, c->align);
63970+#else
63971 if (c->size < PAGE_SIZE) {
63972 b = slob_alloc(c->size, flags, c->align, node);
63973 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63974 SLOB_UNITS(c->size) * SLOB_UNIT,
63975 flags, node);
63976 } else {
63977+ struct slob_page *sp;
63978+
63979 b = slob_new_pages(flags, get_order(c->size), node);
63980+ sp = slob_page(b);
63981+ sp->size = c->size;
63982 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63983 PAGE_SIZE << get_order(c->size),
63984 flags, node);
63985 }
63986+#endif
63987
63988 if (c->ctor)
63989 c->ctor(b);
63990@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
63991
63992 static void __kmem_cache_free(void *b, int size)
63993 {
63994- if (size < PAGE_SIZE)
63995+ struct slob_page *sp = slob_page(b);
63996+
63997+ if (is_slob_page(sp))
63998 slob_free(b, size);
63999- else
64000+ else {
64001+ clear_slob_page(sp);
64002+ free_slob_page(sp);
64003+ sp->size = 0;
64004 slob_free_pages(b, get_order(size));
64005+ }
64006 }
64007
64008 static void kmem_rcu_free(struct rcu_head *head)
64009@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64010
64011 void kmem_cache_free(struct kmem_cache *c, void *b)
64012 {
64013+ int size = c->size;
64014+
64015+#ifdef CONFIG_PAX_USERCOPY
64016+ if (size + c->align < PAGE_SIZE) {
64017+ size += c->align;
64018+ b -= c->align;
64019+ }
64020+#endif
64021+
64022 kmemleak_free_recursive(b, c->flags);
64023 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64024 struct slob_rcu *slob_rcu;
64025- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64026- slob_rcu->size = c->size;
64027+ slob_rcu = b + (size - sizeof(struct slob_rcu));
64028+ slob_rcu->size = size;
64029 call_rcu(&slob_rcu->head, kmem_rcu_free);
64030 } else {
64031- __kmem_cache_free(b, c->size);
64032+ __kmem_cache_free(b, size);
64033 }
64034
64035+#ifdef CONFIG_PAX_USERCOPY
64036+ trace_kfree(_RET_IP_, b);
64037+#else
64038 trace_kmem_cache_free(_RET_IP_, b);
64039+#endif
64040+
64041 }
64042 EXPORT_SYMBOL(kmem_cache_free);
64043
64044diff -urNp linux-3.0.3/mm/slub.c linux-3.0.3/mm/slub.c
64045--- linux-3.0.3/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
64046+++ linux-3.0.3/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
64047@@ -442,7 +442,7 @@ static void print_track(const char *s, s
64048 if (!t->addr)
64049 return;
64050
64051- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64052+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64053 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64054 }
64055
64056@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
64057
64058 page = virt_to_head_page(x);
64059
64060+ BUG_ON(!PageSlab(page));
64061+
64062 slab_free(s, page, x, _RET_IP_);
64063
64064 trace_kmem_cache_free(_RET_IP_, x);
64065@@ -2170,7 +2172,7 @@ static int slub_min_objects;
64066 * Merge control. If this is set then no merging of slab caches will occur.
64067 * (Could be removed. This was introduced to pacify the merge skeptics.)
64068 */
64069-static int slub_nomerge;
64070+static int slub_nomerge = 1;
64071
64072 /*
64073 * Calculate the order of allocation given an slab object size.
64074@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
64075 * list to avoid pounding the page allocator excessively.
64076 */
64077 set_min_partial(s, ilog2(s->size));
64078- s->refcount = 1;
64079+ atomic_set(&s->refcount, 1);
64080 #ifdef CONFIG_NUMA
64081 s->remote_node_defrag_ratio = 1000;
64082 #endif
64083@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
64084 void kmem_cache_destroy(struct kmem_cache *s)
64085 {
64086 down_write(&slub_lock);
64087- s->refcount--;
64088- if (!s->refcount) {
64089+ if (atomic_dec_and_test(&s->refcount)) {
64090 list_del(&s->list);
64091 if (kmem_cache_close(s)) {
64092 printk(KERN_ERR "SLUB %s: %s called for cache that "
64093@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
64094 EXPORT_SYMBOL(__kmalloc_node);
64095 #endif
64096
64097+void check_object_size(const void *ptr, unsigned long n, bool to)
64098+{
64099+
64100+#ifdef CONFIG_PAX_USERCOPY
64101+ struct page *page;
64102+ struct kmem_cache *s = NULL;
64103+ unsigned long offset;
64104+
64105+ if (!n)
64106+ return;
64107+
64108+ if (ZERO_OR_NULL_PTR(ptr))
64109+ goto report;
64110+
64111+ if (!virt_addr_valid(ptr))
64112+ return;
64113+
64114+ page = virt_to_head_page(ptr);
64115+
64116+ if (!PageSlab(page)) {
64117+ if (object_is_on_stack(ptr, n) == -1)
64118+ goto report;
64119+ return;
64120+ }
64121+
64122+ s = page->slab;
64123+ if (!(s->flags & SLAB_USERCOPY))
64124+ goto report;
64125+
64126+ offset = (ptr - page_address(page)) % s->size;
64127+ if (offset <= s->objsize && n <= s->objsize - offset)
64128+ return;
64129+
64130+report:
64131+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64132+#endif
64133+
64134+}
64135+EXPORT_SYMBOL(check_object_size);
64136+
64137 size_t ksize(const void *object)
64138 {
64139 struct page *page;
64140@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
64141 int node;
64142
64143 list_add(&s->list, &slab_caches);
64144- s->refcount = -1;
64145+ atomic_set(&s->refcount, -1);
64146
64147 for_each_node_state(node, N_NORMAL_MEMORY) {
64148 struct kmem_cache_node *n = get_node(s, node);
64149@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
64150
64151 /* Caches that are not of the two-to-the-power-of size */
64152 if (KMALLOC_MIN_SIZE <= 32) {
64153- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64154+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64155 caches++;
64156 }
64157
64158 if (KMALLOC_MIN_SIZE <= 64) {
64159- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64160+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64161 caches++;
64162 }
64163
64164 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64165- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64166+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64167 caches++;
64168 }
64169
64170@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
64171 /*
64172 * We may have set a slab to be unmergeable during bootstrap.
64173 */
64174- if (s->refcount < 0)
64175+ if (atomic_read(&s->refcount) < 0)
64176 return 1;
64177
64178 return 0;
64179@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
64180 down_write(&slub_lock);
64181 s = find_mergeable(size, align, flags, name, ctor);
64182 if (s) {
64183- s->refcount++;
64184+ atomic_inc(&s->refcount);
64185 /*
64186 * Adjust the object sizes so that we clear
64187 * the complete object on kzalloc.
64188@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
64189 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64190
64191 if (sysfs_slab_alias(s, name)) {
64192- s->refcount--;
64193+ atomic_dec(&s->refcount);
64194 goto err;
64195 }
64196 up_write(&slub_lock);
64197@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
64198
64199 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64200 {
64201- return sprintf(buf, "%d\n", s->refcount - 1);
64202+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64203 }
64204 SLAB_ATTR_RO(aliases);
64205
64206@@ -4894,7 +4935,13 @@ static const struct file_operations proc
64207
64208 static int __init slab_proc_init(void)
64209 {
64210- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64211+ mode_t gr_mode = S_IRUGO;
64212+
64213+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64214+ gr_mode = S_IRUSR;
64215+#endif
64216+
64217+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64218 return 0;
64219 }
64220 module_init(slab_proc_init);
64221diff -urNp linux-3.0.3/mm/swap.c linux-3.0.3/mm/swap.c
64222--- linux-3.0.3/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
64223+++ linux-3.0.3/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
64224@@ -31,6 +31,7 @@
64225 #include <linux/backing-dev.h>
64226 #include <linux/memcontrol.h>
64227 #include <linux/gfp.h>
64228+#include <linux/hugetlb.h>
64229
64230 #include "internal.h"
64231
64232@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64233
64234 __page_cache_release(page);
64235 dtor = get_compound_page_dtor(page);
64236+ if (!PageHuge(page))
64237+ BUG_ON(dtor != free_compound_page);
64238 (*dtor)(page);
64239 }
64240
64241diff -urNp linux-3.0.3/mm/swapfile.c linux-3.0.3/mm/swapfile.c
64242--- linux-3.0.3/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
64243+++ linux-3.0.3/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
64244@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
64245
64246 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64247 /* Activity counter to indicate that a swapon or swapoff has occurred */
64248-static atomic_t proc_poll_event = ATOMIC_INIT(0);
64249+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64250
64251 static inline unsigned char swap_count(unsigned char ent)
64252 {
64253@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64254 }
64255 filp_close(swap_file, NULL);
64256 err = 0;
64257- atomic_inc(&proc_poll_event);
64258+ atomic_inc_unchecked(&proc_poll_event);
64259 wake_up_interruptible(&proc_poll_wait);
64260
64261 out_dput:
64262@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
64263
64264 poll_wait(file, &proc_poll_wait, wait);
64265
64266- if (s->event != atomic_read(&proc_poll_event)) {
64267- s->event = atomic_read(&proc_poll_event);
64268+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64269+ s->event = atomic_read_unchecked(&proc_poll_event);
64270 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64271 }
64272
64273@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
64274 }
64275
64276 s->seq.private = s;
64277- s->event = atomic_read(&proc_poll_event);
64278+ s->event = atomic_read_unchecked(&proc_poll_event);
64279 return ret;
64280 }
64281
64282@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64283 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64284
64285 mutex_unlock(&swapon_mutex);
64286- atomic_inc(&proc_poll_event);
64287+ atomic_inc_unchecked(&proc_poll_event);
64288 wake_up_interruptible(&proc_poll_wait);
64289
64290 if (S_ISREG(inode->i_mode))
64291diff -urNp linux-3.0.3/mm/util.c linux-3.0.3/mm/util.c
64292--- linux-3.0.3/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64293+++ linux-3.0.3/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64294@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64295 * allocated buffer. Use this if you don't want to free the buffer immediately
64296 * like, for example, with RCU.
64297 */
64298+#undef __krealloc
64299 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64300 {
64301 void *ret;
64302@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64303 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64304 * %NULL pointer, the object pointed to is freed.
64305 */
64306+#undef krealloc
64307 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64308 {
64309 void *ret;
64310@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64311 void arch_pick_mmap_layout(struct mm_struct *mm)
64312 {
64313 mm->mmap_base = TASK_UNMAPPED_BASE;
64314+
64315+#ifdef CONFIG_PAX_RANDMMAP
64316+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64317+ mm->mmap_base += mm->delta_mmap;
64318+#endif
64319+
64320 mm->get_unmapped_area = arch_get_unmapped_area;
64321 mm->unmap_area = arch_unmap_area;
64322 }
64323diff -urNp linux-3.0.3/mm/vmalloc.c linux-3.0.3/mm/vmalloc.c
64324--- linux-3.0.3/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64325+++ linux-3.0.3/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64326@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64327
64328 pte = pte_offset_kernel(pmd, addr);
64329 do {
64330- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64331- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64332+
64333+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64334+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64335+ BUG_ON(!pte_exec(*pte));
64336+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64337+ continue;
64338+ }
64339+#endif
64340+
64341+ {
64342+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64343+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64344+ }
64345 } while (pte++, addr += PAGE_SIZE, addr != end);
64346 }
64347
64348@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64349 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64350 {
64351 pte_t *pte;
64352+ int ret = -ENOMEM;
64353
64354 /*
64355 * nr is a running index into the array which helps higher level
64356@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64357 pte = pte_alloc_kernel(pmd, addr);
64358 if (!pte)
64359 return -ENOMEM;
64360+
64361+ pax_open_kernel();
64362 do {
64363 struct page *page = pages[*nr];
64364
64365- if (WARN_ON(!pte_none(*pte)))
64366- return -EBUSY;
64367- if (WARN_ON(!page))
64368- return -ENOMEM;
64369+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64370+ if (pgprot_val(prot) & _PAGE_NX)
64371+#endif
64372+
64373+ if (WARN_ON(!pte_none(*pte))) {
64374+ ret = -EBUSY;
64375+ goto out;
64376+ }
64377+ if (WARN_ON(!page)) {
64378+ ret = -ENOMEM;
64379+ goto out;
64380+ }
64381 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64382 (*nr)++;
64383 } while (pte++, addr += PAGE_SIZE, addr != end);
64384- return 0;
64385+ ret = 0;
64386+out:
64387+ pax_close_kernel();
64388+ return ret;
64389 }
64390
64391 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64392@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64393 * and fall back on vmalloc() if that fails. Others
64394 * just put it in the vmalloc space.
64395 */
64396-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64397+#ifdef CONFIG_MODULES
64398+#ifdef MODULES_VADDR
64399 unsigned long addr = (unsigned long)x;
64400 if (addr >= MODULES_VADDR && addr < MODULES_END)
64401 return 1;
64402 #endif
64403+
64404+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64405+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64406+ return 1;
64407+#endif
64408+
64409+#endif
64410+
64411 return is_vmalloc_addr(x);
64412 }
64413
64414@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64415
64416 if (!pgd_none(*pgd)) {
64417 pud_t *pud = pud_offset(pgd, addr);
64418+#ifdef CONFIG_X86
64419+ if (!pud_large(*pud))
64420+#endif
64421 if (!pud_none(*pud)) {
64422 pmd_t *pmd = pmd_offset(pud, addr);
64423+#ifdef CONFIG_X86
64424+ if (!pmd_large(*pmd))
64425+#endif
64426 if (!pmd_none(*pmd)) {
64427 pte_t *ptep, pte;
64428
64429@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64430 struct vm_struct *area;
64431
64432 BUG_ON(in_interrupt());
64433+
64434+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64435+ if (flags & VM_KERNEXEC) {
64436+ if (start != VMALLOC_START || end != VMALLOC_END)
64437+ return NULL;
64438+ start = (unsigned long)MODULES_EXEC_VADDR;
64439+ end = (unsigned long)MODULES_EXEC_END;
64440+ }
64441+#endif
64442+
64443 if (flags & VM_IOREMAP) {
64444 int bit = fls(size);
64445
64446@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64447 if (count > totalram_pages)
64448 return NULL;
64449
64450+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64451+ if (!(pgprot_val(prot) & _PAGE_NX))
64452+ flags |= VM_KERNEXEC;
64453+#endif
64454+
64455 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64456 __builtin_return_address(0));
64457 if (!area)
64458@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64459 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64460 return NULL;
64461
64462+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64463+ if (!(pgprot_val(prot) & _PAGE_NX))
64464+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64465+ node, gfp_mask, caller);
64466+ else
64467+#endif
64468+
64469 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64470 gfp_mask, caller);
64471
64472@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64473 gfp_mask, prot, node, caller);
64474 }
64475
64476+#undef __vmalloc
64477 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64478 {
64479 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64480@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64481 * For tight control over page level allocator and protection flags
64482 * use __vmalloc() instead.
64483 */
64484+#undef vmalloc
64485 void *vmalloc(unsigned long size)
64486 {
64487 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64488@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64489 * For tight control over page level allocator and protection flags
64490 * use __vmalloc() instead.
64491 */
64492+#undef vzalloc
64493 void *vzalloc(unsigned long size)
64494 {
64495 return __vmalloc_node_flags(size, -1,
64496@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64497 * The resulting memory area is zeroed so it can be mapped to userspace
64498 * without leaking data.
64499 */
64500+#undef vmalloc_user
64501 void *vmalloc_user(unsigned long size)
64502 {
64503 struct vm_struct *area;
64504@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64505 * For tight control over page level allocator and protection flags
64506 * use __vmalloc() instead.
64507 */
64508+#undef vmalloc_node
64509 void *vmalloc_node(unsigned long size, int node)
64510 {
64511 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64512@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64513 * For tight control over page level allocator and protection flags
64514 * use __vmalloc_node() instead.
64515 */
64516+#undef vzalloc_node
64517 void *vzalloc_node(unsigned long size, int node)
64518 {
64519 return __vmalloc_node_flags(size, node,
64520@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64521 * For tight control over page level allocator and protection flags
64522 * use __vmalloc() instead.
64523 */
64524-
64525+#undef vmalloc_exec
64526 void *vmalloc_exec(unsigned long size)
64527 {
64528- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64529+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64530 -1, __builtin_return_address(0));
64531 }
64532
64533@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64534 * Allocate enough 32bit PA addressable pages to cover @size from the
64535 * page level allocator and map them into contiguous kernel virtual space.
64536 */
64537+#undef vmalloc_32
64538 void *vmalloc_32(unsigned long size)
64539 {
64540 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64541@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64542 * The resulting memory area is 32bit addressable and zeroed so it can be
64543 * mapped to userspace without leaking data.
64544 */
64545+#undef vmalloc_32_user
64546 void *vmalloc_32_user(unsigned long size)
64547 {
64548 struct vm_struct *area;
64549@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64550 unsigned long uaddr = vma->vm_start;
64551 unsigned long usize = vma->vm_end - vma->vm_start;
64552
64553+ BUG_ON(vma->vm_mirror);
64554+
64555 if ((PAGE_SIZE-1) & (unsigned long)addr)
64556 return -EINVAL;
64557
64558diff -urNp linux-3.0.3/mm/vmstat.c linux-3.0.3/mm/vmstat.c
64559--- linux-3.0.3/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64560+++ linux-3.0.3/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64561@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64562 *
64563 * vm_stat contains the global counters
64564 */
64565-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64566+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64567 EXPORT_SYMBOL(vm_stat);
64568
64569 #ifdef CONFIG_SMP
64570@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64571 v = p->vm_stat_diff[i];
64572 p->vm_stat_diff[i] = 0;
64573 local_irq_restore(flags);
64574- atomic_long_add(v, &zone->vm_stat[i]);
64575+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64576 global_diff[i] += v;
64577 #ifdef CONFIG_NUMA
64578 /* 3 seconds idle till flush */
64579@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64580
64581 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64582 if (global_diff[i])
64583- atomic_long_add(global_diff[i], &vm_stat[i]);
64584+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64585 }
64586
64587 #endif
64588@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64589 start_cpu_timer(cpu);
64590 #endif
64591 #ifdef CONFIG_PROC_FS
64592- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64593- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64594- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64595- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64596+ {
64597+ mode_t gr_mode = S_IRUGO;
64598+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64599+ gr_mode = S_IRUSR;
64600+#endif
64601+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64602+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64603+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64604+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64605+#else
64606+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64607+#endif
64608+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64609+ }
64610 #endif
64611 return 0;
64612 }
64613diff -urNp linux-3.0.3/net/8021q/vlan.c linux-3.0.3/net/8021q/vlan.c
64614--- linux-3.0.3/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64615+++ linux-3.0.3/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64616@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64617 err = -EPERM;
64618 if (!capable(CAP_NET_ADMIN))
64619 break;
64620- if ((args.u.name_type >= 0) &&
64621- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64622+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64623 struct vlan_net *vn;
64624
64625 vn = net_generic(net, vlan_net_id);
64626diff -urNp linux-3.0.3/net/atm/atm_misc.c linux-3.0.3/net/atm/atm_misc.c
64627--- linux-3.0.3/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64628+++ linux-3.0.3/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64629@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64630 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64631 return 1;
64632 atm_return(vcc, truesize);
64633- atomic_inc(&vcc->stats->rx_drop);
64634+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64635 return 0;
64636 }
64637 EXPORT_SYMBOL(atm_charge);
64638@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64639 }
64640 }
64641 atm_return(vcc, guess);
64642- atomic_inc(&vcc->stats->rx_drop);
64643+ atomic_inc_unchecked(&vcc->stats->rx_drop);
64644 return NULL;
64645 }
64646 EXPORT_SYMBOL(atm_alloc_charge);
64647@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64648
64649 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64650 {
64651-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64652+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64653 __SONET_ITEMS
64654 #undef __HANDLE_ITEM
64655 }
64656@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64657
64658 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64659 {
64660-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64661+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64662 __SONET_ITEMS
64663 #undef __HANDLE_ITEM
64664 }
64665diff -urNp linux-3.0.3/net/atm/lec.h linux-3.0.3/net/atm/lec.h
64666--- linux-3.0.3/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64667+++ linux-3.0.3/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64668@@ -48,7 +48,7 @@ struct lane2_ops {
64669 const u8 *tlvs, u32 sizeoftlvs);
64670 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64671 const u8 *tlvs, u32 sizeoftlvs);
64672-};
64673+} __no_const;
64674
64675 /*
64676 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64677diff -urNp linux-3.0.3/net/atm/mpc.h linux-3.0.3/net/atm/mpc.h
64678--- linux-3.0.3/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64679+++ linux-3.0.3/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64680@@ -33,7 +33,7 @@ struct mpoa_client {
64681 struct mpc_parameters parameters; /* parameters for this client */
64682
64683 const struct net_device_ops *old_ops;
64684- struct net_device_ops new_ops;
64685+ net_device_ops_no_const new_ops;
64686 };
64687
64688
64689diff -urNp linux-3.0.3/net/atm/mpoa_caches.c linux-3.0.3/net/atm/mpoa_caches.c
64690--- linux-3.0.3/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64691+++ linux-3.0.3/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64692@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64693 struct timeval now;
64694 struct k_message msg;
64695
64696+ pax_track_stack();
64697+
64698 do_gettimeofday(&now);
64699
64700 read_lock_bh(&client->ingress_lock);
64701diff -urNp linux-3.0.3/net/atm/proc.c linux-3.0.3/net/atm/proc.c
64702--- linux-3.0.3/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64703+++ linux-3.0.3/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64704@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64705 const struct k_atm_aal_stats *stats)
64706 {
64707 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64708- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64709- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64710- atomic_read(&stats->rx_drop));
64711+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64712+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64713+ atomic_read_unchecked(&stats->rx_drop));
64714 }
64715
64716 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64717diff -urNp linux-3.0.3/net/atm/resources.c linux-3.0.3/net/atm/resources.c
64718--- linux-3.0.3/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64719+++ linux-3.0.3/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64720@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64721 static void copy_aal_stats(struct k_atm_aal_stats *from,
64722 struct atm_aal_stats *to)
64723 {
64724-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64725+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64726 __AAL_STAT_ITEMS
64727 #undef __HANDLE_ITEM
64728 }
64729@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64730 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64731 struct atm_aal_stats *to)
64732 {
64733-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64734+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64735 __AAL_STAT_ITEMS
64736 #undef __HANDLE_ITEM
64737 }
64738diff -urNp linux-3.0.3/net/batman-adv/hard-interface.c linux-3.0.3/net/batman-adv/hard-interface.c
64739--- linux-3.0.3/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64740+++ linux-3.0.3/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64741@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64742 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64743 dev_add_pack(&hard_iface->batman_adv_ptype);
64744
64745- atomic_set(&hard_iface->seqno, 1);
64746- atomic_set(&hard_iface->frag_seqno, 1);
64747+ atomic_set_unchecked(&hard_iface->seqno, 1);
64748+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64749 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64750 hard_iface->net_dev->name);
64751
64752diff -urNp linux-3.0.3/net/batman-adv/routing.c linux-3.0.3/net/batman-adv/routing.c
64753--- linux-3.0.3/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64754+++ linux-3.0.3/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64755@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64756 return;
64757
64758 /* could be changed by schedule_own_packet() */
64759- if_incoming_seqno = atomic_read(&if_incoming->seqno);
64760+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64761
64762 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64763
64764diff -urNp linux-3.0.3/net/batman-adv/send.c linux-3.0.3/net/batman-adv/send.c
64765--- linux-3.0.3/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64766+++ linux-3.0.3/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64767@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64768
64769 /* change sequence number to network order */
64770 batman_packet->seqno =
64771- htonl((uint32_t)atomic_read(&hard_iface->seqno));
64772+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64773
64774 if (vis_server == VIS_TYPE_SERVER_SYNC)
64775 batman_packet->flags |= VIS_SERVER;
64776@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64777 else
64778 batman_packet->gw_flags = 0;
64779
64780- atomic_inc(&hard_iface->seqno);
64781+ atomic_inc_unchecked(&hard_iface->seqno);
64782
64783 slide_own_bcast_window(hard_iface);
64784 send_time = own_send_time(bat_priv);
64785diff -urNp linux-3.0.3/net/batman-adv/soft-interface.c linux-3.0.3/net/batman-adv/soft-interface.c
64786--- linux-3.0.3/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64787+++ linux-3.0.3/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64788@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64789
64790 /* set broadcast sequence number */
64791 bcast_packet->seqno =
64792- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64793+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64794
64795 add_bcast_packet_to_list(bat_priv, skb);
64796
64797@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64798 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64799
64800 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64801- atomic_set(&bat_priv->bcast_seqno, 1);
64802+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64803 atomic_set(&bat_priv->tt_local_changed, 0);
64804
64805 bat_priv->primary_if = NULL;
64806diff -urNp linux-3.0.3/net/batman-adv/types.h linux-3.0.3/net/batman-adv/types.h
64807--- linux-3.0.3/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64808+++ linux-3.0.3/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64809@@ -38,8 +38,8 @@ struct hard_iface {
64810 int16_t if_num;
64811 char if_status;
64812 struct net_device *net_dev;
64813- atomic_t seqno;
64814- atomic_t frag_seqno;
64815+ atomic_unchecked_t seqno;
64816+ atomic_unchecked_t frag_seqno;
64817 unsigned char *packet_buff;
64818 int packet_len;
64819 struct kobject *hardif_obj;
64820@@ -142,7 +142,7 @@ struct bat_priv {
64821 atomic_t orig_interval; /* uint */
64822 atomic_t hop_penalty; /* uint */
64823 atomic_t log_level; /* uint */
64824- atomic_t bcast_seqno;
64825+ atomic_unchecked_t bcast_seqno;
64826 atomic_t bcast_queue_left;
64827 atomic_t batman_queue_left;
64828 char num_ifaces;
64829diff -urNp linux-3.0.3/net/batman-adv/unicast.c linux-3.0.3/net/batman-adv/unicast.c
64830--- linux-3.0.3/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64831+++ linux-3.0.3/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64832@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64833 frag1->flags = UNI_FRAG_HEAD | large_tail;
64834 frag2->flags = large_tail;
64835
64836- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64837+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64838 frag1->seqno = htons(seqno - 1);
64839 frag2->seqno = htons(seqno);
64840
64841diff -urNp linux-3.0.3/net/bridge/br_multicast.c linux-3.0.3/net/bridge/br_multicast.c
64842--- linux-3.0.3/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64843+++ linux-3.0.3/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64844@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64845 nexthdr = ip6h->nexthdr;
64846 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64847
64848- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64849+ if (nexthdr != IPPROTO_ICMPV6)
64850 return 0;
64851
64852 /* Okay, we found ICMPv6 header */
64853diff -urNp linux-3.0.3/net/bridge/netfilter/ebtables.c linux-3.0.3/net/bridge/netfilter/ebtables.c
64854--- linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64855+++ linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64856@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64857 tmp.valid_hooks = t->table->valid_hooks;
64858 }
64859 mutex_unlock(&ebt_mutex);
64860- if (copy_to_user(user, &tmp, *len) != 0){
64861+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64862 BUGPRINT("c2u Didn't work\n");
64863 ret = -EFAULT;
64864 break;
64865@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64866 int ret;
64867 void __user *pos;
64868
64869+ pax_track_stack();
64870+
64871 memset(&tinfo, 0, sizeof(tinfo));
64872
64873 if (cmd == EBT_SO_GET_ENTRIES) {
64874diff -urNp linux-3.0.3/net/caif/caif_socket.c linux-3.0.3/net/caif/caif_socket.c
64875--- linux-3.0.3/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64876+++ linux-3.0.3/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64877@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64878 #ifdef CONFIG_DEBUG_FS
64879 struct debug_fs_counter {
64880 atomic_t caif_nr_socks;
64881- atomic_t caif_sock_create;
64882- atomic_t num_connect_req;
64883- atomic_t num_connect_resp;
64884- atomic_t num_connect_fail_resp;
64885- atomic_t num_disconnect;
64886- atomic_t num_remote_shutdown_ind;
64887- atomic_t num_tx_flow_off_ind;
64888- atomic_t num_tx_flow_on_ind;
64889- atomic_t num_rx_flow_off;
64890- atomic_t num_rx_flow_on;
64891+ atomic_unchecked_t caif_sock_create;
64892+ atomic_unchecked_t num_connect_req;
64893+ atomic_unchecked_t num_connect_resp;
64894+ atomic_unchecked_t num_connect_fail_resp;
64895+ atomic_unchecked_t num_disconnect;
64896+ atomic_unchecked_t num_remote_shutdown_ind;
64897+ atomic_unchecked_t num_tx_flow_off_ind;
64898+ atomic_unchecked_t num_tx_flow_on_ind;
64899+ atomic_unchecked_t num_rx_flow_off;
64900+ atomic_unchecked_t num_rx_flow_on;
64901 };
64902 static struct debug_fs_counter cnt;
64903 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64904+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64905 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64906 #else
64907 #define dbfs_atomic_inc(v) 0
64908@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64909 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64910 sk_rcvbuf_lowwater(cf_sk));
64911 set_rx_flow_off(cf_sk);
64912- dbfs_atomic_inc(&cnt.num_rx_flow_off);
64913+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64914 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64915 }
64916
64917@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64918 set_rx_flow_off(cf_sk);
64919 if (net_ratelimit())
64920 pr_debug("sending flow OFF due to rmem_schedule\n");
64921- dbfs_atomic_inc(&cnt.num_rx_flow_off);
64922+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64923 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64924 }
64925 skb->dev = NULL;
64926@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64927 switch (flow) {
64928 case CAIF_CTRLCMD_FLOW_ON_IND:
64929 /* OK from modem to start sending again */
64930- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64931+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64932 set_tx_flow_on(cf_sk);
64933 cf_sk->sk.sk_state_change(&cf_sk->sk);
64934 break;
64935
64936 case CAIF_CTRLCMD_FLOW_OFF_IND:
64937 /* Modem asks us to shut up */
64938- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
64939+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
64940 set_tx_flow_off(cf_sk);
64941 cf_sk->sk.sk_state_change(&cf_sk->sk);
64942 break;
64943@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
64944 /* We're now connected */
64945 caif_client_register_refcnt(&cf_sk->layer,
64946 cfsk_hold, cfsk_put);
64947- dbfs_atomic_inc(&cnt.num_connect_resp);
64948+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
64949 cf_sk->sk.sk_state = CAIF_CONNECTED;
64950 set_tx_flow_on(cf_sk);
64951 cf_sk->sk.sk_state_change(&cf_sk->sk);
64952@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
64953
64954 case CAIF_CTRLCMD_INIT_FAIL_RSP:
64955 /* Connect request failed */
64956- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
64957+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
64958 cf_sk->sk.sk_err = ECONNREFUSED;
64959 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
64960 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64961@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
64962
64963 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
64964 /* Modem has closed this connection, or device is down. */
64965- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
64966+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
64967 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64968 cf_sk->sk.sk_err = ECONNRESET;
64969 set_rx_flow_on(cf_sk);
64970@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
64971 return;
64972
64973 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
64974- dbfs_atomic_inc(&cnt.num_rx_flow_on);
64975+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
64976 set_rx_flow_on(cf_sk);
64977 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
64978 }
64979@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
64980 /*ifindex = id of the interface.*/
64981 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
64982
64983- dbfs_atomic_inc(&cnt.num_connect_req);
64984+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
64985 cf_sk->layer.receive = caif_sktrecv_cb;
64986
64987 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
64988@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
64989 spin_unlock_bh(&sk->sk_receive_queue.lock);
64990 sock->sk = NULL;
64991
64992- dbfs_atomic_inc(&cnt.num_disconnect);
64993+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
64994
64995 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
64996 if (cf_sk->debugfs_socket_dir != NULL)
64997@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
64998 cf_sk->conn_req.protocol = protocol;
64999 /* Increase the number of sockets created. */
65000 dbfs_atomic_inc(&cnt.caif_nr_socks);
65001- num = dbfs_atomic_inc(&cnt.caif_sock_create);
65002+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
65003 #ifdef CONFIG_DEBUG_FS
65004 if (!IS_ERR(debugfsdir)) {
65005
65006diff -urNp linux-3.0.3/net/caif/cfctrl.c linux-3.0.3/net/caif/cfctrl.c
65007--- linux-3.0.3/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
65008+++ linux-3.0.3/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
65009@@ -9,6 +9,7 @@
65010 #include <linux/stddef.h>
65011 #include <linux/spinlock.h>
65012 #include <linux/slab.h>
65013+#include <linux/sched.h>
65014 #include <net/caif/caif_layer.h>
65015 #include <net/caif/cfpkt.h>
65016 #include <net/caif/cfctrl.h>
65017@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
65018 dev_info.id = 0xff;
65019 memset(this, 0, sizeof(*this));
65020 cfsrvl_init(&this->serv, 0, &dev_info, false);
65021- atomic_set(&this->req_seq_no, 1);
65022- atomic_set(&this->rsp_seq_no, 1);
65023+ atomic_set_unchecked(&this->req_seq_no, 1);
65024+ atomic_set_unchecked(&this->rsp_seq_no, 1);
65025 this->serv.layer.receive = cfctrl_recv;
65026 sprintf(this->serv.layer.name, "ctrl");
65027 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65028@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
65029 struct cfctrl_request_info *req)
65030 {
65031 spin_lock_bh(&ctrl->info_list_lock);
65032- atomic_inc(&ctrl->req_seq_no);
65033- req->sequence_no = atomic_read(&ctrl->req_seq_no);
65034+ atomic_inc_unchecked(&ctrl->req_seq_no);
65035+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65036 list_add_tail(&req->list, &ctrl->list);
65037 spin_unlock_bh(&ctrl->info_list_lock);
65038 }
65039@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
65040 if (p != first)
65041 pr_warn("Requests are not received in order\n");
65042
65043- atomic_set(&ctrl->rsp_seq_no,
65044+ atomic_set_unchecked(&ctrl->rsp_seq_no,
65045 p->sequence_no);
65046 list_del(&p->list);
65047 goto out;
65048@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
65049 struct cfctrl *cfctrl = container_obj(layer);
65050 struct cfctrl_request_info rsp, *req;
65051
65052+ pax_track_stack();
65053
65054 cfpkt_extr_head(pkt, &cmdrsp, 1);
65055 cmd = cmdrsp & CFCTRL_CMD_MASK;
65056diff -urNp linux-3.0.3/net/core/datagram.c linux-3.0.3/net/core/datagram.c
65057--- linux-3.0.3/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
65058+++ linux-3.0.3/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
65059@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65060 }
65061
65062 kfree_skb(skb);
65063- atomic_inc(&sk->sk_drops);
65064+ atomic_inc_unchecked(&sk->sk_drops);
65065 sk_mem_reclaim_partial(sk);
65066
65067 return err;
65068diff -urNp linux-3.0.3/net/core/dev.c linux-3.0.3/net/core/dev.c
65069--- linux-3.0.3/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
65070+++ linux-3.0.3/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
65071@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65072 if (no_module && capable(CAP_NET_ADMIN))
65073 no_module = request_module("netdev-%s", name);
65074 if (no_module && capable(CAP_SYS_MODULE)) {
65075+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65076+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
65077+#else
65078 if (!request_module("%s", name))
65079 pr_err("Loading kernel module for a network device "
65080 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65081 "instead\n", name);
65082+#endif
65083 }
65084 }
65085 EXPORT_SYMBOL(dev_load);
65086@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
65087
65088 struct dev_gso_cb {
65089 void (*destructor)(struct sk_buff *skb);
65090-};
65091+} __no_const;
65092
65093 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65094
65095@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
65096 }
65097 EXPORT_SYMBOL(netif_rx_ni);
65098
65099-static void net_tx_action(struct softirq_action *h)
65100+static void net_tx_action(void)
65101 {
65102 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65103
65104@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
65105 }
65106 EXPORT_SYMBOL(netif_napi_del);
65107
65108-static void net_rx_action(struct softirq_action *h)
65109+static void net_rx_action(void)
65110 {
65111 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65112 unsigned long time_limit = jiffies + 2;
65113diff -urNp linux-3.0.3/net/core/flow.c linux-3.0.3/net/core/flow.c
65114--- linux-3.0.3/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
65115+++ linux-3.0.3/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
65116@@ -60,7 +60,7 @@ struct flow_cache {
65117 struct timer_list rnd_timer;
65118 };
65119
65120-atomic_t flow_cache_genid = ATOMIC_INIT(0);
65121+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65122 EXPORT_SYMBOL(flow_cache_genid);
65123 static struct flow_cache flow_cache_global;
65124 static struct kmem_cache *flow_cachep __read_mostly;
65125@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65126
65127 static int flow_entry_valid(struct flow_cache_entry *fle)
65128 {
65129- if (atomic_read(&flow_cache_genid) != fle->genid)
65130+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65131 return 0;
65132 if (fle->object && !fle->object->ops->check(fle->object))
65133 return 0;
65134@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65135 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65136 fcp->hash_count++;
65137 }
65138- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65139+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65140 flo = fle->object;
65141 if (!flo)
65142 goto ret_object;
65143@@ -274,7 +274,7 @@ nocache:
65144 }
65145 flo = resolver(net, key, family, dir, flo, ctx);
65146 if (fle) {
65147- fle->genid = atomic_read(&flow_cache_genid);
65148+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
65149 if (!IS_ERR(flo))
65150 fle->object = flo;
65151 else
65152diff -urNp linux-3.0.3/net/core/rtnetlink.c linux-3.0.3/net/core/rtnetlink.c
65153--- linux-3.0.3/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
65154+++ linux-3.0.3/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
65155@@ -56,7 +56,7 @@
65156 struct rtnl_link {
65157 rtnl_doit_func doit;
65158 rtnl_dumpit_func dumpit;
65159-};
65160+} __no_const;
65161
65162 static DEFINE_MUTEX(rtnl_mutex);
65163
65164diff -urNp linux-3.0.3/net/core/skbuff.c linux-3.0.3/net/core/skbuff.c
65165--- linux-3.0.3/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
65166+++ linux-3.0.3/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
65167@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
65168 struct sock *sk = skb->sk;
65169 int ret = 0;
65170
65171+ pax_track_stack();
65172+
65173 if (splice_grow_spd(pipe, &spd))
65174 return -ENOMEM;
65175
65176diff -urNp linux-3.0.3/net/core/sock.c linux-3.0.3/net/core/sock.c
65177--- linux-3.0.3/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
65178+++ linux-3.0.3/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
65179@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65180 */
65181 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65182 (unsigned)sk->sk_rcvbuf) {
65183- atomic_inc(&sk->sk_drops);
65184+ atomic_inc_unchecked(&sk->sk_drops);
65185 return -ENOMEM;
65186 }
65187
65188@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65189 return err;
65190
65191 if (!sk_rmem_schedule(sk, skb->truesize)) {
65192- atomic_inc(&sk->sk_drops);
65193+ atomic_inc_unchecked(&sk->sk_drops);
65194 return -ENOBUFS;
65195 }
65196
65197@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65198 skb_dst_force(skb);
65199
65200 spin_lock_irqsave(&list->lock, flags);
65201- skb->dropcount = atomic_read(&sk->sk_drops);
65202+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65203 __skb_queue_tail(list, skb);
65204 spin_unlock_irqrestore(&list->lock, flags);
65205
65206@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65207 skb->dev = NULL;
65208
65209 if (sk_rcvqueues_full(sk, skb)) {
65210- atomic_inc(&sk->sk_drops);
65211+ atomic_inc_unchecked(&sk->sk_drops);
65212 goto discard_and_relse;
65213 }
65214 if (nested)
65215@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65216 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65217 } else if (sk_add_backlog(sk, skb)) {
65218 bh_unlock_sock(sk);
65219- atomic_inc(&sk->sk_drops);
65220+ atomic_inc_unchecked(&sk->sk_drops);
65221 goto discard_and_relse;
65222 }
65223
65224@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
65225 if (len > sizeof(peercred))
65226 len = sizeof(peercred);
65227 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
65228- if (copy_to_user(optval, &peercred, len))
65229+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
65230 return -EFAULT;
65231 goto lenout;
65232 }
65233@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65234 return -ENOTCONN;
65235 if (lv < len)
65236 return -EINVAL;
65237- if (copy_to_user(optval, address, len))
65238+ if (len > sizeof(address) || copy_to_user(optval, address, len))
65239 return -EFAULT;
65240 goto lenout;
65241 }
65242@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65243
65244 if (len > lv)
65245 len = lv;
65246- if (copy_to_user(optval, &v, len))
65247+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
65248 return -EFAULT;
65249 lenout:
65250 if (put_user(len, optlen))
65251@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65252 */
65253 smp_wmb();
65254 atomic_set(&sk->sk_refcnt, 1);
65255- atomic_set(&sk->sk_drops, 0);
65256+ atomic_set_unchecked(&sk->sk_drops, 0);
65257 }
65258 EXPORT_SYMBOL(sock_init_data);
65259
65260diff -urNp linux-3.0.3/net/decnet/sysctl_net_decnet.c linux-3.0.3/net/decnet/sysctl_net_decnet.c
65261--- linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
65262+++ linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
65263@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65264
65265 if (len > *lenp) len = *lenp;
65266
65267- if (copy_to_user(buffer, addr, len))
65268+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
65269 return -EFAULT;
65270
65271 *lenp = len;
65272@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65273
65274 if (len > *lenp) len = *lenp;
65275
65276- if (copy_to_user(buffer, devname, len))
65277+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
65278 return -EFAULT;
65279
65280 *lenp = len;
65281diff -urNp linux-3.0.3/net/econet/Kconfig linux-3.0.3/net/econet/Kconfig
65282--- linux-3.0.3/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65283+++ linux-3.0.3/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65284@@ -4,7 +4,7 @@
65285
65286 config ECONET
65287 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65288- depends on EXPERIMENTAL && INET
65289+ depends on EXPERIMENTAL && INET && BROKEN
65290 ---help---
65291 Econet is a fairly old and slow networking protocol mainly used by
65292 Acorn computers to access file and print servers. It uses native
65293diff -urNp linux-3.0.3/net/ipv4/fib_frontend.c linux-3.0.3/net/ipv4/fib_frontend.c
65294--- linux-3.0.3/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65295+++ linux-3.0.3/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65296@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65297 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65298 fib_sync_up(dev);
65299 #endif
65300- atomic_inc(&net->ipv4.dev_addr_genid);
65301+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65302 rt_cache_flush(dev_net(dev), -1);
65303 break;
65304 case NETDEV_DOWN:
65305 fib_del_ifaddr(ifa, NULL);
65306- atomic_inc(&net->ipv4.dev_addr_genid);
65307+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65308 if (ifa->ifa_dev->ifa_list == NULL) {
65309 /* Last address was deleted from this interface.
65310 * Disable IP.
65311@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65312 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65313 fib_sync_up(dev);
65314 #endif
65315- atomic_inc(&net->ipv4.dev_addr_genid);
65316+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65317 rt_cache_flush(dev_net(dev), -1);
65318 break;
65319 case NETDEV_DOWN:
65320diff -urNp linux-3.0.3/net/ipv4/fib_semantics.c linux-3.0.3/net/ipv4/fib_semantics.c
65321--- linux-3.0.3/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65322+++ linux-3.0.3/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65323@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65324 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65325 nh->nh_gw,
65326 nh->nh_parent->fib_scope);
65327- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65328+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65329
65330 return nh->nh_saddr;
65331 }
65332diff -urNp linux-3.0.3/net/ipv4/inet_diag.c linux-3.0.3/net/ipv4/inet_diag.c
65333--- linux-3.0.3/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65334+++ linux-3.0.3/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65335@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65336 r->idiag_retrans = 0;
65337
65338 r->id.idiag_if = sk->sk_bound_dev_if;
65339+
65340+#ifdef CONFIG_GRKERNSEC_HIDESYM
65341+ r->id.idiag_cookie[0] = 0;
65342+ r->id.idiag_cookie[1] = 0;
65343+#else
65344 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65345 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65346+#endif
65347
65348 r->id.idiag_sport = inet->inet_sport;
65349 r->id.idiag_dport = inet->inet_dport;
65350@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65351 r->idiag_family = tw->tw_family;
65352 r->idiag_retrans = 0;
65353 r->id.idiag_if = tw->tw_bound_dev_if;
65354+
65355+#ifdef CONFIG_GRKERNSEC_HIDESYM
65356+ r->id.idiag_cookie[0] = 0;
65357+ r->id.idiag_cookie[1] = 0;
65358+#else
65359 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65360 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65361+#endif
65362+
65363 r->id.idiag_sport = tw->tw_sport;
65364 r->id.idiag_dport = tw->tw_dport;
65365 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65366@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65367 if (sk == NULL)
65368 goto unlock;
65369
65370+#ifndef CONFIG_GRKERNSEC_HIDESYM
65371 err = -ESTALE;
65372 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65373 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65374 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65375 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65376 goto out;
65377+#endif
65378
65379 err = -ENOMEM;
65380 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65381@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65382 r->idiag_retrans = req->retrans;
65383
65384 r->id.idiag_if = sk->sk_bound_dev_if;
65385+
65386+#ifdef CONFIG_GRKERNSEC_HIDESYM
65387+ r->id.idiag_cookie[0] = 0;
65388+ r->id.idiag_cookie[1] = 0;
65389+#else
65390 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65391 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65392+#endif
65393
65394 tmo = req->expires - jiffies;
65395 if (tmo < 0)
65396diff -urNp linux-3.0.3/net/ipv4/inet_hashtables.c linux-3.0.3/net/ipv4/inet_hashtables.c
65397--- linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65398+++ linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65399@@ -18,12 +18,15 @@
65400 #include <linux/sched.h>
65401 #include <linux/slab.h>
65402 #include <linux/wait.h>
65403+#include <linux/security.h>
65404
65405 #include <net/inet_connection_sock.h>
65406 #include <net/inet_hashtables.h>
65407 #include <net/secure_seq.h>
65408 #include <net/ip.h>
65409
65410+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65411+
65412 /*
65413 * Allocate and initialize a new local port bind bucket.
65414 * The bindhash mutex for snum's hash chain must be held here.
65415@@ -530,6 +533,8 @@ ok:
65416 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65417 spin_unlock(&head->lock);
65418
65419+ gr_update_task_in_ip_table(current, inet_sk(sk));
65420+
65421 if (tw) {
65422 inet_twsk_deschedule(tw, death_row);
65423 while (twrefcnt) {
65424diff -urNp linux-3.0.3/net/ipv4/inetpeer.c linux-3.0.3/net/ipv4/inetpeer.c
65425--- linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65426+++ linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65427@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65428 unsigned int sequence;
65429 int invalidated, newrefcnt = 0;
65430
65431+ pax_track_stack();
65432+
65433 /* Look up for the address quickly, lockless.
65434 * Because of a concurrent writer, we might not find an existing entry.
65435 */
65436@@ -517,8 +519,8 @@ found: /* The existing node has been fo
65437 if (p) {
65438 p->daddr = *daddr;
65439 atomic_set(&p->refcnt, 1);
65440- atomic_set(&p->rid, 0);
65441- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65442+ atomic_set_unchecked(&p->rid, 0);
65443+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65444 p->tcp_ts_stamp = 0;
65445 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65446 p->rate_tokens = 0;
65447diff -urNp linux-3.0.3/net/ipv4/ip_fragment.c linux-3.0.3/net/ipv4/ip_fragment.c
65448--- linux-3.0.3/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65449+++ linux-3.0.3/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65450@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65451 return 0;
65452
65453 start = qp->rid;
65454- end = atomic_inc_return(&peer->rid);
65455+ end = atomic_inc_return_unchecked(&peer->rid);
65456 qp->rid = end;
65457
65458 rc = qp->q.fragments && (end - start) > max;
65459diff -urNp linux-3.0.3/net/ipv4/ip_sockglue.c linux-3.0.3/net/ipv4/ip_sockglue.c
65460--- linux-3.0.3/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65461+++ linux-3.0.3/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65462@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65463 int val;
65464 int len;
65465
65466+ pax_track_stack();
65467+
65468 if (level != SOL_IP)
65469 return -EOPNOTSUPP;
65470
65471@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65472 len = min_t(unsigned int, len, opt->optlen);
65473 if (put_user(len, optlen))
65474 return -EFAULT;
65475- if (copy_to_user(optval, opt->__data, len))
65476+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65477+ copy_to_user(optval, opt->__data, len))
65478 return -EFAULT;
65479 return 0;
65480 }
65481diff -urNp linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
65482--- linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65483+++ linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65484@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65485
65486 *len = 0;
65487
65488- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65489+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65490 if (*octets == NULL) {
65491 if (net_ratelimit())
65492 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65493diff -urNp linux-3.0.3/net/ipv4/ping.c linux-3.0.3/net/ipv4/ping.c
65494--- linux-3.0.3/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65495+++ linux-3.0.3/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65496@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65497 sk_rmem_alloc_get(sp),
65498 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65499 atomic_read(&sp->sk_refcnt), sp,
65500- atomic_read(&sp->sk_drops), len);
65501+ atomic_read_unchecked(&sp->sk_drops), len);
65502 }
65503
65504 static int ping_seq_show(struct seq_file *seq, void *v)
65505diff -urNp linux-3.0.3/net/ipv4/raw.c linux-3.0.3/net/ipv4/raw.c
65506--- linux-3.0.3/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65507+++ linux-3.0.3/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65508@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65509 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65510 {
65511 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65512- atomic_inc(&sk->sk_drops);
65513+ atomic_inc_unchecked(&sk->sk_drops);
65514 kfree_skb(skb);
65515 return NET_RX_DROP;
65516 }
65517@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65518
65519 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65520 {
65521+ struct icmp_filter filter;
65522+
65523 if (optlen > sizeof(struct icmp_filter))
65524 optlen = sizeof(struct icmp_filter);
65525- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65526+ if (copy_from_user(&filter, optval, optlen))
65527 return -EFAULT;
65528+ raw_sk(sk)->filter = filter;
65529 return 0;
65530 }
65531
65532 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65533 {
65534 int len, ret = -EFAULT;
65535+ struct icmp_filter filter;
65536
65537 if (get_user(len, optlen))
65538 goto out;
65539@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65540 if (len > sizeof(struct icmp_filter))
65541 len = sizeof(struct icmp_filter);
65542 ret = -EFAULT;
65543- if (put_user(len, optlen) ||
65544- copy_to_user(optval, &raw_sk(sk)->filter, len))
65545+ filter = raw_sk(sk)->filter;
65546+ if (put_user(len, optlen) || len > sizeof filter ||
65547+ copy_to_user(optval, &filter, len))
65548 goto out;
65549 ret = 0;
65550 out: return ret;
65551@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65552 sk_wmem_alloc_get(sp),
65553 sk_rmem_alloc_get(sp),
65554 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65555- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65556+ atomic_read(&sp->sk_refcnt),
65557+#ifdef CONFIG_GRKERNSEC_HIDESYM
65558+ NULL,
65559+#else
65560+ sp,
65561+#endif
65562+ atomic_read_unchecked(&sp->sk_drops));
65563 }
65564
65565 static int raw_seq_show(struct seq_file *seq, void *v)
65566diff -urNp linux-3.0.3/net/ipv4/route.c linux-3.0.3/net/ipv4/route.c
65567--- linux-3.0.3/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65568+++ linux-3.0.3/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65569@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65570
65571 static inline int rt_genid(struct net *net)
65572 {
65573- return atomic_read(&net->ipv4.rt_genid);
65574+ return atomic_read_unchecked(&net->ipv4.rt_genid);
65575 }
65576
65577 #ifdef CONFIG_PROC_FS
65578@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65579 unsigned char shuffle;
65580
65581 get_random_bytes(&shuffle, sizeof(shuffle));
65582- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65583+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65584 }
65585
65586 /*
65587@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65588 error = rt->dst.error;
65589 if (peer) {
65590 inet_peer_refcheck(rt->peer);
65591- id = atomic_read(&peer->ip_id_count) & 0xffff;
65592+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65593 if (peer->tcp_ts_stamp) {
65594 ts = peer->tcp_ts;
65595 tsage = get_seconds() - peer->tcp_ts_stamp;
65596diff -urNp linux-3.0.3/net/ipv4/tcp.c linux-3.0.3/net/ipv4/tcp.c
65597--- linux-3.0.3/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65598+++ linux-3.0.3/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65599@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65600 int val;
65601 int err = 0;
65602
65603+ pax_track_stack();
65604+
65605 /* These are data/string values, all the others are ints */
65606 switch (optname) {
65607 case TCP_CONGESTION: {
65608@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65609 struct tcp_sock *tp = tcp_sk(sk);
65610 int val, len;
65611
65612+ pax_track_stack();
65613+
65614 if (get_user(len, optlen))
65615 return -EFAULT;
65616
65617diff -urNp linux-3.0.3/net/ipv4/tcp_ipv4.c linux-3.0.3/net/ipv4/tcp_ipv4.c
65618--- linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65619+++ linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65620@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65621 int sysctl_tcp_low_latency __read_mostly;
65622 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65623
65624+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65625+extern int grsec_enable_blackhole;
65626+#endif
65627
65628 #ifdef CONFIG_TCP_MD5SIG
65629 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65630@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65631 return 0;
65632
65633 reset:
65634+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65635+ if (!grsec_enable_blackhole)
65636+#endif
65637 tcp_v4_send_reset(rsk, skb);
65638 discard:
65639 kfree_skb(skb);
65640@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65641 TCP_SKB_CB(skb)->sacked = 0;
65642
65643 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65644- if (!sk)
65645+ if (!sk) {
65646+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65647+ ret = 1;
65648+#endif
65649 goto no_tcp_socket;
65650-
65651+ }
65652 process:
65653- if (sk->sk_state == TCP_TIME_WAIT)
65654+ if (sk->sk_state == TCP_TIME_WAIT) {
65655+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65656+ ret = 2;
65657+#endif
65658 goto do_time_wait;
65659+ }
65660
65661 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65662 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65663@@ -1724,6 +1737,10 @@ no_tcp_socket:
65664 bad_packet:
65665 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65666 } else {
65667+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65668+ if (!grsec_enable_blackhole || (ret == 1 &&
65669+ (skb->dev->flags & IFF_LOOPBACK)))
65670+#endif
65671 tcp_v4_send_reset(NULL, skb);
65672 }
65673
65674@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65675 0, /* non standard timer */
65676 0, /* open_requests have no inode */
65677 atomic_read(&sk->sk_refcnt),
65678+#ifdef CONFIG_GRKERNSEC_HIDESYM
65679+ NULL,
65680+#else
65681 req,
65682+#endif
65683 len);
65684 }
65685
65686@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65687 sock_i_uid(sk),
65688 icsk->icsk_probes_out,
65689 sock_i_ino(sk),
65690- atomic_read(&sk->sk_refcnt), sk,
65691+ atomic_read(&sk->sk_refcnt),
65692+#ifdef CONFIG_GRKERNSEC_HIDESYM
65693+ NULL,
65694+#else
65695+ sk,
65696+#endif
65697 jiffies_to_clock_t(icsk->icsk_rto),
65698 jiffies_to_clock_t(icsk->icsk_ack.ato),
65699 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65700@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65701 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65702 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65703 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65704- atomic_read(&tw->tw_refcnt), tw, len);
65705+ atomic_read(&tw->tw_refcnt),
65706+#ifdef CONFIG_GRKERNSEC_HIDESYM
65707+ NULL,
65708+#else
65709+ tw,
65710+#endif
65711+ len);
65712 }
65713
65714 #define TMPSZ 150
65715diff -urNp linux-3.0.3/net/ipv4/tcp_minisocks.c linux-3.0.3/net/ipv4/tcp_minisocks.c
65716--- linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65717+++ linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65718@@ -27,6 +27,10 @@
65719 #include <net/inet_common.h>
65720 #include <net/xfrm.h>
65721
65722+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65723+extern int grsec_enable_blackhole;
65724+#endif
65725+
65726 int sysctl_tcp_syncookies __read_mostly = 1;
65727 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65728
65729@@ -745,6 +749,10 @@ listen_overflow:
65730
65731 embryonic_reset:
65732 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65733+
65734+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65735+ if (!grsec_enable_blackhole)
65736+#endif
65737 if (!(flg & TCP_FLAG_RST))
65738 req->rsk_ops->send_reset(sk, skb);
65739
65740diff -urNp linux-3.0.3/net/ipv4/tcp_output.c linux-3.0.3/net/ipv4/tcp_output.c
65741--- linux-3.0.3/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65742+++ linux-3.0.3/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65743@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65744 int mss;
65745 int s_data_desired = 0;
65746
65747+ pax_track_stack();
65748+
65749 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65750 s_data_desired = cvp->s_data_desired;
65751 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65752diff -urNp linux-3.0.3/net/ipv4/tcp_probe.c linux-3.0.3/net/ipv4/tcp_probe.c
65753--- linux-3.0.3/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65754+++ linux-3.0.3/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65755@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65756 if (cnt + width >= len)
65757 break;
65758
65759- if (copy_to_user(buf + cnt, tbuf, width))
65760+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65761 return -EFAULT;
65762 cnt += width;
65763 }
65764diff -urNp linux-3.0.3/net/ipv4/tcp_timer.c linux-3.0.3/net/ipv4/tcp_timer.c
65765--- linux-3.0.3/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65766+++ linux-3.0.3/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65767@@ -22,6 +22,10 @@
65768 #include <linux/gfp.h>
65769 #include <net/tcp.h>
65770
65771+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65772+extern int grsec_lastack_retries;
65773+#endif
65774+
65775 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65776 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65777 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65778@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65779 }
65780 }
65781
65782+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65783+ if ((sk->sk_state == TCP_LAST_ACK) &&
65784+ (grsec_lastack_retries > 0) &&
65785+ (grsec_lastack_retries < retry_until))
65786+ retry_until = grsec_lastack_retries;
65787+#endif
65788+
65789 if (retransmits_timed_out(sk, retry_until,
65790 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65791 /* Has it gone just too far? */
65792diff -urNp linux-3.0.3/net/ipv4/udp.c linux-3.0.3/net/ipv4/udp.c
65793--- linux-3.0.3/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65794+++ linux-3.0.3/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65795@@ -86,6 +86,7 @@
65796 #include <linux/types.h>
65797 #include <linux/fcntl.h>
65798 #include <linux/module.h>
65799+#include <linux/security.h>
65800 #include <linux/socket.h>
65801 #include <linux/sockios.h>
65802 #include <linux/igmp.h>
65803@@ -107,6 +108,10 @@
65804 #include <net/xfrm.h>
65805 #include "udp_impl.h"
65806
65807+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65808+extern int grsec_enable_blackhole;
65809+#endif
65810+
65811 struct udp_table udp_table __read_mostly;
65812 EXPORT_SYMBOL(udp_table);
65813
65814@@ -564,6 +569,9 @@ found:
65815 return s;
65816 }
65817
65818+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65819+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65820+
65821 /*
65822 * This routine is called by the ICMP module when it gets some
65823 * sort of error condition. If err < 0 then the socket should
65824@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65825 dport = usin->sin_port;
65826 if (dport == 0)
65827 return -EINVAL;
65828+
65829+ err = gr_search_udp_sendmsg(sk, usin);
65830+ if (err)
65831+ return err;
65832 } else {
65833 if (sk->sk_state != TCP_ESTABLISHED)
65834 return -EDESTADDRREQ;
65835+
65836+ err = gr_search_udp_sendmsg(sk, NULL);
65837+ if (err)
65838+ return err;
65839+
65840 daddr = inet->inet_daddr;
65841 dport = inet->inet_dport;
65842 /* Open fast path for connected socket.
65843@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65844 udp_lib_checksum_complete(skb)) {
65845 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65846 IS_UDPLITE(sk));
65847- atomic_inc(&sk->sk_drops);
65848+ atomic_inc_unchecked(&sk->sk_drops);
65849 __skb_unlink(skb, rcvq);
65850 __skb_queue_tail(&list_kill, skb);
65851 }
65852@@ -1184,6 +1201,10 @@ try_again:
65853 if (!skb)
65854 goto out;
65855
65856+ err = gr_search_udp_recvmsg(sk, skb);
65857+ if (err)
65858+ goto out_free;
65859+
65860 ulen = skb->len - sizeof(struct udphdr);
65861 if (len > ulen)
65862 len = ulen;
65863@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65864
65865 drop:
65866 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65867- atomic_inc(&sk->sk_drops);
65868+ atomic_inc_unchecked(&sk->sk_drops);
65869 kfree_skb(skb);
65870 return -1;
65871 }
65872@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65873 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65874
65875 if (!skb1) {
65876- atomic_inc(&sk->sk_drops);
65877+ atomic_inc_unchecked(&sk->sk_drops);
65878 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65879 IS_UDPLITE(sk));
65880 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65881@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65882 goto csum_error;
65883
65884 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65885+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65886+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65887+#endif
65888 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65889
65890 /*
65891@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65892 sk_wmem_alloc_get(sp),
65893 sk_rmem_alloc_get(sp),
65894 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65895- atomic_read(&sp->sk_refcnt), sp,
65896- atomic_read(&sp->sk_drops), len);
65897+ atomic_read(&sp->sk_refcnt),
65898+#ifdef CONFIG_GRKERNSEC_HIDESYM
65899+ NULL,
65900+#else
65901+ sp,
65902+#endif
65903+ atomic_read_unchecked(&sp->sk_drops), len);
65904 }
65905
65906 int udp4_seq_show(struct seq_file *seq, void *v)
65907diff -urNp linux-3.0.3/net/ipv6/inet6_connection_sock.c linux-3.0.3/net/ipv6/inet6_connection_sock.c
65908--- linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65909+++ linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65910@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65911 #ifdef CONFIG_XFRM
65912 {
65913 struct rt6_info *rt = (struct rt6_info *)dst;
65914- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65915+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65916 }
65917 #endif
65918 }
65919@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65920 #ifdef CONFIG_XFRM
65921 if (dst) {
65922 struct rt6_info *rt = (struct rt6_info *)dst;
65923- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65924+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65925 __sk_dst_reset(sk);
65926 dst = NULL;
65927 }
65928diff -urNp linux-3.0.3/net/ipv6/ipv6_sockglue.c linux-3.0.3/net/ipv6/ipv6_sockglue.c
65929--- linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65930+++ linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65931@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65932 int val, valbool;
65933 int retv = -ENOPROTOOPT;
65934
65935+ pax_track_stack();
65936+
65937 if (optval == NULL)
65938 val=0;
65939 else {
65940@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
65941 int len;
65942 int val;
65943
65944+ pax_track_stack();
65945+
65946 if (ip6_mroute_opt(optname))
65947 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
65948
65949diff -urNp linux-3.0.3/net/ipv6/raw.c linux-3.0.3/net/ipv6/raw.c
65950--- linux-3.0.3/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
65951+++ linux-3.0.3/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
65952@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
65953 {
65954 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
65955 skb_checksum_complete(skb)) {
65956- atomic_inc(&sk->sk_drops);
65957+ atomic_inc_unchecked(&sk->sk_drops);
65958 kfree_skb(skb);
65959 return NET_RX_DROP;
65960 }
65961@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65962 struct raw6_sock *rp = raw6_sk(sk);
65963
65964 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
65965- atomic_inc(&sk->sk_drops);
65966+ atomic_inc_unchecked(&sk->sk_drops);
65967 kfree_skb(skb);
65968 return NET_RX_DROP;
65969 }
65970@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65971
65972 if (inet->hdrincl) {
65973 if (skb_checksum_complete(skb)) {
65974- atomic_inc(&sk->sk_drops);
65975+ atomic_inc_unchecked(&sk->sk_drops);
65976 kfree_skb(skb);
65977 return NET_RX_DROP;
65978 }
65979@@ -601,7 +601,7 @@ out:
65980 return err;
65981 }
65982
65983-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
65984+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
65985 struct flowi6 *fl6, struct dst_entry **dstp,
65986 unsigned int flags)
65987 {
65988@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
65989 u16 proto;
65990 int err;
65991
65992+ pax_track_stack();
65993+
65994 /* Rough check on arithmetic overflow,
65995 better check is made in ip6_append_data().
65996 */
65997@@ -909,12 +911,15 @@ do_confirm:
65998 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
65999 char __user *optval, int optlen)
66000 {
66001+ struct icmp6_filter filter;
66002+
66003 switch (optname) {
66004 case ICMPV6_FILTER:
66005 if (optlen > sizeof(struct icmp6_filter))
66006 optlen = sizeof(struct icmp6_filter);
66007- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66008+ if (copy_from_user(&filter, optval, optlen))
66009 return -EFAULT;
66010+ raw6_sk(sk)->filter = filter;
66011 return 0;
66012 default:
66013 return -ENOPROTOOPT;
66014@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66015 char __user *optval, int __user *optlen)
66016 {
66017 int len;
66018+ struct icmp6_filter filter;
66019
66020 switch (optname) {
66021 case ICMPV6_FILTER:
66022@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66023 len = sizeof(struct icmp6_filter);
66024 if (put_user(len, optlen))
66025 return -EFAULT;
66026- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66027+ filter = raw6_sk(sk)->filter;
66028+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
66029 return -EFAULT;
66030 return 0;
66031 default:
66032@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66033 0, 0L, 0,
66034 sock_i_uid(sp), 0,
66035 sock_i_ino(sp),
66036- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66037+ atomic_read(&sp->sk_refcnt),
66038+#ifdef CONFIG_GRKERNSEC_HIDESYM
66039+ NULL,
66040+#else
66041+ sp,
66042+#endif
66043+ atomic_read_unchecked(&sp->sk_drops));
66044 }
66045
66046 static int raw6_seq_show(struct seq_file *seq, void *v)
66047diff -urNp linux-3.0.3/net/ipv6/tcp_ipv6.c linux-3.0.3/net/ipv6/tcp_ipv6.c
66048--- linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
66049+++ linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
66050@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66051 }
66052 #endif
66053
66054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66055+extern int grsec_enable_blackhole;
66056+#endif
66057+
66058 static void tcp_v6_hash(struct sock *sk)
66059 {
66060 if (sk->sk_state != TCP_CLOSE) {
66061@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66062 return 0;
66063
66064 reset:
66065+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66066+ if (!grsec_enable_blackhole)
66067+#endif
66068 tcp_v6_send_reset(sk, skb);
66069 discard:
66070 if (opt_skb)
66071@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66072 TCP_SKB_CB(skb)->sacked = 0;
66073
66074 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66075- if (!sk)
66076+ if (!sk) {
66077+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66078+ ret = 1;
66079+#endif
66080 goto no_tcp_socket;
66081+ }
66082
66083 process:
66084- if (sk->sk_state == TCP_TIME_WAIT)
66085+ if (sk->sk_state == TCP_TIME_WAIT) {
66086+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66087+ ret = 2;
66088+#endif
66089 goto do_time_wait;
66090+ }
66091
66092 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66093 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66094@@ -1794,6 +1809,10 @@ no_tcp_socket:
66095 bad_packet:
66096 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66097 } else {
66098+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66099+ if (!grsec_enable_blackhole || (ret == 1 &&
66100+ (skb->dev->flags & IFF_LOOPBACK)))
66101+#endif
66102 tcp_v6_send_reset(NULL, skb);
66103 }
66104
66105@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
66106 uid,
66107 0, /* non standard timer */
66108 0, /* open_requests have no inode */
66109- 0, req);
66110+ 0,
66111+#ifdef CONFIG_GRKERNSEC_HIDESYM
66112+ NULL
66113+#else
66114+ req
66115+#endif
66116+ );
66117 }
66118
66119 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66120@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
66121 sock_i_uid(sp),
66122 icsk->icsk_probes_out,
66123 sock_i_ino(sp),
66124- atomic_read(&sp->sk_refcnt), sp,
66125+ atomic_read(&sp->sk_refcnt),
66126+#ifdef CONFIG_GRKERNSEC_HIDESYM
66127+ NULL,
66128+#else
66129+ sp,
66130+#endif
66131 jiffies_to_clock_t(icsk->icsk_rto),
66132 jiffies_to_clock_t(icsk->icsk_ack.ato),
66133 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66134@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
66135 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66136 tw->tw_substate, 0, 0,
66137 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66138- atomic_read(&tw->tw_refcnt), tw);
66139+ atomic_read(&tw->tw_refcnt),
66140+#ifdef CONFIG_GRKERNSEC_HIDESYM
66141+ NULL
66142+#else
66143+ tw
66144+#endif
66145+ );
66146 }
66147
66148 static int tcp6_seq_show(struct seq_file *seq, void *v)
66149diff -urNp linux-3.0.3/net/ipv6/udp.c linux-3.0.3/net/ipv6/udp.c
66150--- linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
66151+++ linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
66152@@ -50,6 +50,10 @@
66153 #include <linux/seq_file.h>
66154 #include "udp_impl.h"
66155
66156+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66157+extern int grsec_enable_blackhole;
66158+#endif
66159+
66160 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66161 {
66162 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66163@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66164
66165 return 0;
66166 drop:
66167- atomic_inc(&sk->sk_drops);
66168+ atomic_inc_unchecked(&sk->sk_drops);
66169 drop_no_sk_drops_inc:
66170 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66171 kfree_skb(skb);
66172@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66173 continue;
66174 }
66175 drop:
66176- atomic_inc(&sk->sk_drops);
66177+ atomic_inc_unchecked(&sk->sk_drops);
66178 UDP6_INC_STATS_BH(sock_net(sk),
66179 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66180 UDP6_INC_STATS_BH(sock_net(sk),
66181@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66182 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66183 proto == IPPROTO_UDPLITE);
66184
66185+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66186+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66187+#endif
66188 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66189
66190 kfree_skb(skb);
66191@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66192 if (!sock_owned_by_user(sk))
66193 udpv6_queue_rcv_skb(sk, skb);
66194 else if (sk_add_backlog(sk, skb)) {
66195- atomic_inc(&sk->sk_drops);
66196+ atomic_inc_unchecked(&sk->sk_drops);
66197 bh_unlock_sock(sk);
66198 sock_put(sk);
66199 goto discard;
66200@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66201 0, 0L, 0,
66202 sock_i_uid(sp), 0,
66203 sock_i_ino(sp),
66204- atomic_read(&sp->sk_refcnt), sp,
66205- atomic_read(&sp->sk_drops));
66206+ atomic_read(&sp->sk_refcnt),
66207+#ifdef CONFIG_GRKERNSEC_HIDESYM
66208+ NULL,
66209+#else
66210+ sp,
66211+#endif
66212+ atomic_read_unchecked(&sp->sk_drops));
66213 }
66214
66215 int udp6_seq_show(struct seq_file *seq, void *v)
66216diff -urNp linux-3.0.3/net/irda/ircomm/ircomm_tty.c linux-3.0.3/net/irda/ircomm/ircomm_tty.c
66217--- linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
66218+++ linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
66219@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
66220 add_wait_queue(&self->open_wait, &wait);
66221
66222 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66223- __FILE__,__LINE__, tty->driver->name, self->open_count );
66224+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66225
66226 /* As far as I can see, we protect open_count - Jean II */
66227 spin_lock_irqsave(&self->spinlock, flags);
66228 if (!tty_hung_up_p(filp)) {
66229 extra_count = 1;
66230- self->open_count--;
66231+ local_dec(&self->open_count);
66232 }
66233 spin_unlock_irqrestore(&self->spinlock, flags);
66234- self->blocked_open++;
66235+ local_inc(&self->blocked_open);
66236
66237 while (1) {
66238 if (tty->termios->c_cflag & CBAUD) {
66239@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
66240 }
66241
66242 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66243- __FILE__,__LINE__, tty->driver->name, self->open_count );
66244+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66245
66246 schedule();
66247 }
66248@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
66249 if (extra_count) {
66250 /* ++ is not atomic, so this should be protected - Jean II */
66251 spin_lock_irqsave(&self->spinlock, flags);
66252- self->open_count++;
66253+ local_inc(&self->open_count);
66254 spin_unlock_irqrestore(&self->spinlock, flags);
66255 }
66256- self->blocked_open--;
66257+ local_dec(&self->blocked_open);
66258
66259 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66260- __FILE__,__LINE__, tty->driver->name, self->open_count);
66261+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66262
66263 if (!retval)
66264 self->flags |= ASYNC_NORMAL_ACTIVE;
66265@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
66266 }
66267 /* ++ is not atomic, so this should be protected - Jean II */
66268 spin_lock_irqsave(&self->spinlock, flags);
66269- self->open_count++;
66270+ local_inc(&self->open_count);
66271
66272 tty->driver_data = self;
66273 self->tty = tty;
66274 spin_unlock_irqrestore(&self->spinlock, flags);
66275
66276 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66277- self->line, self->open_count);
66278+ self->line, local_read(&self->open_count));
66279
66280 /* Not really used by us, but lets do it anyway */
66281 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66282@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66283 return;
66284 }
66285
66286- if ((tty->count == 1) && (self->open_count != 1)) {
66287+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66288 /*
66289 * Uh, oh. tty->count is 1, which means that the tty
66290 * structure will be freed. state->count should always
66291@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66292 */
66293 IRDA_DEBUG(0, "%s(), bad serial port count; "
66294 "tty->count is 1, state->count is %d\n", __func__ ,
66295- self->open_count);
66296- self->open_count = 1;
66297+ local_read(&self->open_count));
66298+ local_set(&self->open_count, 1);
66299 }
66300
66301- if (--self->open_count < 0) {
66302+ if (local_dec_return(&self->open_count) < 0) {
66303 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66304- __func__, self->line, self->open_count);
66305- self->open_count = 0;
66306+ __func__, self->line, local_read(&self->open_count));
66307+ local_set(&self->open_count, 0);
66308 }
66309- if (self->open_count) {
66310+ if (local_read(&self->open_count)) {
66311 spin_unlock_irqrestore(&self->spinlock, flags);
66312
66313 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66314@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66315 tty->closing = 0;
66316 self->tty = NULL;
66317
66318- if (self->blocked_open) {
66319+ if (local_read(&self->blocked_open)) {
66320 if (self->close_delay)
66321 schedule_timeout_interruptible(self->close_delay);
66322 wake_up_interruptible(&self->open_wait);
66323@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66324 spin_lock_irqsave(&self->spinlock, flags);
66325 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66326 self->tty = NULL;
66327- self->open_count = 0;
66328+ local_set(&self->open_count, 0);
66329 spin_unlock_irqrestore(&self->spinlock, flags);
66330
66331 wake_up_interruptible(&self->open_wait);
66332@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66333 seq_putc(m, '\n');
66334
66335 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66336- seq_printf(m, "Open count: %d\n", self->open_count);
66337+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66338 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66339 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66340
66341diff -urNp linux-3.0.3/net/iucv/af_iucv.c linux-3.0.3/net/iucv/af_iucv.c
66342--- linux-3.0.3/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66343+++ linux-3.0.3/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66344@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66345
66346 write_lock_bh(&iucv_sk_list.lock);
66347
66348- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66349+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66350 while (__iucv_get_sock_by_name(name)) {
66351 sprintf(name, "%08x",
66352- atomic_inc_return(&iucv_sk_list.autobind_name));
66353+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66354 }
66355
66356 write_unlock_bh(&iucv_sk_list.lock);
66357diff -urNp linux-3.0.3/net/key/af_key.c linux-3.0.3/net/key/af_key.c
66358--- linux-3.0.3/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66359+++ linux-3.0.3/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66360@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66361 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66362 struct xfrm_kmaddress k;
66363
66364+ pax_track_stack();
66365+
66366 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66367 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66368 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66369@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66370 static u32 get_acqseq(void)
66371 {
66372 u32 res;
66373- static atomic_t acqseq;
66374+ static atomic_unchecked_t acqseq;
66375
66376 do {
66377- res = atomic_inc_return(&acqseq);
66378+ res = atomic_inc_return_unchecked(&acqseq);
66379 } while (!res);
66380 return res;
66381 }
66382diff -urNp linux-3.0.3/net/lapb/lapb_iface.c linux-3.0.3/net/lapb/lapb_iface.c
66383--- linux-3.0.3/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66384+++ linux-3.0.3/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66385@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66386 goto out;
66387
66388 lapb->dev = dev;
66389- lapb->callbacks = *callbacks;
66390+ lapb->callbacks = callbacks;
66391
66392 __lapb_insert_cb(lapb);
66393
66394@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66395
66396 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66397 {
66398- if (lapb->callbacks.connect_confirmation)
66399- lapb->callbacks.connect_confirmation(lapb->dev, reason);
66400+ if (lapb->callbacks->connect_confirmation)
66401+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
66402 }
66403
66404 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66405 {
66406- if (lapb->callbacks.connect_indication)
66407- lapb->callbacks.connect_indication(lapb->dev, reason);
66408+ if (lapb->callbacks->connect_indication)
66409+ lapb->callbacks->connect_indication(lapb->dev, reason);
66410 }
66411
66412 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66413 {
66414- if (lapb->callbacks.disconnect_confirmation)
66415- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66416+ if (lapb->callbacks->disconnect_confirmation)
66417+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66418 }
66419
66420 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66421 {
66422- if (lapb->callbacks.disconnect_indication)
66423- lapb->callbacks.disconnect_indication(lapb->dev, reason);
66424+ if (lapb->callbacks->disconnect_indication)
66425+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
66426 }
66427
66428 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66429 {
66430- if (lapb->callbacks.data_indication)
66431- return lapb->callbacks.data_indication(lapb->dev, skb);
66432+ if (lapb->callbacks->data_indication)
66433+ return lapb->callbacks->data_indication(lapb->dev, skb);
66434
66435 kfree_skb(skb);
66436 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66437@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66438 {
66439 int used = 0;
66440
66441- if (lapb->callbacks.data_transmit) {
66442- lapb->callbacks.data_transmit(lapb->dev, skb);
66443+ if (lapb->callbacks->data_transmit) {
66444+ lapb->callbacks->data_transmit(lapb->dev, skb);
66445 used = 1;
66446 }
66447
66448diff -urNp linux-3.0.3/net/mac80211/debugfs_sta.c linux-3.0.3/net/mac80211/debugfs_sta.c
66449--- linux-3.0.3/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66450+++ linux-3.0.3/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66451@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66452 struct tid_ampdu_rx *tid_rx;
66453 struct tid_ampdu_tx *tid_tx;
66454
66455+ pax_track_stack();
66456+
66457 rcu_read_lock();
66458
66459 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66460@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66461 struct sta_info *sta = file->private_data;
66462 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66463
66464+ pax_track_stack();
66465+
66466 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66467 htc->ht_supported ? "" : "not ");
66468 if (htc->ht_supported) {
66469diff -urNp linux-3.0.3/net/mac80211/ieee80211_i.h linux-3.0.3/net/mac80211/ieee80211_i.h
66470--- linux-3.0.3/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66471+++ linux-3.0.3/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66472@@ -27,6 +27,7 @@
66473 #include <net/ieee80211_radiotap.h>
66474 #include <net/cfg80211.h>
66475 #include <net/mac80211.h>
66476+#include <asm/local.h>
66477 #include "key.h"
66478 #include "sta_info.h"
66479
66480@@ -721,7 +722,7 @@ struct ieee80211_local {
66481 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66482 spinlock_t queue_stop_reason_lock;
66483
66484- int open_count;
66485+ local_t open_count;
66486 int monitors, cooked_mntrs;
66487 /* number of interfaces with corresponding FIF_ flags */
66488 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66489diff -urNp linux-3.0.3/net/mac80211/iface.c linux-3.0.3/net/mac80211/iface.c
66490--- linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66491+++ linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66492@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66493 break;
66494 }
66495
66496- if (local->open_count == 0) {
66497+ if (local_read(&local->open_count) == 0) {
66498 res = drv_start(local);
66499 if (res)
66500 goto err_del_bss;
66501@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66502 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66503
66504 if (!is_valid_ether_addr(dev->dev_addr)) {
66505- if (!local->open_count)
66506+ if (!local_read(&local->open_count))
66507 drv_stop(local);
66508 return -EADDRNOTAVAIL;
66509 }
66510@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66511 mutex_unlock(&local->mtx);
66512
66513 if (coming_up)
66514- local->open_count++;
66515+ local_inc(&local->open_count);
66516
66517 if (hw_reconf_flags) {
66518 ieee80211_hw_config(local, hw_reconf_flags);
66519@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66520 err_del_interface:
66521 drv_remove_interface(local, &sdata->vif);
66522 err_stop:
66523- if (!local->open_count)
66524+ if (!local_read(&local->open_count))
66525 drv_stop(local);
66526 err_del_bss:
66527 sdata->bss = NULL;
66528@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66529 }
66530
66531 if (going_down)
66532- local->open_count--;
66533+ local_dec(&local->open_count);
66534
66535 switch (sdata->vif.type) {
66536 case NL80211_IFTYPE_AP_VLAN:
66537@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66538
66539 ieee80211_recalc_ps(local, -1);
66540
66541- if (local->open_count == 0) {
66542+ if (local_read(&local->open_count) == 0) {
66543 if (local->ops->napi_poll)
66544 napi_disable(&local->napi);
66545 ieee80211_clear_tx_pending(local);
66546diff -urNp linux-3.0.3/net/mac80211/main.c linux-3.0.3/net/mac80211/main.c
66547--- linux-3.0.3/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66548+++ linux-3.0.3/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66549@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66550 local->hw.conf.power_level = power;
66551 }
66552
66553- if (changed && local->open_count) {
66554+ if (changed && local_read(&local->open_count)) {
66555 ret = drv_config(local, changed);
66556 /*
66557 * Goal:
66558diff -urNp linux-3.0.3/net/mac80211/mlme.c linux-3.0.3/net/mac80211/mlme.c
66559--- linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66560+++ linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66561@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66562 bool have_higher_than_11mbit = false;
66563 u16 ap_ht_cap_flags;
66564
66565+ pax_track_stack();
66566+
66567 /* AssocResp and ReassocResp have identical structure */
66568
66569 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66570diff -urNp linux-3.0.3/net/mac80211/pm.c linux-3.0.3/net/mac80211/pm.c
66571--- linux-3.0.3/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66572+++ linux-3.0.3/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66573@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66574 cancel_work_sync(&local->dynamic_ps_enable_work);
66575 del_timer_sync(&local->dynamic_ps_timer);
66576
66577- local->wowlan = wowlan && local->open_count;
66578+ local->wowlan = wowlan && local_read(&local->open_count);
66579 if (local->wowlan) {
66580 int err = drv_suspend(local, wowlan);
66581 if (err) {
66582@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66583 }
66584
66585 /* stop hardware - this must stop RX */
66586- if (local->open_count)
66587+ if (local_read(&local->open_count))
66588 ieee80211_stop_device(local);
66589
66590 suspend:
66591diff -urNp linux-3.0.3/net/mac80211/rate.c linux-3.0.3/net/mac80211/rate.c
66592--- linux-3.0.3/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66593+++ linux-3.0.3/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66594@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66595
66596 ASSERT_RTNL();
66597
66598- if (local->open_count)
66599+ if (local_read(&local->open_count))
66600 return -EBUSY;
66601
66602 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66603diff -urNp linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c
66604--- linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66605+++ linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66606@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66607
66608 spin_unlock_irqrestore(&events->lock, status);
66609
66610- if (copy_to_user(buf, pb, p))
66611+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66612 return -EFAULT;
66613
66614 return p;
66615diff -urNp linux-3.0.3/net/mac80211/util.c linux-3.0.3/net/mac80211/util.c
66616--- linux-3.0.3/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66617+++ linux-3.0.3/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66618@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66619 #endif
66620
66621 /* restart hardware */
66622- if (local->open_count) {
66623+ if (local_read(&local->open_count)) {
66624 /*
66625 * Upon resume hardware can sometimes be goofy due to
66626 * various platform / driver / bus issues, so restarting
66627diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c
66628--- linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66629+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66630@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66631 /* Increase the refcnt counter of the dest */
66632 atomic_inc(&dest->refcnt);
66633
66634- conn_flags = atomic_read(&dest->conn_flags);
66635+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
66636 if (cp->protocol != IPPROTO_UDP)
66637 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66638 /* Bind with the destination and its corresponding transmitter */
66639@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66640 atomic_set(&cp->refcnt, 1);
66641
66642 atomic_set(&cp->n_control, 0);
66643- atomic_set(&cp->in_pkts, 0);
66644+ atomic_set_unchecked(&cp->in_pkts, 0);
66645
66646 atomic_inc(&ipvs->conn_count);
66647 if (flags & IP_VS_CONN_F_NO_CPORT)
66648@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66649
66650 /* Don't drop the entry if its number of incoming packets is not
66651 located in [0, 8] */
66652- i = atomic_read(&cp->in_pkts);
66653+ i = atomic_read_unchecked(&cp->in_pkts);
66654 if (i > 8 || i < 0) return 0;
66655
66656 if (!todrop_rate[i]) return 0;
66657diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c
66658--- linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66659+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66660@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66661 ret = cp->packet_xmit(skb, cp, pd->pp);
66662 /* do not touch skb anymore */
66663
66664- atomic_inc(&cp->in_pkts);
66665+ atomic_inc_unchecked(&cp->in_pkts);
66666 ip_vs_conn_put(cp);
66667 return ret;
66668 }
66669@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66670 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66671 pkts = sysctl_sync_threshold(ipvs);
66672 else
66673- pkts = atomic_add_return(1, &cp->in_pkts);
66674+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66675
66676 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66677 cp->protocol == IPPROTO_SCTP) {
66678diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c
66679--- linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66680+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66681@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66682 ip_vs_rs_hash(ipvs, dest);
66683 write_unlock_bh(&ipvs->rs_lock);
66684 }
66685- atomic_set(&dest->conn_flags, conn_flags);
66686+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
66687
66688 /* bind the service */
66689 if (!dest->svc) {
66690@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66691 " %-7s %-6d %-10d %-10d\n",
66692 &dest->addr.in6,
66693 ntohs(dest->port),
66694- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66695+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66696 atomic_read(&dest->weight),
66697 atomic_read(&dest->activeconns),
66698 atomic_read(&dest->inactconns));
66699@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66700 "%-7s %-6d %-10d %-10d\n",
66701 ntohl(dest->addr.ip),
66702 ntohs(dest->port),
66703- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66704+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66705 atomic_read(&dest->weight),
66706 atomic_read(&dest->activeconns),
66707 atomic_read(&dest->inactconns));
66708@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66709 struct ip_vs_dest_user *udest_compat;
66710 struct ip_vs_dest_user_kern udest;
66711
66712+ pax_track_stack();
66713+
66714 if (!capable(CAP_NET_ADMIN))
66715 return -EPERM;
66716
66717@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66718
66719 entry.addr = dest->addr.ip;
66720 entry.port = dest->port;
66721- entry.conn_flags = atomic_read(&dest->conn_flags);
66722+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66723 entry.weight = atomic_read(&dest->weight);
66724 entry.u_threshold = dest->u_threshold;
66725 entry.l_threshold = dest->l_threshold;
66726@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66727 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66728
66729 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66730- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66731+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66732 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66733 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66734 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66735diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c
66736--- linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66737+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66738@@ -648,7 +648,7 @@ control:
66739 * i.e only increment in_pkts for Templates.
66740 */
66741 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66742- int pkts = atomic_add_return(1, &cp->in_pkts);
66743+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66744
66745 if (pkts % sysctl_sync_period(ipvs) != 1)
66746 return;
66747@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66748
66749 if (opt)
66750 memcpy(&cp->in_seq, opt, sizeof(*opt));
66751- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66752+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66753 cp->state = state;
66754 cp->old_state = cp->state;
66755 /*
66756diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c
66757--- linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66758+++ linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66759@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66760 else
66761 rc = NF_ACCEPT;
66762 /* do not touch skb anymore */
66763- atomic_inc(&cp->in_pkts);
66764+ atomic_inc_unchecked(&cp->in_pkts);
66765 goto out;
66766 }
66767
66768@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66769 else
66770 rc = NF_ACCEPT;
66771 /* do not touch skb anymore */
66772- atomic_inc(&cp->in_pkts);
66773+ atomic_inc_unchecked(&cp->in_pkts);
66774 goto out;
66775 }
66776
66777diff -urNp linux-3.0.3/net/netfilter/Kconfig linux-3.0.3/net/netfilter/Kconfig
66778--- linux-3.0.3/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66779+++ linux-3.0.3/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66780@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66781
66782 To compile it as a module, choose M here. If unsure, say N.
66783
66784+config NETFILTER_XT_MATCH_GRADM
66785+ tristate '"gradm" match support'
66786+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66787+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66788+ ---help---
66789+ The gradm match allows to match on grsecurity RBAC being enabled.
66790+ It is useful when iptables rules are applied early on bootup to
66791+ prevent connections to the machine (except from a trusted host)
66792+ while the RBAC system is disabled.
66793+
66794 config NETFILTER_XT_MATCH_HASHLIMIT
66795 tristate '"hashlimit" match support'
66796 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66797diff -urNp linux-3.0.3/net/netfilter/Makefile linux-3.0.3/net/netfilter/Makefile
66798--- linux-3.0.3/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66799+++ linux-3.0.3/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66800@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66801 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66802 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66803 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66804+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66805 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66806 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66807 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66808diff -urNp linux-3.0.3/net/netfilter/nfnetlink_log.c linux-3.0.3/net/netfilter/nfnetlink_log.c
66809--- linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66810+++ linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66811@@ -70,7 +70,7 @@ struct nfulnl_instance {
66812 };
66813
66814 static DEFINE_SPINLOCK(instances_lock);
66815-static atomic_t global_seq;
66816+static atomic_unchecked_t global_seq;
66817
66818 #define INSTANCE_BUCKETS 16
66819 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66820@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66821 /* global sequence number */
66822 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66823 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66824- htonl(atomic_inc_return(&global_seq)));
66825+ htonl(atomic_inc_return_unchecked(&global_seq)));
66826
66827 if (data_len) {
66828 struct nlattr *nla;
66829diff -urNp linux-3.0.3/net/netfilter/nfnetlink_queue.c linux-3.0.3/net/netfilter/nfnetlink_queue.c
66830--- linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66831+++ linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66832@@ -58,7 +58,7 @@ struct nfqnl_instance {
66833 */
66834 spinlock_t lock;
66835 unsigned int queue_total;
66836- atomic_t id_sequence; /* 'sequence' of pkt ids */
66837+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66838 struct list_head queue_list; /* packets in queue */
66839 };
66840
66841@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66842 nfmsg->version = NFNETLINK_V0;
66843 nfmsg->res_id = htons(queue->queue_num);
66844
66845- entry->id = atomic_inc_return(&queue->id_sequence);
66846+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66847 pmsg.packet_id = htonl(entry->id);
66848 pmsg.hw_protocol = entskb->protocol;
66849 pmsg.hook = entry->hook;
66850@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66851 inst->peer_pid, inst->queue_total,
66852 inst->copy_mode, inst->copy_range,
66853 inst->queue_dropped, inst->queue_user_dropped,
66854- atomic_read(&inst->id_sequence), 1);
66855+ atomic_read_unchecked(&inst->id_sequence), 1);
66856 }
66857
66858 static const struct seq_operations nfqnl_seq_ops = {
66859diff -urNp linux-3.0.3/net/netfilter/xt_gradm.c linux-3.0.3/net/netfilter/xt_gradm.c
66860--- linux-3.0.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66861+++ linux-3.0.3/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66862@@ -0,0 +1,51 @@
66863+/*
66864+ * gradm match for netfilter
66865